function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261)
---|---|---|
def do_remove_data(self, data, distance, mtree):
for child in self.children.itervalues():
if abs(distance - child.distance_to_parent) <= child.radius: # TODO: confirm
distance_to_child = mtree.distance_function(data, child.data)
if distance_to_child <= child.radius:
try:
child.remove_data(data, distance_to_child, mtree)
except __HOLE__:
# If KeyError was raised, then the data was not found in the child
pass
except _NodeUnderCapacity:
expanded_child = self.balance_children(child, mtree)
self.update_radius(expanded_child)
return
else:
self.update_radius(child)
return
raise KeyError() | KeyError | dataset/ETHPy150Open erdavila/M-Tree/py/mtree/__init__.py/_NonLeafNodeTrait.do_remove_data |
def __getattribute__(self, name):
try:
return WebObRequest.__getattribute__(self, name)
except __HOLE__ as e:
logger.exception(e)
abort(400) | UnicodeDecodeError | dataset/ETHPy150Open pecan/pecan/pecan/core.py/Request.__getattribute__ |
def proxy(key):
class ObjectProxy(object):
explanation_ = AttributeError(
"`pecan.state` is not bound to a context-local context.\n"
"Ensure that you're accessing `pecan.request` or `pecan.response` "
"from within the context of a WSGI `__call__` and that "
"`use_context_locals` = True."
)
def __getattr__(self, attr):
try:
obj = getattr(state, key)
except __HOLE__:
raise self.explanation_
return getattr(obj, attr)
def __setattr__(self, attr, value):
obj = getattr(state, key)
return setattr(obj, attr, value)
def __delattr__(self, attr):
obj = getattr(state, key)
return delattr(obj, attr)
def __dir__(self):
obj = getattr(state, key)
return dir(obj)
return ObjectProxy() | AttributeError | dataset/ETHPy150Open pecan/pecan/pecan/core.py/proxy |
def find_controller(self, state):
'''
The main request handler for Pecan applications.
'''
# get a sorted list of hooks, by priority (no controller hooks yet)
req = state.request
pecan_state = req.pecan
# store the routing path for the current application to allow hooks to
# modify it
pecan_state['routing_path'] = path = req.path_info
# handle "on_route" hooks
self.handle_hooks(self.hooks, 'on_route', state)
# lookup the controller, respecting content-type as requested
# by the file extension on the URI
pecan_state['extension'] = None
# attempt to guess the content type based on the file extension
if self.guess_content_type_from_ext \
and not pecan_state['content_type'] \
and '.' in path:
_, extension = splitext(path.rstrip('/'))
# preface with a letter to ensure compat for 2.5
potential_type = guess_type('x' + extension)[0]
if extension and potential_type is not None:
path = ''.join(path.rsplit(extension, 1))
pecan_state['extension'] = extension
pecan_state['content_type'] = potential_type
controller, remainder = self.route(req, self.root, path)
cfg = _cfg(controller)
if cfg.get('generic_handler'):
raise exc.HTTPNotFound
# handle generic controllers
im_self = None
if cfg.get('generic'):
im_self = six.get_method_self(controller)
handlers = cfg['generic_handlers']
controller = handlers.get(req.method, handlers['DEFAULT'])
handle_security(controller, im_self)
cfg = _cfg(controller)
# add the controller to the state so that hooks can use it
state.controller = controller
# if unsure ask the controller for the default content type
content_types = cfg.get('content_types', {})
if not pecan_state['content_type']:
# attempt to find a best match based on accept headers (if they
# exist)
accept = getattr(req.accept, 'header_value', '*/*')
if accept == '*/*' or (
accept.startswith('text/html,') and
list(content_types.keys()) in self.SIMPLEST_CONTENT_TYPES):
pecan_state['content_type'] = cfg.get(
'content_type',
'text/html'
)
else:
best_default = acceptparse.MIMEAccept(
accept
).best_match(
content_types.keys()
)
if best_default is None:
msg = "Controller '%s' defined does not support " + \
"content_type '%s'. Supported type(s): %s"
logger.error(
msg % (
controller.__name__,
pecan_state['content_type'],
content_types.keys()
)
)
raise exc.HTTPNotAcceptable()
pecan_state['content_type'] = best_default
elif cfg.get('content_type') is not None and \
pecan_state['content_type'] not in content_types:
msg = "Controller '%s' defined does not support content_type " + \
"'%s'. Supported type(s): %s"
logger.error(
msg % (
controller.__name__,
pecan_state['content_type'],
content_types.keys()
)
)
raise exc.HTTPNotFound
# fetch any parameters
if req.method == 'GET':
params = req.GET
elif req.content_type in ('application/json',
'application/javascript'):
try:
if not isinstance(req.json, dict):
raise TypeError('%s is not a dict' % req.json)
params = NestedMultiDict(req.GET, req.json)
except (__HOLE__, ValueError):
params = req.params
else:
params = req.params
# fetch the arguments for the controller
args, varargs, kwargs = self.get_args(
state,
params.mixed(),
remainder,
cfg['argspec'],
im_self
)
state.arguments = Arguments(args, varargs, kwargs)
# handle "before" hooks
self.handle_hooks(self.determine_hooks(controller), 'before', state)
return controller, args + varargs, kwargs | TypeError | dataset/ETHPy150Open pecan/pecan/pecan/core.py/PecanBase.find_controller |
def _handle_empty_response_body(self, state):
# Enforce HTTP 204 for responses which contain no body
if state.response.status_int == 200:
# If the response is a generator...
if isinstance(state.response.app_iter, types.GeneratorType):
# Split the generator into two so we can peek at one of them
# and determine if there is any response body content
a, b = tee(state.response.app_iter)
try:
next(a)
except StopIteration:
# If we hit StopIteration, the body is empty
state.response.status = 204
finally:
state.response.app_iter = b
else:
text = None
if state.response.charset:
# `response.text` cannot be accessed without a valid
# charset (because we don't know which encoding to use)
try:
text = state.response.text
except __HOLE__:
# If a valid charset is not specified, don't bother
# trying to guess it (because there's obviously
# content, so we know this shouldn't be a 204)
pass
if not any((state.response.body, text)):
state.response.status = 204
if state.response.status_int in (204, 304):
state.response.content_type = None | UnicodeDecodeError | dataset/ETHPy150Open pecan/pecan/pecan/core.py/PecanBase._handle_empty_response_body |
def get_args(self, state, all_params, remainder, argspec, im_self):
# When comparing the argspec of the method to GET/POST params,
# ignore the implicit (req, resp) at the beginning of the function
# signature
if hasattr(state.controller, '__self__'):
_repr = '.'.join((
state.controller.__self__.__class__.__module__,
state.controller.__self__.__class__.__name__,
state.controller.__name__
))
else:
_repr = '.'.join((
state.controller.__module__,
state.controller.__name__
))
signature_error = TypeError(
'When `use_context_locals` is `False`, pecan passes an explicit '
'reference to the request and response as the first two arguments '
'to the controller.\nChange the `%s` signature to accept exactly '
'2 initial arguments (req, resp)' % _repr
)
try:
positional = argspec.args[:]
positional.pop(1) # req
positional.pop(1) # resp
argspec = argspec._replace(args=positional)
except __HOLE__:
raise signature_error
args, varargs, kwargs = super(ExplicitPecan, self).get_args(
state, all_params, remainder, argspec, im_self
)
if ismethod(state.controller):
args = [state.request, state.response] + args
else:
# generic controllers have an explicit self *first*
# (because they're decorated functions, not instance methods)
args[1:1] = [state.request, state.response]
return args, varargs, kwargs | IndexError | dataset/ETHPy150Open pecan/pecan/pecan/core.py/ExplicitPecan.get_args |
@cached
@gen.coroutine
def get(self, secure, netloc, url):
proto = 'http' + secure
netloc = url_unescape(netloc)
if '/?' in url:
url, query = url.rsplit('/?', 1)
else:
query = None
remote_url = u"{}://{}/{}".format(proto, netloc, quote(url))
if query:
remote_url = remote_url + '?' + query
if not url.endswith('.ipynb'):
# this is how we handle relative links (files/ URLs) in notebooks
# if it's not a .ipynb URL and it is a link from a notebook,
# redirect to the original URL rather than trying to render it as a notebook
refer_url = self.request.headers.get('Referer', '').split('://')[-1]
if refer_url.startswith(self.request.host + '/url'):
self.redirect(remote_url)
return
parse_result = urlparse(remote_url)
robots_url = parse_result.scheme + "://" + parse_result.netloc + "/robots.txt"
public = False # Assume non-public
try:
robots_response = yield self.fetch(robots_url)
robotstxt = response_text(robots_response)
rfp = robotparser.RobotFileParser()
rfp.set_url(robots_url)
rfp.parse(robotstxt.splitlines())
public = rfp.can_fetch('*', remote_url)
except httpclient.HTTPError as e:
app_log.debug("Robots.txt not available for {}".format(remote_url),
exc_info=True)
public = True
except Exception as e:
app_log.error(e)
response = yield self.fetch(remote_url)
try:
nbjson = response_text(response, encoding='utf-8')
except __HOLE__:
app_log.error("Notebook is not utf8: %s", remote_url, exc_info=True)
raise web.HTTPError(400)
yield self.finish_notebook(nbjson, download_url=remote_url,
msg="file from url: %s" % remote_url,
public=public,
request=self.request,
format=self.format) | UnicodeDecodeError | dataset/ETHPy150Open jupyter/nbviewer/nbviewer/providers/url/handlers.py/URLHandler.get |
def parse(self, stream):
""" parse unified diff
return True on success
"""
lineends = dict(lf=0, crlf=0, cr=0)
nexthunkno = 0 #: even if index starts with 0 user messages number hunks from 1
p = None
hunk = None
# hunkactual variable is used to calculate hunk lines for comparison
hunkactual = dict(linessrc=None, linestgt=None)
class wrapumerate(enumerate):
"""Enumerate wrapper that uses boolean end of stream status instead of
StopIteration exception, and properties to access line information.
"""
def __init__(self, *args, **kwargs):
# we don't call parent, it is magically created by __new__ method
self._exhausted = False
self._lineno = False # after end of stream equal to the num of lines
self._line = False # will be reset to False after end of stream
def next(self):
"""Try to read the next line and return True if it is available,
False if end of stream is reached."""
if self._exhausted:
return False
try:
self._lineno, self._line = compat_next(super(wrapumerate, self))
except __HOLE__:
self._exhausted = True
self._line = False
return False
return True
@property
def is_empty(self):
return self._exhausted
@property
def line(self):
return self._line
@property
def lineno(self):
return self._lineno
# define states (possible file regions) that direct parse flow
headscan = True # start with scanning header
filenames = False # lines starting with --- and +++
hunkhead = False # @@ -R +R @@ sequence
hunkbody = False #
hunkskip = False # skipping invalid hunk mode
hunkparsed = False # state after successfully parsed hunk
# regexp to match start of hunk, used groups - 1,3,4,6
re_hunk_start = re.compile(b"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@")
self.errors = 0
# temp buffers for header and filenames info
header = []
srcname = None
tgtname = None
# start of main cycle
# each parsing block already has line available in fe.line
fe = wrapumerate(stream)
while fe.next():
# -- deciders: these only switch state to decide who should process
# -- line fetched at the start of this cycle
if hunkparsed:
hunkparsed = False
if re_hunk_start.match(fe.line):
hunkhead = True
elif fe.line.startswith(b"--- "):
filenames = True
else:
headscan = True
# -- ------------------------------------
# read out header
if headscan:
while not fe.is_empty and not fe.line.startswith(b"--- "):
header.append(fe.line)
fe.next()
if fe.is_empty:
if p == None:
debug("no patch data found") # error is shown later
self.errors += 1
else:
info("%d unparsed bytes left at the end of stream" % len(b''.join(header)))
self.warnings += 1
# TODO check for \No new line at the end..
# TODO test for unparsed bytes
# otherwise error += 1
# this is actually a loop exit
continue
headscan = False
# switch to filenames state
filenames = True
line = fe.line
lineno = fe.lineno
# hunkskip and hunkbody code skipped until definition of hunkhead is parsed
if hunkbody:
# [x] treat empty lines inside hunks as containing single space
# (this happens when diff is saved by copy/pasting to editor
# that strips trailing whitespace)
if line.strip(b"\r\n") == b"":
debug("expanding empty line in a middle of hunk body")
self.warnings += 1
line = b' ' + line
# process line first
if re.match(b"^[- \\+\\\\]", line):
# gather stats about line endings
if line.endswith(b"\r\n"):
p.hunkends["crlf"] += 1
elif line.endswith(b"\n"):
p.hunkends["lf"] += 1
elif line.endswith(b"\r"):
p.hunkends["cr"] += 1
if line.startswith(b"-"):
hunkactual["linessrc"] += 1
elif line.startswith(b"+"):
hunkactual["linestgt"] += 1
elif not line.startswith(b"\\"):
hunkactual["linessrc"] += 1
hunkactual["linestgt"] += 1
hunk.text.append(line)
# todo: handle \ No newline cases
else:
warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno + 1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
# check exit conditions
if hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt:
warning("extra lines for hunk no.%d at %d for target %s" % (nexthunkno, lineno + 1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
elif hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"]:
# hunk parsed successfully
p.hunks.append(hunk)
# switch to hunkparsed state
hunkbody = False
hunkparsed = True
# detect mixed window/unix line ends
ends = p.hunkends
if ((ends["cr"] != 0) + (ends["crlf"] != 0) + (ends["lf"] != 0)) > 1:
warning("inconsistent line ends in patch hunks for %s" % p.source)
self.warnings += 1
if debugmode:
debuglines = dict(ends)
debuglines.update(file=p.target, hunk=nexthunkno)
debug("crlf: %(crlf)d lf: %(lf)d cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
# fetch next line
continue
if hunkskip:
if re_hunk_start.match(line):
# switch to hunkhead state
hunkskip = False
hunkhead = True
elif line.startswith(b"--- "):
# switch to filenames state
hunkskip = False
filenames = True
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
if filenames:
if line.startswith(b"--- "):
if srcname != None:
# XXX testcase
warning("skipping false patch for %s" % srcname)
srcname = None
# XXX header += srcname
# double source filename line is encountered
# attempt to restart from this second line
re_filename = b"^--- ([^\t]+)"
match = re.match(re_filename, line)
# todo: support spaces in filenames
if match:
srcname = match.group(1).strip()
else:
warning("skipping invalid filename at line %d" % (lineno + 1))
self.errors += 1
# XXX p.header += line
# switch back to headscan state
filenames = False
headscan = True
elif not line.startswith(b"+++ "):
if srcname != None:
warning("skipping invalid patch with no target for %s" % srcname)
self.errors += 1
srcname = None
# XXX header += srcname
# XXX header += line
else:
# this should be unreachable
warning("skipping invalid target patch")
filenames = False
headscan = True
else:
if tgtname != None:
# XXX seems to be a dead branch
warning("skipping invalid patch - double target at line %d" % (lineno + 1))
self.errors += 1
srcname = None
tgtname = None
# XXX header += srcname
# XXX header += tgtname
# XXX header += line
# double target filename line is encountered
# switch back to headscan state
filenames = False
headscan = True
else:
re_filename = b"^\+\+\+ ([^\t]+)"
match = re.match(re_filename, line)
if not match:
warning("skipping invalid patch - no target filename at line %d" % (lineno + 1))
self.errors += 1
srcname = None
# switch back to headscan state
filenames = False
headscan = True
else:
if p: # for the first run p is None
self.items.append(p)
p = Patch()
p.source = srcname
srcname = None
p.target = match.group(1).strip()
p.header = header
header = []
# switch to hunkhead state
filenames = False
hunkhead = True
nexthunkno = 0
p.hunkends = lineends.copy()
continue
if hunkhead:
match = re.match(b"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@(.*)", line)
if not match:
if not p.hunks:
warning("skipping invalid patch with no hunks for file %s" % p.source)
self.errors += 1
# XXX review switch
# switch to headscan state
hunkhead = False
headscan = True
continue
else:
# TODO review condition case
# switch to headscan state
hunkhead = False
headscan = True
else:
hunk = Hunk()
hunk.startsrc = int(match.group(1))
hunk.linessrc = 1
if match.group(3): hunk.linessrc = int(match.group(3))
hunk.starttgt = int(match.group(4))
hunk.linestgt = 1
if match.group(6): hunk.linestgt = int(match.group(6))
hunk.invalid = False
hunk.desc = match.group(7)[1:].rstrip()
hunk.text = []
hunkactual["linessrc"] = hunkactual["linestgt"] = 0
# switch to hunkbody state
hunkhead = False
hunkbody = True
nexthunkno += 1
continue
# /while fe.next()
if p:
self.items.append(p)
if not hunkparsed:
if hunkskip:
warning("warning: finished with errors, some hunks may be invalid")
elif headscan:
if len(self.items) == 0:
warning("error: no patch data found!")
return False
else: # extra data at the end of file
pass
else:
warning("error: patch stream is incomplete!")
self.errors += 1
if len(self.items) == 0:
return False
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
# XXX fix total hunks calculation
debug("total files: %d total hunks: %d" % (len(self.items),
sum(len(p.hunks) for p in self.items)))
# ---- detect patch and patchset types ----
for idx, p in enumerate(self.items):
self.items[idx].type = self._detect_type(p)
types = set([p.type for p in self.items])
if len(types) > 1:
self.type = MIXED
else:
self.type = types.pop()
# --------
self._normalize_filenames()
return (self.errors == 0) | StopIteration | dataset/ETHPy150Open ensime/ensime-sublime/patch.py/PatchSet.parse |
def apply(self, strip=0, root=None):
""" Apply parsed patch, optionally stripping leading components
from file paths. `root` parameter specifies working dir.
return True on success
"""
if root:
prevdir = os.getcwd()
os.chdir(root)
total = len(self.items)
errors = 0
if strip:
# [ ] test strip level exceeds nesting level
# [ ] test the same only for selected files
# [ ] test if files end up being on the same level
try:
strip = int(strip)
except __HOLE__:
errors += 1
warning("error: strip parameter '%s' must be an integer" % strip)
strip = 0
# for fileno, filename in enumerate(self.source):
for i, p in enumerate(self.items):
if strip:
debug("stripping %s leading component(s) from:" % strip)
debug(" %s" % p.source)
debug(" %s" % p.target)
old = pathstrip(p.source, strip)
new = pathstrip(p.target, strip)
else:
old, new = p.source, p.target
filename = self.findfile(old, new)
if not filename:
warning("source/target file does not exist:\n --- %s\n +++ %s" % (old, new))
errors += 1
continue
if not isfile(filename):
warning("not a file - %s" % filename)
errors += 1
continue
# [ ] check absolute paths security here
debug("processing %d/%d:\t %s" % (i + 1, total, filename))
# validate before patching
f2fp = open(filename, 'rb')
hunkno = 0
hunk = p.hunks[hunkno]
hunkfind = []
hunkreplace = []
validhunks = 0
canpatch = False
for lineno, line in enumerate(f2fp):
if lineno + 1 < hunk.startsrc:
continue
elif lineno + 1 == hunk.startsrc:
hunkfind = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" -"]
hunkreplace = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" +"]
# pprint(hunkreplace)
hunklineno = 0
# todo \ No newline at end of file
# check hunks in source file
if lineno + 1 < hunk.startsrc + len(hunkfind) - 1:
if line.rstrip(b"\r\n") == hunkfind[hunklineno]:
hunklineno += 1
else:
info("file %d/%d:\t %s" % (i + 1, total, filename))
info(" hunk no.%d doesn't match source file at line %d" % (hunkno + 1, lineno + 1))
info(" expected: %s" % hunkfind[hunklineno])
info(" actual : %s" % line.rstrip(b"\r\n"))
# not counting this as error, because file may already be patched.
# check if file is already patched is done after the number of
# invalid hunks if found
# TODO: check hunks against source/target file in one pass
# API - check(stream, srchunks, tgthunks)
# return tuple (srcerrs, tgterrs)
# continue to check other hunks for completeness
hunkno += 1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
continue
else:
break
# check if processed line is the last line
if lineno + 1 == hunk.startsrc + len(hunkfind) - 1:
debug(" hunk no.%d for file %s -- is ready to be patched" % (hunkno + 1, filename))
hunkno += 1
validhunks += 1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
else:
if validhunks == len(p.hunks):
# patch file
canpatch = True
break
else:
if hunkno < len(p.hunks):
warning("premature end of source file %s at hunk %d" % (filename, hunkno + 1))
errors += 1
f2fp.close()
if validhunks < len(p.hunks):
if self._match_file_hunks(filename, p.hunks):
warning("already patched %s" % filename)
else:
warning("source file is different - %s" % filename)
errors += 1
if canpatch:
backupname = filename + b".orig"
if exists(backupname):
warning("can't backup original file to %s - aborting" % backupname)
else:
import shutil
shutil.move(filename, backupname)
if self.write_hunks(backupname, filename, p.hunks):
info("successfully patched %d/%d:\t %s" % (i + 1, total, filename))
os.unlink(backupname)
else:
errors += 1
warning("error patching file %s" % filename)
shutil.copy(filename, filename + ".invalid")
warning("invalid version is saved to %s" % filename + ".invalid")
# todo: proper rejects
shutil.move(backupname, filename)
if root:
os.chdir(prevdir)
# todo: check for premature eof
return (errors == 0) | ValueError | dataset/ETHPy150Open ensime/ensime-sublime/patch.py/PatchSet.apply |
def get_console(request, console_type, instance):
"""Get a console url based on console type."""
if console_type == 'AUTO':
check_consoles = CONSOLES
else:
try:
check_consoles = {'console_type': CONSOLES[console_type]}
except KeyError:
msg = _('Console type "%s" not supported.') % console_type
raise exceptions.NotAvailable(msg)
for api_call in check_consoles.values():
# Ugly workaround due novaclient API change from 2.17 to 2.18.
try:
httpnotimplemented = nova_exception.HttpNotImplemented
except __HOLE__:
httpnotimplemented = nova_exception.HTTPNotImplemented
try:
console = api_call(request, instance.id)
# If not supported, don't log it to avoid lot of errors in case
# of AUTO.
except httpnotimplemented:
continue
except Exception:
LOG.debug('Console not available', exc_info=True)
continue
console_url = "%s&%s(%s)" % (
console.url,
urlencode({'title': getattr(instance, "name", "")}),
instance.id)
return console_url
raise exceptions.NotAvailable(_('No available console found.')) | AttributeError | dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/dashboards/project/instances/console.py/get_console |
def _process_data(self):
# Process all incoming data until there are no more complete packets.
while len(self._buffer):
if self._expecting_ack:
self._expecting_ack = False
self._check_expected_ack()
# Check for a ctrl-c.
if len(self._buffer) and self._buffer[0] == CTRL_C:
self.interrupt_event.set()
self._buffer = self._buffer[1:]
try:
# Look for complete packet and extract from buffer.
pkt_begin = self._buffer.index("$")
pkt_end = self._buffer.index("#") + 2
if pkt_begin >= 0 and pkt_end < len(self._buffer):
pkt = self._buffer[pkt_begin:pkt_end + 1]
self._buffer = self._buffer[pkt_end + 1:]
self._handling_incoming_packet(pkt)
else:
break
except __HOLE__:
# No complete packet received yet.
break | ValueError | dataset/ETHPy150Open mbedmicro/pyOCD/pyOCD/gdbserver/gdbserver.py/GDBServerPacketIOThread._process_data |
@classmethod
def cache_distribution(cls, zf, source, target_dir):
"""Possibly cache an egg from within a zipfile into target_cache.
Given a zipfile handle and a filename corresponding to an egg distribution within
that zip, maybe write to the target cache and return a Distribution."""
dependency_basename = os.path.basename(source)
if not os.path.exists(target_dir):
target_dir_tmp = target_dir + '.' + uuid.uuid4().hex
for name in zf.namelist():
if name.startswith(source) and not name.endswith('/'):
# strip off prefix + '/'
target_name = os.path.join(dependency_basename, name[len(source) + 1:])
with contextlib.closing(zf.open(name)) as zi:
with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:
shutil.copyfileobj(zi, fp)
try:
os.rename(target_dir_tmp, target_dir)
except __HOLE__ as e:
if e.errno == errno.ENOTEMPTY:
safe_rmtree(target_dir_tmp)
else:
raise
dist = DistributionHelper.distribution_from_path(target_dir)
assert dist is not None, 'Failed to cache distribution %s' % source
return dist | OSError | dataset/ETHPy150Open pantsbuild/pex/pex/util.py/CacheHelper.cache_distribution |
def open(self, desktop=None):
"""
Open a dialogue box (dialog) using a program appropriate to the desktop
environment in use.
If the optional 'desktop' parameter is specified then attempt to use
that particular desktop environment's mechanisms to open the dialog
instead of guessing or detecting which environment is being used.
Suggested values for 'desktop' are "standard", "KDE", "GNOME",
"Mac OS X", "Windows".
The result of the dialogue interaction may be a string indicating user
input (for Input, Password, Menu, Pulldown), a list of strings
indicating selections of one or more items (for RadioList, CheckList),
or a value indicating true or false (for Question, Warning, Message,
Error).
Where a string value may be expected but no choice is made, an empty
string may be returned. Similarly, where a list of values is expected
but no choice is made, an empty list may be returned.
"""
# Decide on the desktop environment in use.
desktop_in_use = use_desktop(desktop)
# Get the program.
try:
program = self.commands[desktop_in_use]
except __HOLE__:
raise OSError, "Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use
# The handler is one of the functions communicating with the subprocess.
# Some handlers return boolean values, others strings.
handler, options = self.info[program]
cmd = [program]
for option in options:
if isinstance(option, str):
cmd.append(option)
else:
value = getattr(self, option.name, None)
cmd += option.convert(value, program)
return handler(cmd, 0) | KeyError | dataset/ETHPy150Open JT5D/Alfred-Popclip-Sublime/Sublime Text 2/SideBarEnhancements/sidebar/desktop/dialog.py/Dialogue.open |
def _getBuiltinExceptionNames():
def isExceptionName(builtin_name):
if builtin_name.endswith("Error") or \
builtin_name.endswith("Exception"):
return True
elif builtin_name in ("StopIteration", "GeneratorExit", "SystemExit",
"NotImplemented", "KeyboardInterrupt",
"StopAsyncIteration"):
return True
else:
return False
# Hide Python3 changes for built-in exception names
try:
import exceptions
names = [
str(name) for name in dir(exceptions)
if isExceptionName(name)
]
values = {}
for key in names:
values[key] = getattr(exceptions, key)
for key in dir(sys.modules["__builtin__"]):
name = str(key)
if isExceptionName(name):
names.append(key)
values[name] = getattr(sys.modules["__builtin__"], key)
except __HOLE__:
exceptions = {}
for key, value in sys.modules["builtins"].__dict__.items():
if isExceptionName(key):
exceptions[key] = value
names = [
key for key, value in exceptions.items()
]
values = {}
for key, value in exceptions.items():
values[key] = value
return names, values | ImportError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/Builtins.py/_getBuiltinExceptionNames |
def Add(self, ports, kind, allow_privileged=False, prohibited_host_ports=()):
"""Load port configurations and adds them to an internal dict.
Args:
ports: A list of strings or a CSV representing port forwarding.
kind: what kind of port configuration this is, only used for error
reporting.
allow_privileged: Allow to bind to ports under 1024.
prohibited_host_ports: A list of ports that are used outside of
the container and may not be mapped to this port manager.
Raises:
InconsistentPortConfigurationError: If a port is configured to do
two different conflicting things.
IllegalPortConfigurationError: If the port is out of range or
is not a number.
Returns:
A dictionary with forwarding rules as external_port => local_port.
"""
if not ports:
# Obviously nothing to do.
return
if isinstance(ports, int):
ports = str(ports)
if isinstance(ports, basestring):
# split a csv
ports = [port.strip() for port in ports.split(',')]
port_translations = {'tcp': {}, 'udp': {}}
for port in ports:
try:
if '/' in port:
tmp = port.split('/')
if len(tmp) != 2 or not re.match(PROTOCOL_RE, tmp[1].lower()):
raise IllegalPortConfigurationError(
'%r was not recognized as a valid port configuration.' % port)
port = tmp[0]
protocol = tmp[1].lower()
else:
protocol = 'tcp' # This is the default.
if ':' in port:
host_port, docker_port = (int(p.strip()) for p in port.split(':'))
port_translations[protocol][host_port] = docker_port
else:
host_port = int(port)
docker_port = host_port
port_translations[protocol][host_port] = host_port
if host_port in prohibited_host_ports:
raise InconsistentPortConfigurationError(
'Configuration conflict, port %d cannot be used by the '
'application.' % host_port)
if (host_port in self.used_host_ports and
self.used_host_ports[host_port] != docker_port):
raise InconsistentPortConfigurationError(
'Configuration conflict, port %d configured to forward '
'differently.' % host_port)
self.used_host_ports[host_port] = docker_port
if (host_port < 1 or host_port > 65535 or
docker_port < 1 or docker_port > 65535):
raise IllegalPortConfigurationError(
'Failed to load %s port configuration: invalid port %s'
% (kind, port))
if docker_port < 1024 and not allow_privileged:
raise IllegalPortConfigurationError(
'Cannot listen on port %d as it is priviliged, use a forwarding '
'port.' % docker_port)
if docker_port in RESERVED_DOCKER_PORTS:
raise IllegalPortConfigurationError(
'Cannot use port %d as it is reserved on the VM.'
% docker_port)
if host_port in RESERVED_HOST_PORTS:
raise IllegalPortConfigurationError(
'Cannot use port %d as it is reserved on the VM.'
% host_port)
except __HOLE__ as e:
logging.exception('Bad port description')
raise IllegalPortConfigurationError(
'Failed to load %s port configuration: "%s" error: "%s"'
% (kind, port, e))
# At this point we know they are not destructive.
self._port_mappings['tcp'].update(port_translations['tcp'])
self._port_mappings['udp'].update(port_translations['udp'])
# TODO: This is a bit of a hack.
self._port_names[kind] = port_translations
return port_translations | ValueError | dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/client/services/port_manager.py/PortManager.Add |
def moderation_queue(request):
"""
Displays a list of unapproved comments to be approved.
Templates: `comments/moderation_queue.html`
Context:
comments
Comments to be approved (paginated).
empty
Is the comment list empty?
is_paginated
Is there more than one page?
results_per_page
Number of comments per page
has_next
Is there a next page?
has_previous
Is there a previous page?
page
The current page number
next
The next page number
pages
Number of pages
hits
Total number of comments
page_range
Range of page numbers
"""
qs = comments.get_model().objects.filter(is_public=False, is_removed=False)
paginator = Paginator(qs, 100)
try:
page = int(request.GET.get("page", 1))
except __HOLE__:
raise Http404
try:
comments_per_page = paginator.page(page)
except InvalidPage:
raise Http404
return render_to_response("comments/moderation_queue.html", {
'comments' : comments_per_page.object_list,
'empty' : page == 1 and paginator.count == 0,
'is_paginated': paginator.num_pages > 1,
'results_per_page': 100,
'has_next': comments_per_page.has_next(),
'has_previous': comments_per_page.has_previous(),
'page': page,
'next': page + 1,
'previous': page - 1,
'pages': paginator.num_pages,
'hits' : paginator.count,
'page_range' : paginator.page_range
}, context_instance=template.RequestContext(request)) | ValueError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/contrib/comments/views/moderation.py/moderation_queue |
@staticmethod
def find_file(names, tail):
try:
return next(n for n in names if n.endswith(tail))
except __HOLE__:
return None | StopIteration | dataset/ETHPy150Open SMFOSS/CheesePrism/cheeseprism/pipext.py/RequirementDownloader.find_file |
def handle_requirement(self, req, finder):
"""
Download requirement, return a new requirement set of
requirements dependencies.
"""
if req.editable:
msg = "Editables not supported: %s" %req
logger.warn(msg)
self.errors.append("%s: %s" %(req, msg))
return
try:
url = finder.find_requirement(req, self.upgrade)
except DistributionNotFound:
msg = "No distribution found for %s" %req.name
logger.warn(msg)
self.errors.append(msg)
return
if url.hash in self.seen:
logger.debug('Seen: %s', url)
self.skip.append(url)
return
try:
pkginfo, outfile = self.download_url(url)
except __HOLE__, e:
msg = "Issue with download: %s" %e
logger.error(msg)
self.errors.append("%s: %s" %(req, msg))
return
except TypeError:
raise
except Exception, e:
msg = "Issue with archive: %s" %e
logger.error(msg)
self.errors.append("%s: %s" %(req, msg))
return
self.seen.add(outfile.read_md5().encode('hex'))
deplinks, reqs = self.depinfo_for_file(outfile)
if not reqs:
return pkginfo, outfile, None
content = "\n".join(reqs)
pkg = "%s-%s" %(pkginfo.name, pkginfo.version)
req_set, _ = self.req_set_from_file(self.temp_req(pkg, content),
self.download_dir,
deplinks=deplinks)
return pkginfo, outfile, req_set, | HTTPError | dataset/ETHPy150Open SMFOSS/CheesePrism/cheeseprism/pipext.py/RequirementDownloader.handle_requirement |
def accept(self, match, include_rejected=False, include_denied=False):
'''
Accept the keys matched
:param str match: A string to match against. i.e. 'web*'
:param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
:param bool include_denied: Whether or not to accept a matched key that was formerly denied
'''
def _print_accepted(matches, after_match):
if self.key.ACC in after_match:
accepted = sorted(
set(after_match[self.key.ACC]).difference(
set(matches.get(self.key.ACC, []))
)
)
for key in accepted:
print('Key for minion {0} accepted.'.format(key))
matches = self.key.name_match(match)
keys = {}
if self.key.PEND in matches:
keys[self.key.PEND] = matches[self.key.PEND]
if include_rejected and bool(matches.get(self.key.REJ)):
keys[self.key.REJ] = matches[self.key.REJ]
if include_denied and bool(matches.get(self.key.DEN)):
keys[self.key.DEN] = matches[self.key.DEN]
if not keys:
msg = (
'The key glob \'{0}\' does not match any unaccepted{1} keys.'
.format(match, (('', ' or denied'),
(' or rejected', ', rejected or denied')
)[include_rejected][include_denied])
)
print(msg)
raise salt.exceptions.SaltSystemExit(code=1)
if not self.opts.get('yes', False):
print('The following keys are going to be accepted:')
salt.output.display_output(
keys,
'key',
self.opts)
try:
veri = input('Proceed? [n/Y] ')
except __HOLE__:
raise SystemExit("\nExiting on CTRL-c")
if not veri or veri.lower().startswith('y'):
_print_accepted(
matches,
self.key.accept(
match_dict=keys,
include_rejected=include_rejected,
include_denied=include_denied
)
)
else:
print('The following keys are going to be accepted:')
salt.output.display_output(
keys,
'key',
self.opts)
_print_accepted(
matches,
self.key.accept(
match_dict=keys,
include_rejected=include_rejected,
include_denied=include_denied
)
) | KeyboardInterrupt | dataset/ETHPy150Open saltstack/salt/salt/key.py/KeyCLI.accept |
def delete(self, match):
'''
Delete the matched keys
:param str match: A string to match against. i.e. 'web*'
'''
def _print_deleted(matches, after_match):
deleted = []
for keydir in (self.key.ACC, self.key.PEND, self.key.REJ):
deleted.extend(list(
set(matches.get(keydir, [])).difference(
set(after_match.get(keydir, []))
)
))
for key in sorted(deleted):
print('Key for minion {0} deleted.'.format(key))
matches = self.key.name_match(match)
if not matches:
print(
'The key glob \'{0}\' does not match any accepted, unaccepted '
'or rejected keys.'.format(match)
)
raise salt.exceptions.SaltSystemExit(code=1)
if not self.opts.get('yes', False):
print('The following keys are going to be deleted:')
salt.output.display_output(
matches,
'key',
self.opts)
try:
veri = input('Proceed? [N/y] ')
except __HOLE__:
raise SystemExit("\nExiting on CTRL-c")
if veri.lower().startswith('y'):
_print_deleted(
matches,
self.key.delete_key(match_dict=matches, revoke_auth=True)
)
else:
print('Deleting the following keys:')
salt.output.display_output(
matches,
'key',
self.opts)
_print_deleted(
matches,
self.key.delete_key(match_dict=matches)
) | KeyboardInterrupt | dataset/ETHPy150Open saltstack/salt/salt/key.py/KeyCLI.delete |
def list_keys(self):
'''
Return a dict of managed keys and what the key status are
'''
key_dirs = []
# We have to differentiate between RaetKey._check_minions_directories
# and Zeromq-Keys. Raet-Keys only have three states while ZeroMQ-keys
# havd an additional 'denied' state.
if self.opts['transport'] in ('zeromq', 'tcp'):
key_dirs = self._check_minions_directories()
else:
key_dirs = self._check_minions_directories()
ret = {}
for dir_ in key_dirs:
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.isorted(os.listdir(dir_)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(fn_)
except (OSError, __HOLE__):
# key dir kind is not created yet, just skip
continue
return ret | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.list_keys |
def accept(self, match=None, match_dict=None, include_rejected=False, include_denied=False):
'''
Accept public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_rejected:
keydirs.append(self.REJ)
if include_denied:
keydirs.append(self.DEN)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
eload = {'result': True,
'act': 'accept',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (__HOLE__, OSError):
pass
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
) | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.accept |
def accept_all(self):
'''
Accept all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
eload = {'result': True,
'act': 'accept',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (__HOLE__, OSError):
pass
return self.list_keys() | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.accept_all |
def delete_key(self,
match=None,
match_dict=None,
preserve_minions=False,
revoke_auth=False):
'''
Delete public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
To preserve the master caches of minions who are matched, set preserve_minions
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
for status, keys in six.iteritems(matches):
for key in keys:
try:
if revoke_auth:
if self.opts.get('rotate_aes_key') is False:
print('Immediate auth revocation specified but AES key rotation not allowed. '
'Minion will not be disconnected until the master AES key is rotated.')
else:
try:
client = salt.client.get_local_client(mopts=self.opts)
client.cmd(key, 'saltutil.revoke_auth')
except salt.exceptions.SaltClientError:
print('Cannot contact Salt master. '
'Connection for {0} will remain up until '
'master AES key is rotated or auth is revoked '
'with \'saltutil.revoke_auth\'.'.format(key))
os.remove(os.path.join(self.opts['pki_dir'], status, key))
eload = {'result': True,
'act': 'delete',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (OSError, __HOLE__):
pass
if preserve_minions:
preserve_minions_list = matches.get('minions', [])
else:
preserve_minions_list = []
self.check_minion_cache(preserve_minions=preserve_minions_list)
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
) | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.delete_key |
def delete_den(self):
'''
Delete all denied keys
'''
keys = self.list_keys()
for status, keys in six.iteritems(self.list_keys()):
for key in keys[self.DEN]:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
eload = {'result': True,
'act': 'delete',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (__HOLE__, IOError):
pass
self.check_minion_cache()
return self.list_keys() | OSError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.delete_den |
def delete_all(self):
'''
Delete all keys
'''
for status, keys in six.iteritems(self.list_keys()):
for key in keys:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
eload = {'result': True,
'act': 'delete',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (__HOLE__, IOError):
pass
self.check_minion_cache()
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return self.list_keys() | OSError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.delete_all |
def reject(self, match=None, match_dict=None, include_accepted=False, include_denied=False):
'''
Reject public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_accepted:
keydirs.append(self.ACC)
if include_denied:
keydirs.append(self.DEN)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
eload = {'result': True,
'act': 'reject',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (IOError, __HOLE__):
pass
self.check_minion_cache()
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
) | OSError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.reject |
def reject_all(self):
'''
Reject all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
eload = {'result': True,
'act': 'reject',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (IOError, __HOLE__):
pass
self.check_minion_cache()
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return self.list_keys() | OSError | dataset/ETHPy150Open saltstack/salt/salt/key.py/Key.reject_all |
def accept(self, match=None, match_dict=None, include_rejected=False, include_denied=False):
'''
Accept public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_rejected:
keydirs.append(self.REJ)
if include_denied:
keydirs.append(self.DEN)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
except (__HOLE__, OSError):
pass
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
) | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/RaetKey.accept |
def accept_all(self):
'''
Accept all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
except (__HOLE__, OSError):
pass
return self.list_keys() | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/RaetKey.accept_all |
def delete_key(self,
match=None,
match_dict=None,
preserve_minions=False,
revoke_auth=False):
'''
Delete public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
for status, keys in six.iteritems(matches):
for key in keys:
if revoke_auth:
if self.opts.get('rotate_aes_key') is False:
print('Immediate auth revocation specified but AES key rotation not allowed. '
'Minion will not be disconnected until the master AES key is rotated.')
else:
try:
client = salt.client.get_local_client(mopts=self.opts)
client.cmd(key, 'saltutil.revoke_auth')
except salt.exceptions.SaltClientError:
print('Cannot contact Salt master. '
'Connection for {0} will remain up until '
'master AES key is rotated or auth is revoked '
'with \'saltutil.revoke_auth\'.'.format(key))
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
except (__HOLE__, IOError):
pass
self.check_minion_cache(preserve_minions=matches.get('minions', []))
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
) | OSError | dataset/ETHPy150Open saltstack/salt/salt/key.py/RaetKey.delete_key |
def delete_all(self):
'''
Delete all keys
'''
for status, keys in six.iteritems(self.list_keys()):
for key in keys:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
except (__HOLE__, IOError):
pass
self.check_minion_cache()
return self.list_keys() | OSError | dataset/ETHPy150Open saltstack/salt/salt/key.py/RaetKey.delete_all |
def reject(self, match=None, match_dict=None, include_accepted=False, include_denied=False):
'''
Reject public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_accepted:
keydirs.append(self.ACC)
if include_denied:
keydirs.append(self.DEN)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
except (__HOLE__, OSError):
pass
self.check_minion_cache()
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
) | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/RaetKey.reject |
def reject_all(self):
'''
Reject all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
except (__HOLE__, OSError):
pass
self.check_minion_cache()
return self.list_keys() | IOError | dataset/ETHPy150Open saltstack/salt/salt/key.py/RaetKey.reject_all |
def run(self):
# determine the sender's ip address from the email headers
ip_address = None
try:
headers = self.email_dict['headers']
for hdr in ['X-Originating-IP', # preferred header order to use
'X-Source-IP',
'X-Source',
'Received']:
if headers.has_key(hdr):
match = ip_pattern.search(headers[hdr])
if match is not None:
ip_address = match.group().strip().replace('[','').replace(']', '')
break
except KeyError:
pass
if ip_address is not None:
# use the ip address to get the geographic location
location = get_location(ip_address)
try:
lat = location['Latitude']
lng = location['Longitude']
# use the latitude and longitude to get the current report from the forecast.io API
weather_url = 'https://api.forecast.io/forecast/'+forecast_io_key+'/'+lat+','+lng
weather_data = get_url(weather_url)
if weather_data is not None:
data = json.loads(weather_data)
report = data["currently"]["summary"] + '\n\n' + data["hourly"]["summary"]
send('Current Weather', report, recipient_list=[self.sender], sender=server_auto_email)
return
except __HOLE__:
pass
# the default reply, in case the location or weather for that location can't be found
send('Current Weather',
'Sorry, this service could not determine the weather for your geographic location',
recipient_list=[self.sender],
sender=server_auto_email) | KeyError | dataset/ETHPy150Open dpapathanasiou/intelligent-smtp-responder/agents/weather_response_example.py/reply_weather.run |
def unpack(*arguments):
"""
Unpack arguments to be used in methods wrapped
"""
def decorator(func):
def wrapper(_self, data, **kwargs):
data = smart_parse(data)
try:
args = [data[item] for item in arguments]
except __HOLE__:
raise MissingField(item)
kwargs["_arguments"] = arguments
func(_self, *args, **kwargs)
return wrapper
return decorator | KeyError | dataset/ETHPy150Open sihrc/tornado-boilerplate/indico/utils/__init__.py/unpack |
def form_urlencoded_parse(body):
"""
Parse x-www-form-url encoded data
"""
try:
data = urlparse.parse_qs(body, strict_parsing=True)
for key in data:
data[key] = data[key][0]
return data
except __HOLE__:
raise InvalidJSON() | ValueError | dataset/ETHPy150Open sihrc/tornado-boilerplate/indico/utils/__init__.py/form_urlencoded_parse |
def smart_parse(body):
"""
Handle json, fall back to x-www-form-urlencoded
"""
try:
data_dict = json.loads(body)
except __HOLE__:
return form_urlencoded_parse(body)
return data_dict | ValueError | dataset/ETHPy150Open sihrc/tornado-boilerplate/indico/utils/__init__.py/smart_parse |
def config(settings):
"""
Template for Washington Common Operating Picture (WA-COP)
http://psmcop.org
"""
T = current.T
s3 = current.response.s3
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate += ("MCOP", "default/users")
settings.base.system_name = T("Sahana: Washington Common Operating Picture (WA-COP)")
settings.base.system_name_short = T("Sahana")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users need to be approved
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 7 # Apply Controller, Function and Table ACLs
settings.security.map = True
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "MCOP"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "bootstrap"
settings.ui.hide_report_options = False
# Uncomment to use S3MultiSelectWidget on all dropdowns (currently the Auth Registration page & LocationSelectorWidget2 listen to this)
settings.ui.multiselect_widget = "search"
# @ToDo: Investigate
settings.ui.use_button_icons = True
# Custom icon classes
settings.ui.custom_icons = {
"alert": "icon-alert",
"building": "icon-building",
"contact": "icon-contact",
"incident": "icon-incident",
"resource": "icon-wrench",
"tasks": "icon-tasks",
}
# Uncomment to show a default cancel button in standalone create/update forms
settings.ui.default_cancel_button = True
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "-0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%b %d %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
settings.msg.require_international_phone_numbers = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# -----------------------------------------------------------------------------
# GIS settings
# Restrict the Location Selector to just certain countries
settings.gis.countries = ("US",)
# Levels for the LocationSelector
levels = ("L1", "L2", "L3")
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Until we add support to S3LocationSelector to set dropdowns from LatLons
settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "mcop"
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to prevent showing LatLon in Location Represents
settings.gis.location_represent_address_only = "icon"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# -----------------------------------------------------------------------------
# Module settings
# Uncomment to customise the list of options for the Priority of a Task.
# NB Be very cautious about doing this (see docstring in modules/s3cfg.py)
# MCOP sets these to match Wrike
settings.project.task_priority_opts = {2: T("High"),
3: T("Normal"),
4: T("Low")
}
# Uncomment to customise the list of options for the Status of a Task.
# NB Be very cautious about doing this (see docstring in modules/s3cfg.py)
# MCOP sets these to match Wrike
settings.project.task_status_opts = {2: T("Active"),
6: T("Deferred"),
7: T("Canceled"),
12: T("Completed"),
}
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ("xls",)
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Mariner CommandBridge resource identifiers
settings.sync.mcb_resource_identifiers = {"event_incident": "802017D4-08D1-40EA-A03D-4FCFC26883A4",
"project_task": "06831BE6-7B49-47F0-80CD-5FB27DEEC330",
"cms_post": "A6E68F53-72B8-415A-A50F-BB26D363CD30",
}
# Mariner CommandBridge domain identifiers
settings.sync.mcb_domain_identifiers = {"sahana": "9197B3DC-07DD-4922-96CA-9B6D8A1FC2D2",
"wrike": "69A069D9-23E8-422D-BB18-2A3A92FE291C",
}
# -----------------------------------------------------------------------------
# Disable rheaders
def customise_no_rheader_controller(**attr):
# Remove rheader
attr["rheader"] = None
return attr
settings.customise_org_facility_controller = customise_no_rheader_controller
settings.customise_org_organisation_controller = customise_no_rheader_controller
settings.customise_org_resource_controller = customise_no_rheader_controller
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Reports",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# =============================================================================
# Customise Resources
# -----------------------------------------------------------------------------
# Alerts (cms_post)
# -----------------------------------------------------------------------------
def cms_post_age(row):
"""
The age of the post
- used for colour-coding markers of Alerts
"""
if hasattr(row, "cms_post"):
row = row.cms_post
try:
date = row.date
except:
# not available
return current.messages["NONE"]
now = current.request.utcnow
age = now - date
if age < timedelta(days=2):
return 1
elif age < timedelta(days=7):
return 2
else:
return 3
# -----------------------------------------------------------------------------
def customise_cms_post_controller(**attr):
# Make GeoJSON output smaller
current.s3db.gis_location.gis_feature_type.represent = None
# Remove rheader
attr["rheader"] = None
return attr
settings.customise_cms_post_controller = customise_cms_post_controller
def customise_cms_post_resource(r, tablename):
"""
Customise cms_post resource
- CRUD Strings
- Datatable
- Fields
- Form
Runs after controller customisation
But runs before prep
"""
s3 = current.response.s3
db = current.db
s3db = current.s3db
table = s3db.cms_post
s3.dl_pagelength = 12
list_id = r.get_vars.get("list_id", None)
if list_id != "cms_post_datalist":
# Default page, not homepage
s3.dl_rowsize = 2
#from s3 import FS
#s3.filter = FS("series_id$name").belongs(["Alert"])
s3.crud_strings["cms_post"] = Storage(
label_create = T("Add"),
title_display = T("Alert Details"),
title_list = T("Alerts"),
title_update = T("Edit Alert"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert added"),
msg_record_modified = T("Alert updated"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No Alerts currently registered"))
# CRUD Form
from s3 import IS_LOCATION, S3LocationSelector
table.location_id.requires = IS_LOCATION()
table.location_id.widget = S3LocationSelector(levels=levels,
show_address=True,
show_map=True,
points = True,
polygons = True,
)
# Don't add new Locations here
table.location_id.comment = None
#table.series_id.readable = table.series_id.writable = True
#table.series_id.label = T("Type")
stable = s3db.cms_series
try:
series_id = db(stable.name == "Alert").select(stable.id,
limitby=(0, 1)
).first().id
table.series_id.default = series_id
except:
# No suitable prepop
pass
table.body.label = T("Description")
table.body.widget = None
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_fields = ["date",
#"series_id",
"body",
"location_id",
#S3SQLInlineComponent(
# "document",
# name = "file",
# label = T("Files"),
# fields = [("", "file"),
# #"comments",
# ],
# ),
]
incident_id = r.get_vars.get("~.(incident)", None)
if incident_id:
# Coming from Profile page
# Default location to Incident Location
itable = s3db.event_incident
incident = db(itable.id == incident_id).select(itable.location_id,
limitby=(0, 1)
).first()
if incident:
table.location_id.default = incident.location_id
# Add link onaccept
def create_onaccept(form):
current.s3db.event_post.insert(incident_id=incident_id,
post_id=form.vars.id)
s3db.configure("cms_post",
create_onaccept = create_onaccept,
)
else:
# Insert into Form
crud_fields.insert(0, S3SQLInlineComponent("incident_post",
label = T("Incident"),
fields = [("", "incident_id")],
multiple = False,
))
crud_form = S3SQLCustomForm(*crud_fields)
from s3 import S3OptionsFilter
filter_widgets = s3db.get_config("cms_post", "filter_widgets")
# Remove the Type filter
# @ToDo: More robust way to identify it
del filter_widgets[1]
filter_widgets.insert(1, S3OptionsFilter("incident_post.incident_id"))
# Return to List view after create/update/delete
# We do all this in Popups
url_next = URL(c="cms", f="post", args="datalist")
# Adapt list fields for cms_post_list_layout?
#if r.method == "datalist":
# list_fields = r.resource.get_config("list_fields")
# list_fields.extend(["event_post.event_id",
# "event_post.incident_id",
# "series_id",
# ])
s3db.configure("cms_post",
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
update_next = url_next,
)
if r.representation == "geojson":
# Add Virtual field to allow colour-coding by age
from s3dal import Field
table.age = Field.Method("age", cms_post_age)
settings.customise_cms_post_resource = customise_cms_post_resource
# -----------------------------------------------------------------------------
# Incidents (event_incident)
# -----------------------------------------------------------------------------
def open_incident_filter(selector, tablename=None):
"""
Default filter for Incidents (callback)
"""
return [False]
# -----------------------------------------------------------------------------
def customise_event_incident_controller(**attr):
if "summary" in current.request.args:
settings.gis.legend = None
# Not working
from s3 import s3_set_default_filter
s3_set_default_filter("~.closed",
open_incident_filter,
tablename = "event_incident")
# Make GeoJSON output smaller
current.s3db.gis_location.gis_feature_type.represent = None
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="event", f="incident",
args=["[id]", "profile"])),
]
s3.actions = actions
return output
s3.postp = custom_postp
# Remove RHeader
attr["rheader"] = None
return attr
settings.customise_event_incident_controller = customise_event_incident_controller
# -----------------------------------------------------------------------------
def customise_event_incident_resource(r, tablename):
"""
Customise event_incident resource
- Fields
- List Fields
- Form
- Filter Widgets
Runs after controller customisation
But runs before prep
"""
s3db = current.s3db
table = s3db[tablename]
crud_strings = current.response.s3.crud_strings
# Enable 'Lead Organisation' field
table.organisation_id.readable = table.organisation_id.writable = True
if r.interactive:
table.zero_hour.label = T("Date")
table.comments.label = T("Description")
crud_strings["event_incident"].label_delete_button = T("Delete Incident")
list_fields = ["zero_hour",
"name",
"location_id",
"comments",
"organisation_id",
"closed",
]
# Custom Form
location_id_field = table.location_id
from s3 import IS_LOCATION, S3LocationSelector
location_id_field.requires = IS_LOCATION()
location_id_field.widget = S3LocationSelector(levels=levels,
show_address=True,
show_map=True,
points = True,
polygons = True,
)
# Don't add new Locations here
location_id_field.comment = None
#from gluon.validators import IS_EMPTY_OR
#table.organisation_id.requires = IS_EMPTY_OR(table.organisation_id.requires)
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_fields = ["zero_hour",
"name",
"location_id",
"comments",
"organisation_id",
S3SQLInlineComponent(
"document",
name = "file",
label = T("Documents"),
fields = [("", "file"),
#"comments",
],
),
]
if r.method != "create":
crud_fields.append("closed")
crud_form = S3SQLCustomForm(*crud_fields)
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
filter_widgets = [S3TextFilter(["name",
"comments"
],
#label = T("Description"),
label = T("Search"),
_class = "filter-search",
),
S3OptionsFilter("closed",
cols = 2,
),
S3LocationFilter("location_id",
#label = T("Location"),
levels = levels,
),
#S3OptionsFilter("status_id",
# label = T("Status"),
# # @ToDo: Introspect cols
# cols = 3,
# ),
S3OptionsFilter("organisation_id",
represent = "%(name)s",
),
]
url_next = URL(c="event", f="incident", args=["[id]", "profile"])
s3db.configure("event_incident",
create_next = url_next,
crud_form = crud_form,
delete_next = URL(c="event", f="incident", args="summary"),
filter_widgets = filter_widgets,
list_fields = list_fields,
update_next = url_next,
)
if r.method == "profile" and r.tablename == "event_incident":
# Customise tables used by widgets
customise_project_task_resource(r, "project_task")
from s3 import FS
map_widget = dict(label = "Map",
type = "map",
context = "incident",
icon = "icon-map",
# Tall/thin fits Puget Sound best
height = 600,
width = 200,
colspan = 1,
)
alerts_widget = dict(label = "Alerts",
label_create = "Create Alert",
type = "datalist",
tablename = "cms_post",
context = "incident",
# Only show Active Alerts
filter = FS("expired") == False,
icon = "alert",
colspan = 1,
layer = "Alerts",
#list_layout = s3db.cms_post_list_layout,
)
resources_widget = dict(label = "Resources",
label_create = "Add Resource",
type = "datalist",
tablename = "event_resource",
context = "incident",
#filter = FS("status").belongs(event_resource_active_statuses),
icon = "resource",
colspan = 1,
#list_layout = s3db.event_resource_list_layout,
)
tasks_widget = dict(label = "Tasks",
label_create = "Create Task",
type = "datalist",
tablename = "project_task",
context = "incident",
# Only show Active Tasks
filter = FS("status").belongs(s3db.project_task_active_statuses),
icon = "tasks",
colspan = 1,
#list_layout = s3db.project_task_list_layout,
)
record = r.record
record_id = record.id
record_name = record.name
title = "%s: %s" % (T("Incident"), record_name)
marker = current.gis.get_marker(controller = "event",
function = "incident")
layer = dict(name = record_name,
id = "profile-header-%s-%s" % (tablename, record_id),
active = True,
tablename = tablename,
url = "/%s/event/incident.geojson?incident.id=%s" % \
(r.application, record_id),
marker = marker,
)
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class="icon icon-edit"),
_href=URL(c="event", f="incident",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["event_incident"].title_update,
)
else:
edit_btn = ""
# Dropdown of available documents
# @ToDo: Use resource.components instead of DAL for security (not required here but good practise)
dtable = s3db.doc_document
rows = current.db(dtable.doc_id == r.record.doc_id).select(dtable.file)
documents = [row.file for row in rows]
if documents:
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = dtable.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (__HOLE__, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
s3db.configure("event_incident",
profile_title = title,
profile_header = DIV(edit_btn,
H2(title),
docs,
_class="profile-header",
),
profile_layers = (layer,),
profile_widgets = (alerts_widget,
resources_widget,
tasks_widget,
map_widget,
),
profile_cols = 4
)
settings.customise_event_incident_resource = customise_event_incident_resource
# -----------------------------------------------------------------------------
# Facilities (org_facility)
# -----------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
"""
Customise org_facility resource
- CRUD Strings
- List Fields
- Form
- Report Options
Runs after controller customisation
But runs before prep
"""
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_facility
s3.crud_strings[tablename] = Storage(
label_create = T("Add"),
title_display = T("Facility Details"),
title_list = T("Facilities"),
title_update = T("Edit Facility Details"),
label_list_button = T("List Facilities"),
label_delete_button = T("Delete Facility"),
msg_record_created = T("Facility added"),
msg_record_modified = T("Facility details updated"),
msg_record_deleted = T("Facility deleted"),
msg_list_empty = T("No Facilities currently registered"))
from s3 import IS_LOCATION, S3LocationSelector
location_id_field = table.location_id
location_id_field.requires = IS_LOCATION()
location_id_field.widget = S3LocationSelector(levels=levels,
show_address=True,
show_map=True,
)
# Don't add new Locations here
location_id_field.comment = None
list_fields = ["name",
"organisation_id",
#(T("Type"), "facility_type.name"),
"location_id",
"phone1",
"comments",
]
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm(*list_fields)
# Report options
report_fields = ["organisation_id",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = ["count(id)"
],
defaults=Storage(rows = "organisation_id",
cols = "",
fact = "count(id)",
totals = True,
chart = "barchart:rows",
#table = "collapse",
)
)
url_next = URL(c="org", f="facility", args="summary")
s3db.configure("org_facility",
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
list_fields = list_fields,
list_layout = render_facilities,
report_options = report_options,
summary = settings.ui.summary,
update_next = url_next,
)
if r.method == "summary":
settings.gis.legend = None
settings.customise_org_facility_resource = customise_org_facility_resource
# -----------------------------------------------------------------------------
# Stakeholders (org_organisation)
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
"""
Customise org_organisation resource
- List Fields
- CRUD Strings
- Form
- Filter
Runs after controller customisation
But runs before prep
"""
# Load normal Model
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_organisation
list_fields = ["id",
"name",
"logo",
"phone",
"website",
]
if r.interactive:
# Labels
table.comments.label = T("Description")
s3.crud_strings["org_organisation"] = Storage(
label_create = T("Add"),
title_display = T("Stakeholder Details"),
title_list = T("Stakeholders"),
title_update = T("Edit Stakeholder"),
label_list_button = T("List Stakeholders"),
label_delete_button = T("Delete Stakeholder"),
msg_record_created = T("Stakeholder added"),
msg_record_modified = T("Stakeholder updated"),
msg_record_deleted = T("Stakeholder deleted"),
msg_list_empty = T("No Stakeholders currently registered"))
from s3 import S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm("id",
"name",
S3SQLInlineLink(
"organisation_type",
field = "organisation_type_id",
label = T("Type"),
multiple = False,
#widget = "hierarchy",
),
"logo",
"phone",
"website",
"comments",
)
s3db.configure("org_organisation",
crud_form = crud_form,
)
if r.method == "datalist":
# Stakeholder selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 3
from s3 import S3TextFilter, S3OptionsFilter
filter_widgets = [S3TextFilter(["name",
"acronym",
"website",
"comments",
],
label = T("Search")),
S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
),
]
s3db.configure("org_organisation",
filter_widgets = filter_widgets,
)
if r.method == "profile":
# Ensure the correct list_fields are set
# @ToDo: Have profile method call these automatically
customise_pr_person_resource(r, "pr_person")
customise_event_incident_resource(r, "event_incident")
# Customise tables used by widgets
#customise_cms_post_fields()
#customise_hrm_human_resource_fields()
#customise_org_office_fields()
s3db.org_customise_org_resource_fields("profile")
#from s3 import FS
contacts_widget = dict(label = "Directory",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "organisation",
create_controller = "pr",
create_function = "person",
icon = "contact",
show_on_map = False, # Since they will show within Offices
list_layout = render_contacts,
)
#map_widget = dict(label = "Map",
# type = "map",
# context = "organisation",
# icon = "icon-map",
# height = 383,
# width = 568,
# )
facilities_widget = dict(label = "Facilities",
label_create = "Create Facility",
type = "datalist",
tablename = "org_facility",
context = "organisation",
icon = "building",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_facilities,
)
resources_widget = dict(label = "Resources",
label_create = "Create Resource",
type = "datalist",
tablename = "org_resource",
context = "organisation",
icon = "resource",
show_on_map = False, # No Marker yet & only show at L1-level anyway
#list_layout = s3db.org_resource_list_layout,
)
incidents_widget = dict(label = "Incidents",
label_create = "Create Incident",
type = "datalist",
tablename = "event_incident",
context = "organisation",
icon = "incident",
show_on_map = False, # No Marker yet & only show at L1-level anyway
#list_layout = s3db.event_incident_list_layout,
)
record = r.record
title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list, record.name)
if record.logo:
logo = URL(c="default", f="download", args=[record.logo])
else:
logo = ""
s3db.configure("org_organisation",
profile_title = title,
profile_header = DIV(A(IMG(_class="media-object",
_src=logo,
),
_class="pull-left",
#_href=org_url,
),
H2(title),
_class="profile-header",
),
profile_widgets = [contacts_widget,
#map_widget,
facilities_widget,
resources_widget,
incidents_widget,
#activities_widget,
#reports_widget,
#assessments_widget,
],
#profile_cols = 3
)
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="organisation", args="datalist")
s3db.configure("org_organisation",
create_next = url_next,
delete_next = url_next,
# We want the Create form to be in a modal, not inline, for consistency
#listadd = False,
list_fields = list_fields,
update_next = url_next,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
# Resource Inventory (org_resource)
# -----------------------------------------------------------------------------
def customise_org_resource_resource(r, tablename):
"""
Customise org_resource resource
- Fields
- Filter
Runs after controller customisation
But runs before prep
"""
if r.representation == "geojson":
# Make GeoJSON output smaller
current.s3db.gis_location.gis_feature_type.represent = None
elif r.interactive:
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_resource
s3.crud_strings[tablename] = Storage(
label_create = T("Add"),
title_display = T("Inventory Resource"),
title_list = T("Resource Inventory"),
title_update = T("Edit Inventory Resource"),
label_list_button = T("Resource Inventory"),
label_delete_button = T("Delete Inventory Resource"),
msg_record_created = T("Inventory Resource added"),
msg_record_modified = T("Inventory Resource updated"),
msg_record_deleted = T("Inventory Resource deleted"),
msg_list_empty = T("No Resources in Inventory"))
location_field = table.location_id
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
if location_id:
location_field.default = location_id
# We still want to be able to specify a precise location
#location_field.readable = location_field.writable = False
from s3 import IS_LOCATION, S3LocationSelector
location_field.requires = IS_LOCATION()
location_field.widget = S3LocationSelector(levels=levels,
show_address=True,
show_map=True,
)
# Don't add new Locations here
location_field.comment = None
s3db.org_customise_org_resource_fields(r.method)
# Configure fields
#table.site_id.readable = table.site_id.readable = False
#location_field.label = T("District")
# Return to Summary view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="resource", args="summary")
s3db.configure("org_resource",
create_next = url_next,
delete_next = url_next,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
update_next = url_next,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
s3.cancel = URL(c="org", f="resource")
if r.method == "summary":
settings.gis.legend = None
settings.customise_org_resource_resource = customise_org_resource_resource
# -----------------------------------------------------------------------------
def customise_event_resource_resource(r, tablename):
"""
Customise event_resource resource
- Fields
- Filter
Runs after controller customisation
But runs before prep
"""
if r.representation == "geojson":
# Make GeoJSON output smaller
current.s3db.gis_location.gis_feature_type.represent = None
elif r.interactive:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add"),
title_display = T("Resource Responding"),
title_list = T("Resources Responding"),
title_update = T("Edit Resource Responding"),
label_list_button = T("Resources Responding"),
label_delete_button = T("Delete Resource Responding"),
msg_record_created = T("Resource Responding added"),
msg_record_modified = T("Resource Responding updated"),
msg_record_deleted = T("Resource Responding deleted"),
msg_list_empty = T("No Resources Responding"))
if r.method == "summary":
settings.gis.legend = None
settings.customise_event_resource_resource = customise_event_resource_resource
# -----------------------------------------------------------------------------
# Tasks (project_task)
# -----------------------------------------------------------------------------
def active_status_filter(selector, tablename=None):
"""
Default filter for Tasks (callback)
"""
return current.s3db.project_task_active_statuses
# -----------------------------------------------------------------------------
def customise_project_task_controller(**attr):
if "summary" in current.request.args:
settings.gis.legend = None
# Not working
from s3 import s3_set_default_filter
s3_set_default_filter("~.status",
active_status_filter,
tablename = "project_task")
# Make GeoJSON output smaller
current.s3db.gis_location.gis_feature_type.represent = None
# Remove rheader
attr["rheader"] = None
return attr
settings.customise_project_task_controller = customise_project_task_controller
# -----------------------------------------------------------------------------
def customise_project_task_resource(r, tablename):
"""
Customise project_task resource
- List Fields
- Fields
- Form
- Filter Widgets
- Report Options
Runs after controller customisation
But runs before prep
"""
s3db = current.s3db
table = s3db.project_task
if r.tablename == "event_incident" and r.method == "profile":
# Set list_fields for renderer (project_task_list_layout)
list_fields = ["name",
"description",
"location_id",
"date_due",
"pe_id",
"incident.incident_id",
#"organisation_id$logo",
"pe_id",
"source_url",
"modified_by",
"status",
"priority",
]
else:
list_fields = ["id",
"status",
"priority",
"incident.incident_id",
(T("Task"), "name"),
"location_id",
"date_due",
(T("Wrike Permalink"), "source_url"),
]
# Custom Form
table.name.label = T("Name")
table.description.label = T("Description")
table.description.comment = None
location_id_field = table.location_id
location_id_field.readable = location_id_field.writable = True
from s3 import IS_LOCATION, S3LocationSelector
location_id_field.requires = IS_LOCATION()
location_id_field.widget = S3LocationSelector(levels=levels,
show_address=True,
show_map=True,
points = True,
polygons = True,
)
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_fields = ["source_url",
"status",
"priority",
"name",
"description",
"pe_id",
"date_due",
"location_id",
]
incident_id = r.get_vars.get("~.(incident)", None)
if incident_id:
# Coming from Profile page
# Add link onaccept
def create_onaccept(form):
current.s3db.event_task.insert(incident_id=incident_id,
task_id=form.vars.id)
s3db.configure("project_task",
create_onaccept = create_onaccept,
)
else:
# Insert into Form
crud_fields.insert(0, S3SQLInlineComponent("incident",
label = T("Incident"),
fields = [("", "incident_id")],
multiple = False,
))
if (r.method == None or r.method == "update") and \
r.record and r.record.source_url:
# Task imported from Wrike
# - lock all fields which should only be edited within Wrike
#crud_fields.insert(0, "source_url")
current.s3db.event_task.incident_id.writable = False
for fieldname in ["source_url",
"status",
"priority",
"name",
"description",
"pe_id",
"date_due"]:
table[fieldname].writable = False
crud_form = S3SQLCustomForm(*crud_fields)
# Filter Widgets
from s3 import S3OptionsFilter
filter_widgets = s3db.get_config("project_task", "filter_widgets")
filter_widgets.insert(2, S3OptionsFilter("incident.incident_id"))
# Report options
report_fields = ["status",
"priority",
"incident.incident_id",
"pe_id",
"location_id",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = ["count(name)"],
defaults=Storage(rows = "status",
cols = "priority",
fact = "count(name)",
totals = True,
chart = "barchart:rows",
)
)
url_next = URL(c="project", f="task", args="summary")
s3db.configure("project_task",
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
list_fields = list_fields,
onvalidation = None, # don't check pe_id if status Active
orderby = "project_task.date_due asc",
report_options = report_options,
update_next = url_next,
)
settings.customise_project_task_resource = customise_project_task_resource
# -----------------------------------------------------------------------------
# Contacts (pr_person)
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3 = current.response.s3
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="pr", f="person",
args=["[id]", "read"]))
]
s3.actions = actions
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
# Remove RHeader
attr["rheader"] = None
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_pr_person_resource(r, tablename):
"""
Customise pr_person resource
- List Fields
- Fields
- Form
- Filter Widgets
- Report Options
Runs after controller customisation
But runs before prep
"""
s3db = current.s3db
request = current.request
s3 = current.response.s3
tablename = "pr_person"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add"),
title_display = T("Contact Details"),
title_list = T("Contact Directory"),
title_update = T("Edit Contact Details"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
if r.method == "validate":
# Can't validate image without the file
image_field = s3db.pr_image.image
image_field.requires = None
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
htable = s3db.hrm_human_resource
htable.organisation_id.widget = None
site_field = htable.site_id
from s3 import S3Represent, IS_ONE_OF
represent = S3Represent(lookup="org_site")
site_field.label = T("Facility")
site_field.represent = represent
site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
represent,
orderby = "org_site.name")
from s3layouts import S3PopupLink
site_field.comment = S3PopupLink(c = "org",
f = "facility",
vars = {"child": "site_id"},
label = T("Create Facility"),
title = T("Facility"),
tooltip = T("If you don't see the Facility in the list, you can add a new one by clicking link 'Create Facility'."),
)
# ImageCrop widget doesn't currently work within an Inline Form
image_field = s3db.pr_image.image
from gluon.validators import IS_IMAGE
image_field.requires = IS_IMAGE()
image_field.widget = None
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
]
# Context from a Profile page?
organisation_id = request.get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
from s3 import S3SQLCustomForm, S3SQLInlineComponent
s3_sql_custom_fields = [
"first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
S3SQLInlineComponent(
"image",
name = "image",
label = T("Photo"),
multiple = False,
fields = [("", "image")],
filterby = dict(field = "profile",
options=[True]
)
),
]
list_fields = ["human_resource.organisation_id",
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Facility"), "human_resource.site_id"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
from s3 import S3TextFilter, S3OptionsFilter
filter_widgets = [S3TextFilter(["pe_label",
"first_name",
"middle_name",
"last_name",
"local_name",
"identity.value",
"human_resource.organisation_id",
"human_resource.job_title_id",
"human_resource.site_id",
],
label=T("Search"),
),
S3OptionsFilter("human_resource.organisation_id",
),
S3OptionsFilter("human_resource.job_title_id",
),
S3OptionsFilter("human_resource.site_id",
),
]
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="pr", f="person", )
# Report options
report_fields = ["organisation_id",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = ["count(id)"
],
defaults=Storage(rows = "organisation_id",
cols = "",
fact = "count(id)",
totals = True,
chart = "barchart:rows",
#table = "collapse",
)
)
s3db.configure(tablename,
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
filter_widgets = filter_widgets,
listadd = True,
list_fields = list_fields,
report_options = report_options,
# Don't include a Create form in 'More' popups
#listadd = False if r.method=="datalist" else True,
#list_layout = render_contacts,
update_next = url_next,
)
# HR Fields For dataList Cards
list_fields = ["person_id",
"organisation_id",
"site_id$location_id",
"site_id$location_id$addr_street",
"job_title_id",
"email.value",
"phone.value",
#"modified_by",
"modified_on",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
settings.customise_pr_person_resource = customise_pr_person_resource
# =============================================================================
# Custom list_layout renders (Copy & Pasted from DRMP)
# @ToDo: re-factor
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Contacts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_human_resource.id"]
item_class = "thumbnail"
raw = record._row
#author = record["hrm_human_resource.modified_by"]
#date = record["hrm_human_resource.modified_on"]
fullname = record["hrm_human_resource.person_id"]
job_title = raw["hrm_human_resource.job_title_id"] or ""
if job_title:
job_title = "- %s" % record["hrm_human_resource.job_title_id"]
#organisation = record["hrm_human_resource.organisation_id"]
organisation_id = raw["hrm_human_resource.organisation_id"]
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
person_id = raw["hrm_human_resource.person_id"]
#location = record["org_site.location_id"]
#location_id = raw["org_site.location_id"]
#location_url = URL(c="gis", f="location",
# args=[location_id, "profile"])
#address = raw["gis_location.addr_street"] or T("no facility assigned")
email = raw["pr_email_contact.value"] or T("no email address")
if isinstance(email, list):
email = email[0]
phone = raw["pr_phone_contact.value"] or T("no phone number")
if isinstance(phone, list):
phone = phone[0]
if person_id:
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups within render, but doing these in the bulk query
avatar = s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object")
else:
avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
_class="media-object")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.pr_person
if permit("update", table, record_id=person_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_url = URL(c="hrm", f="person",
args=[person_id, "update.popup"],
vars=vars)
title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=edit_url,
_class="s3_modal",
_title=title_update,
)
else:
edit_btn = ""
edit_url = "#"
title_update = ""
# Deletions failing due to Integrity Errors
#if permit("delete", table, record_id=person_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
#else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
avatar = A(avatar,
_href=edit_url,
_class="pull-left s3_modal",
_title=title_update,
)
# Render the item
body = TAG[""](P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
),
P(I(_class="icon-envelope-alt"),
" ",
SPAN(email),
_class="main_contact_ph",
),
#P(I(_class="icon-home"),
# " ",
# address,
# _class="main_office-add",
# )
)
item = DIV(DIV(SPAN(fullname,
" ",
job_title,
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
# Organisation only needed if displaying elsewhere than org profile
# Author confusing with main contact record
#DIV(#author,
# #" - ",
# A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
# _class="card-person",
# ),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_facilities(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Facilities on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_facility.id"]
item_class = "thumbnail"
raw = record._row
name = record["org_facility.name"]
#author = record["org_facility.modified_by"]
#date = record["org_facility.modified_on"]
#organisation = record["org_facility.organisation_id"]
organisation_id = raw["org_facility.organisation_id"]
#location = record["org_facility.location_id"]
#location_id = raw["org_facility.location_id"]
#location_url = URL(c="gis", f="location",
# args=[location_id, "profile"])
address = raw["gis_location.addr_street"]
phone = raw["org_facility.phone1"]
#facility_type = record["org_facility.facility_type_id"]
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_facility
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="facility",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_facility.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
#avatar = logo
body = TAG[""](#P(I(_class="icon-flag"),
# " ",
# SPAN(facility_type),
# " ",
# _class="main_contact_ph",
# ),
P(I(_class="icon-phone"),
" ",
SPAN(phone or T("no phone number")),
" ",
),
P(I(_class="icon-home"),
" ",
address,
_class="main_facility-add",
))
item = DIV(DIV(SPAN(name, _class="card-title"),
edit_bar,
_class="card-header",
),
DIV(#avatar,
DIV(DIV(body,
DIV(#author,
#" - ",
#A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
#_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
# ("errors", Storage(
# name_nice = "Ticket Viewer",
# #description = "Needed for Breadcrumbs",
# restricted = False,
# module_type = None # No Menu
# )),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = None,
)),
("event", Storage(
name_nice = "Event Management",
restricted = True,
module_type = None,
)),
("project", Storage(
name_nice = "Project Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
])
# END ========================================================================= | IOError | dataset/ETHPy150Open sahana/eden/modules/templates/MCOP/config.py/config |
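The template above wires almost everything through assignments such as `settings.customise_cms_post_resource = customise_cms_post_resource`, i.e. hooks registered on a settings object and later resolved by table name. The sketch below is a toy, framework-free illustration of that hook convention; the class and function bodies are invented and this is not Sahana Eden's actual dispatch code.

```python
# Toy illustration of the "customise hook" convention used in the template
# above: callables stored on a settings object and resolved by table name.
# All names here are invented; this is not Sahana Eden code.

class Settings(object):
    """Bare attribute bag standing in for the deployment settings."""
    pass

settings = Settings()

def customise_cms_post_resource(r, tablename):
    # A real hook would adjust CRUD strings, forms, list_fields, ...
    print("customising %s for request %r" % (tablename, r))

settings.customise_cms_post_resource = customise_cms_post_resource

def run_customise_hook(settings, r, tablename):
    # Framework side: resolve the hook by naming convention, if registered.
    hook = getattr(settings, "customise_%s_resource" % tablename, None)
    if callable(hook):
        hook(r, tablename)

run_customise_hook(settings, r="GET /cms/post", tablename="cms_post")
```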
def _register_patched_dtype_reduce():
"""
Numpy < 1.7 has a bug when copying/pickling dtype objects with a
zero-width void type--i.e. ``np.dtype('V0')``. Specifically, although
creating a void type is perfectly valid, it crashes when instantiating
a dtype using a format string of 'V0', which is what is normally returned
by dtype.__reduce__() for these dtypes.
See https://github.com/astropy/astropy/pull/3283#issuecomment-81667461
"""
from distutils.version import LooseVersion as V
try:
import numpy as np
except __HOLE__:
NUMPY_LT_1_7 = False
else:
NUMPY_LT_1_7 = V(np.__version__) < V('1.7.0')
if NUMPY_LT_1_7:
import copy_reg
# Originally this created an alternate constructor that fixed this
# issue, and returned that constructor from the new reduce_dtype;
# however that broke pickling since functions can't be pickled, so now
# we fix the issue directly within the custom __reduce__
def reduce_dtype(obj):
info = obj.__reduce__()
args = info[1]
if args[0] == 'V0':
args = ('V',) + args[1:]
info = (info[0], args) + info[2:]
return info
copy_reg.pickle(np.dtype, reduce_dtype) | ImportError | dataset/ETHPy150Open spacetelescope/PyFITS/pyfits/_compat/__init__.py/_register_patched_dtype_reduce |
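The function above patches pickling of a third-party type by registering a custom reducer with `copy_reg.pickle`. Below is a minimal, self-contained sketch of that mechanism using a toy class and Python 3's `copyreg`; it mirrors the "rewrite the bad constructor argument" idea but does not touch numpy.

```python
# Minimal sketch of registering a custom reducer via copyreg (Python 3 name
# for copy_reg). Toy class only; the row above applies this to np.dtype.
import copyreg
import pickle

class Token(object):
    def __init__(self, code):
        self.code = code

def reduce_token(obj):
    # Normalise a problematic constructor argument before pickling,
    # analogous to rewriting 'V0' -> 'V' in the dtype fix above.
    code = "V" if obj.code == "V0" else obj.code
    return (Token, (code,))

copyreg.pickle(Token, reduce_token)

restored = pickle.loads(pickle.dumps(Token("V0")))
print(restored.code)  # -> "V"
```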
def test_invalid_option(runner):
try:
@click.command()
@click.option('foo')
def cli(foo):
pass
except __HOLE__ as e:
assert 'No options defined but a name was passed (foo).' \
in str(e)
else:
assert False, 'Expected a type error because of an invalid option.' | TypeError | dataset/ETHPy150Open pallets/click/tests/test_options.py/test_invalid_option |
def test_invalid_nargs(runner):
try:
@click.command()
@click.option('--foo', nargs=-1)
def cli(foo):
pass
except __HOLE__ as e:
assert 'Options cannot have nargs < 0' in str(e)
else:
assert False, 'Expected a type error because of an invalid option.' | TypeError | dataset/ETHPy150Open pallets/click/tests/test_options.py/test_invalid_nargs |
def formfield_for_dbfield(self, db_field, **kwargs):
# Allow to use formfield_overrides using a fieldname too.
# Avoids the major need to reroute formfield_for_dbfield() via the plugin.
try:
attrs = self.formfield_overrides[db_field.name]
kwargs = dict(attrs, **kwargs)
except __HOLE__:
pass
return super(BaseContentItemInline, self).formfield_for_dbfield(db_field, **kwargs) | KeyError | dataset/ETHPy150Open edoburu/django-fluent-contents/fluent_contents/admin/contentitems.py/BaseContentItemInline.formfield_for_dbfield |
def reserveConfirmed(self, nsi_header, connection_id, global_reservation_id, description, service_parameters):
try:
nsi_header = self.notifications.pop( (connection_id, RESERVE_RESPONSE) )
d = self.provider_client.reserveConfirmed(nsi_header, connection_id, global_reservation_id, description, service_parameters)
d.addErrback(logError, 'reserveConfirmed')
return d
except __HOLE__:
log.msg('No entity to notify about reserveConfirmed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None) | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.reserveConfirmed |
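reserveConfirmed and the sibling callbacks below all repeat one pattern: pop a stored request header keyed by (connection_id, response type) and forward the notification, or log when nobody is waiting. A generic, framework-free sketch of that correlation-map idea follows (illustrative names, no Twisted deferreds).

```python
# Generic sketch of the correlation-map pattern the Provider callbacks use:
# stash per-request state under (connection_id, response_type) and pop it
# when the matching confirmation or failure arrives. Illustrative only.
RESERVE_RESPONSE = "reserve_response"

class Correlator(object):
    def __init__(self):
        self.notifications = {}

    def expect(self, connection_id, response_type, header):
        self.notifications[(connection_id, response_type)] = header

    def confirm(self, connection_id, response_type):
        try:
            header = self.notifications.pop((connection_id, response_type))
        except KeyError:
            print("No entity to notify about %s" % connection_id)
            return None
        print("Forwarding confirmation for %s to %s" % (connection_id, header))
        return header

c = Correlator()
c.expect("conn-1", RESERVE_RESPONSE, header="requester-A")
c.confirm("conn-1", RESERVE_RESPONSE)   # forwards the stored header
c.confirm("conn-1", RESERVE_RESPONSE)   # nothing waiting: logs, returns None
```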
def reserveFailed(self, nsi_header, connection_id, connection_states, err):
try:
nsi_header = self.notifications.pop( (connection_id, RESERVE_RESPONSE) )
d = self.provider_client.reserveFailed(nsi_header, connection_id, connection_states, err)
d.addErrback(logError, 'reserveFailed')
return d
except __HOLE__:
log.msg('No entity to notify about reserveFailed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None) | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.reserveFailed |
def reserveCommitConfirmed(self, header, connection_id):
try:
org_header = self.notifications.pop( (connection_id, RESERVE_COMMIT_RESPONSE) )
d = self.provider_client.reserveCommitConfirmed(org_header.reply_to, org_header.requester_nsa, org_header.provider_nsa, org_header.correlation_id, connection_id)
d.addErrback(logError, 'reserveCommitConfirmed')
return d
except __HOLE__:
log.msg('No entity to notify about reserveCommitConfirmed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None) | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.reserveCommitConfirmed |
def reserveAbortConfirmed(self, header, connection_id):
try:
org_header = self.notifications.pop( (connection_id, RESERVE_ABORT_RESPONSE) )
d = self.provider_client.reserveAbortConfirmed(org_header.reply_to, org_header.requester_nsa, org_header.provider_nsa, org_header.correlation_id, connection_id)
d.addErrback(logError, 'reserveAbortConfirmed')
return d
except __HOLE__:
log.msg('No entity to notify about reserveAbortConfirmed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None) | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.reserveAbortConfirmed |
def provisionConfirmed(self, header, connection_id):
try:
org_header = self.notifications.pop( (connection_id, PROVISION_RESPONSE) )
d = self.provider_client.provisionConfirmed(org_header.reply_to, org_header.correlation_id, org_header.requester_nsa, org_header.provider_nsa, connection_id)
d.addErrback(logError, 'provisionConfirmed')
return d
except __HOLE__:
log.msg('No entity to notify about provisionConfirmed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None) | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.provisionConfirmed |
def releaseConfirmed(self, header, connection_id):
try:
org_header = self.notifications.pop( (connection_id, RELEASE_RESPONSE) )
d = self.provider_client.releaseConfirmed(org_header.reply_to, org_header.correlation_id, org_header.requester_nsa, org_header.provider_nsa, connection_id)
d.addErrback(logError, 'releaseConfirmed')
return d
except __HOLE__:
log.msg('No entity to notify about releaseConfirmed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None) | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.releaseConfirmed |
def terminateConfirmed(self, header, connection_id):
try:
org_header = self.notifications.pop( (connection_id, TERMINATE_RESPONSE) )
return self.provider_client.terminateConfirmed(org_header.reply_to, org_header.correlation_id, org_header.requester_nsa, org_header.provider_nsa, connection_id)
except __HOLE__:
log.msg('No entity to notify about terminateConfirmed for %s' % connection_id, system=LOG_SYSTEM)
return defer.succeed(None)
d = self.service_provider.terminateConfirmed(header, connection_id)
d.addErrback(logError, 'terminateConfirmed')
return d
# Query | KeyError | dataset/ETHPy150Open NORDUnet/opennsa/opennsa/protocols/nsi2/provider.py/Provider.terminateConfirmed |
def get_by_id(self, styleId):
"""
Return the ``<w:style>`` child element having ``styleId`` attribute
matching *styleId*, or |None| if not found.
"""
xpath = 'w:style[@w:styleId="%s"]' % styleId
try:
return self.xpath(xpath)[0]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open python-openxml/python-docx/docx/oxml/styles.py/CT_Styles.get_by_id |
def get_by_name(self, name):
"""
Return the ``<w:style>`` child element having ``<w:name>`` child
element with value *name*, or |None| if not found.
"""
xpath = 'w:style[w:name/@w:val="%s"]' % name
try:
return self.xpath(xpath)[0]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open python-openxml/python-docx/docx/oxml/styles.py/CT_Styles.get_by_name |
def test_invalid_server_name():
ips = ["1.1.1.1"]
bad_server = "garbage.invalid"
bw = setup_server()
bw.server = bad_server
try:
bw.lookup_ips(ips)
except __HOLE__ as e:
print e
assert str(e) == "Couldn't connect to %s:%s" % (bad_server, bw.port), e
except Exception as e2:
# we shouldn't receive other errors
assert False, str(e2) | IOError | dataset/ETHPy150Open csirtfoundry/BulkWhois/tests/bulkwhois_tests.py/test_invalid_server_name |
def get_storage(path, *args, **kwargs):
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise MissingStorageModule(
'Error loading storage %s: "%s"' % (module, e))
try:
storage_class = getattr(mod, attr)
except __HOLE__:
raise MissingStorageClass(
'Module "%s" does not define a storage named "%s"' % (module, attr))
return storage_class(*args, **kwargs) | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/formtools/wizard/storage/__init__.py/get_storage |
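get_storage above turns a dotted path into a class and instantiates it, raising storage-specific errors along the way. Below is a standalone sketch of the same dotted-path loading technique using importlib; the helper name is illustrative and this is not the formtools implementation.

```python
# Standalone sketch of dotted-path class loading, the technique get_storage
# wraps with its own MissingStorageModule/MissingStorageClass errors.
from importlib import import_module

def load_class(dotted_path):
    module_path, _, class_name = dotted_path.rpartition('.')
    try:
        module = import_module(module_path)
    except ImportError as exc:
        raise ImportError('Error loading module %r: %s' % (module_path, exc))
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Module %r defines no attribute %r'
                          % (module_path, class_name))

OrderedDict = load_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))  # OrderedDict([('a', 1)])
```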
def find_filegroups(paths, substring='', extensions=None, validity_check=True,
ignore_invisible=True, rstrip='', ignore_substring=None):
"""Find and collect files from different directories in a python dictionary.
Parameters
----------
paths : `list`
Paths of the directories to be searched. Dictionary keys are build from
the first directory.
substring : `str` (default: '')
Substring that all files have to contain to be considered.
extensions : `list` (default: None)
`None` or `list` of allowed file extensions for each path.
If provided, the number of extensions must match the number of `paths`.
validity_check : `bool` (default: None)
If `True`, checks if all dictionary values
have the same number of file paths. Prints
a warning and returns an empty dictionary if the validity check failed.
ignore_invisible : `bool` (default: True)
If `True`, ignores invisible files
(i.e., files starting with a period).
rstrip : `str` (default: '')
If provided, strips characters from right side of the file
base names after splitting the extension.
Useful to trim different filenames to a common stem.
E.g,. "abc_d.txt" and "abc_d_.csv" would share
the stem "abc_d" if rstrip is set to "_".
ignore_substring : `str` (default: None)
Ignores files that contain the specified substring.
Returns
----------
groups : `dict`
Dictionary of files paths. Keys are the file names
found in the first directory listed
in `paths` (without file extension).
"""
n = len(paths)
# must have same number of paths and extensions
assert(len(paths) >= 2)
if extensions:
assert(len(extensions) == n)
else:
extensions = ['' for i in range(n)]
base = find_files(path=paths[0],
substring=substring,
check_ext=extensions[0],
ignore_invisible=ignore_invisible,
ignore_substring=ignore_substring)
rest = [find_files(path=paths[i],
substring=substring,
check_ext=extensions[i],
ignore_invisible=ignore_invisible,
ignore_substring=ignore_substring) for i in range(1, n)]
groups = {}
for f in base:
basename = os.path.splitext(os.path.basename(f))[0]
basename = re.sub('\%s$' % rstrip, '', basename)
groups[basename] = [f]
# groups = {os.path.splitext(os.path.basename(f))[0].rstrip(rstrip):[f]
# for f in base}
for idx, r in enumerate(rest):
for f in r:
basename, ext = os.path.splitext(os.path.basename(f))
basename = re.sub('\%s$' % rstrip, '', basename)
try:
if extensions[idx+1] == '' or ext == extensions[idx+1]:
groups[basename].append(f)
except __HOLE__:
pass
if validity_check:
lens = [len(groups[k]) for k in groups.keys()]
if len(set(lens)) > 1:
raise ValueError('Warning, some keys have more/less values than'
' others. Set validity_check=False'
' to ignore this warning.')
return groups | KeyError | dataset/ETHPy150Open rasbt/mlxtend/mlxtend/file_io/find_filegroups.py/find_filegroups |
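A hypothetical usage sketch for find_filegroups follows, assuming mlxtend is installed; it creates a throwaway directory layout first so the grouping behaviour is visible end to end.

```python
# Hypothetical usage of find_filegroups (assumes mlxtend is installed).
# Builds a temporary directory layout, then groups files by shared stem.
import os
import tempfile
from mlxtend.file_io import find_filegroups

root = tempfile.mkdtemp()
for sub, ext in (("txt_files", ".txt"), ("csv_files", ".csv")):
    os.makedirs(os.path.join(root, sub))
    for stem in ("abc_1", "abc_2"):
        open(os.path.join(root, sub, stem + ext), "w").close()

groups = find_filegroups(paths=[os.path.join(root, "txt_files"),
                                os.path.join(root, "csv_files")],
                         extensions=[".txt", ".csv"])
print(groups)
# {'abc_1': ['<root>/txt_files/abc_1.txt', '<root>/csv_files/abc_1.csv'],
#  'abc_2': ['<root>/txt_files/abc_2.txt', '<root>/csv_files/abc_2.csv']}
```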
def __setattr__(self, key, value):
if key == '_attributes':
super(Fluent, self).__setattr__(key, value)
try:
super(Fluent, self).__getattribute__(key)
return super(Fluent, self).__setattr__(key, value)
except __HOLE__:
pass
self._attributes[key] = value | AttributeError | dataset/ETHPy150Open sdispater/orator/orator/support/fluent.py/Fluent.__setattr__ |
def importMACS( infile, outfile, suffix = ".norm.bam" ):
'''import MACS results.
Imports only positive peaks. It filters peaks by p-value,
q-value and fold change and imports the diagnostic data.
Does re-counting of peakcenter, peakval, ... using
the normalized tag counts.
'''
track = infile[:-len(".macs")]
infilename = infile + "_peaks.xls"
filename_diag = infile + "_diag.xls"
filename_r = infile + "_model.r"
if not os.path.exists(infilename):
E.warn("could not find %s" % infilename )
outs = open(outfile,"w")
outs.close()
return
shift = getPeakShift( infile )
assert shift != None, "could not determine peak shift from MACS file %s" % infile
samfiles = [ pysam.Samfile( track + suffix, "rb" ) ]
offsets = [ shift / 2 ]
outtemp = P.getTempFile()
outtemp.write( "\t".join( ( \
"interval_id",
"contig", "start", "end",
"npeaks", "peakcenter",
"length",
"avgval", "peakval",
"nprobes",
"pvalue", "fold", "qvalue",
"macs_summit", "macs_nprobes",
)) + "\n" )
id = 0
## get thresholds
max_qvalue = float(PARAMS["macs_max_qvalue"])
# min, as it is -10log10
min_pvalue = float(PARAMS["macs_min_pvalue"])
min_fold = float(PARAMS["macs_min_fold"])
counter = E.Counter()
with open( infilename, "r" ) as ins:
for line in ins:
if line.startswith("#"): continue
if line.startswith( "chr\tstart"): continue
counter.input += 1
data = line[:-1].split("\t")
if len(data) == 9:
contig,start,end,length,summit,ntags,pvalue,fold,qvalue = data
elif len(data) == 8:
contig,start,end,length,summit,ntags,pvalue,fold = data
qvalue = 1.0
else:
raise ValueError( "could not parse line %s" % line )
pvalue, qvalue, summit, fold = float(pvalue), float(qvalue), int(summit), float(fold)
if qvalue > max_qvalue or pvalue < min_pvalue or fold < min_fold:
counter.skipped += 1
continue
# these are 1-based coordinates
start, end = int(start)-1, int(end)
assert start < end
# macs can have negative start coordinates
start = max(start, 0)
npeaks, peakcenter, length, avgval, peakval, nreads = countPeaks( contig, start, end, samfiles, offsets )
outtemp.write ( "\t".join( map(str, ( \
id, contig, start, end, npeaks, peakcenter, length, avgval, peakval, nreads,
pvalue, fold, qvalue,
start + summit - 1,
ntags) ) ) + "\n" )
id += 1
counter.output += 1
outtemp.close()
tablename = "%s_intervals" % track
tmpfilename = outtemp.name
statement = '''
python %(scriptsdir)s/csv2db.py %(csv2db_options)s \
--index=interval_id \
--table=%(tablename)s \
< %(tmpfilename)s > %(outfile)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
# import diagnotic data
if os.path.exists( filename_diag ):
tablename = "%s_macsdiag" % track
statement = '''
sed "s/FC range.*/fc\\tnpeaks\\tp90\\tp80\\tp70\\tp60\\tp50\\tp40\\tp30\\tp20/" < %(filename_diag)s |\
python %(scriptsdir)s/csv2db.py %(csv2db_options)s \
--map=fc:str \
--table=%(tablename)s \
> %(outfile)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
# create plot
if os.path.exists( filename_r ):
target_path = os.path.join( os.getcwd(), "doc", "_static", "MACS" )
try:
os.makedirs( target_path )
except __HOLE__:
# ignore "file exists" exception
pass
statement = '''
R --vanilla < %(track)s.macs_model.r > %(outfile)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
shutil.copyfile(
"%s.macs_model.pdf" % track,
os.path.join( target_path, "%s_model.pdf" % track) )
os.unlink( tmpfilename )
E.info("%s: %s" % (track, str(counter)))
############################################################
############################################################
############################################################ | OSError | dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_vitaminD_intervals.py/importMACS |
def makeIntervalCorrelation( infiles, outfile, field ):
'''compute correlation of interval properties between sets
'''
dbhandle = sqlite3.connect( PARAMS["database"] )
tracks, idx = [], []
for infile in infiles:
track = infile[:-len(".bed")]
cc = dbhandle.cursor()
statement = "SELECT contig, start, end, %(field)s FROM %(track)s_intervals" % locals()
cc.execute( statement )
ix = IndexedGenome.IndexedGenome()
for contig, start, end, peakval in cc:
ix.add( contig, start, end, peakval )
idx.append( ix )
tracks.append( track )
outs = open( outfile, "w" )
outs.write( "id\tcontig\tstart\tend\t" + "\t".join( tracks ) + "\n" )
for bed in Bed.iterator( infile = open( "merged.bed", "r") ):
row = []
for ix in idx:
try:
intervals = list(ix.get( bed.contig, bed.start, bed.end ))
except __HOLE__:
row.append( "" )
continue
if len(intervals) == 0:
peakval = ""
else:
peakval = str( (max( [ x[2] for x in intervals ] )) )
row.append( peakval )
outs.write( str(bed) + "\t" + "\t".join( row ) + "\n" )
outs.close() | KeyError | dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_vitaminD_intervals.py/makeIntervalCorrelation |
def listclasses(self):
dir, file = os.path.split(self.file)
name, ext = os.path.splitext(file)
if os.path.normcase(ext) != ".py":
return []
try:
dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
except __HOLE__:
return []
items = []
self.classes = {}
for key, cl in dict.items():
if cl.module == name:
s = key
if hasattr(cl, 'super') and cl.super:
supers = []
for sup in cl.super:
if type(sup) is type(''):
sname = sup
else:
sname = sup.name
if sup.module != cl.module:
sname = "%s.%s" % (sup.module, sname)
supers.append(sname)
s = s + "(%s)" % ", ".join(supers)
items.append((cl.lineno, s))
self.classes[s] = cl
items.sort()
list = []
for item, s in items:
list.append(s)
return list | ImportError | dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/ClassBrowser.py/ModuleBrowserTreeItem.listclasses |
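The class browser above leans on pyclbr, which parses a module's source rather than importing it; the `cl.module == name` check filters out names that were merely imported into the module. A minimal standalone sketch of the same idea (the module name passed in at the end is only an example):

import pyclbr
import sys

def list_module_classes(name, extra_path=None):
    # pyclbr.readmodule_ex parses the module's source and returns a dict of
    # name -> pyclbr.Class / pyclbr.Function descriptors without importing it.
    try:
        tree = pyclbr.readmodule_ex(name, (extra_path or []) + sys.path)
    except ImportError:
        return []
    items = []
    for key, obj in tree.items():
        # keep only objects actually defined in this module, not imported ones
        if getattr(obj, 'module', None) == name:
            items.append((obj.lineno, key))
    # return the names in source order, like the browser's tree items
    return [key for _, key in sorted(items)]

print(list_module_classes('pyclbr'))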
def __init__(self, name, classes, file):
self.name = name
self.classes = classes
self.file = file
try:
self.cl = self.classes[self.name]
except (__HOLE__, KeyError):
self.cl = None
self.isfunction = isinstance(self.cl, pyclbr.Function) | IndexError | dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/ClassBrowser.py/ClassBrowserTreeItem.__init__ |
def IsExpandable(self):
if self.cl:
try:
return not not self.cl.methods
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/ClassBrowser.py/ClassBrowserTreeItem.IsExpandable |
def _class_browser(parent): #Wrapper for htest
try:
file = __file__
except __HOLE__:
file = sys.argv[0]
if sys.argv[1:]:
file = sys.argv[1]
else:
file = sys.argv[0]
dir, file = os.path.split(file)
name = os.path.splitext(file)[0]
flist = PyShell.PyShellFileList(parent)
global file_open
file_open = flist.open
ClassBrowser(flist, name, [dir], _htest=True) | NameError | dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/ClassBrowser.py/_class_browser |
def _address_to_hosts(self, addresses):
"""Iterate over hosts within an address range.
If an explicit range specifier is missing, the parameter is
interpreted as a specific individual address.
"""
try:
return [netaddr.IPAddress(addresses)]
except __HOLE__:
net = netaddr.IPNetwork(addresses)
if net.size < 4:
reason = _("/%s should be specified as single address(es) "
"not in cidr format") % net.prefixlen
raise exception.InvalidInput(reason=reason)
else:
return net.iter_hosts()
except netaddr.AddrFormatError as exc:
raise exception.InvalidInput(reason=six.text_type(exc)) | ValueError | dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/floating_ips_bulk.py/FloatingIPBulkController._address_to_hosts |
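The helper above relies on netaddr.IPAddress rejecting CIDR strings with a ValueError, falling back to IPNetwork for ranges. A rough standalone equivalent (names are illustrative, not the nova API, and the AddrFormatError branch is left out):

import netaddr

def address_to_hosts(addresses):
    """Return IPAddress objects for a single address or a CIDR range."""
    try:
        # A plain address such as '192.0.2.10' parses directly.
        return [netaddr.IPAddress(addresses)]
    except ValueError:
        # Anything with a prefix such as '192.0.2.0/24' lands here.
        net = netaddr.IPNetwork(addresses)
        if net.size < 4:
            # /31 and /32 networks have no usable host range.
            raise ValueError("%s is too small to iterate hosts" % addresses)
        return net.iter_hosts()

print(list(address_to_hosts('192.0.2.8/30')))   # the two usable hosts
print(address_to_hosts('192.0.2.10'))           # a single address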
def get_height(self):
try:
return os.path.getsize(self.path)/80 - 1
except __HOLE__, e:
return 0 | OSError | dataset/ETHPy150Open chromaway/ngcccbase/ngcccbase/blockchain.py/FileStore.get_height |
def read_raw_header(self, height):
try:
with open(self.path, 'rb') as store:
store.seek(height*80)
data = store.read(80)
assert len(data) == 80
return data
except (OSError, __HOLE__), e:
return None | AssertionError | dataset/ETHPy150Open chromaway/ngcccbase/ngcccbase/blockchain.py/FileStore.read_raw_header |
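Both methods treat the on-disk store as a flat concatenation of fixed 80-byte Bitcoin block headers, so header N starts at byte offset N*80. A throwaway demonstration of that offset arithmetic (dummy bytes rather than real headers; the real store's height bookkeeping is left aside):

import os
import tempfile

HEADER_SIZE = 80
path = os.path.join(tempfile.mkdtemp(), 'blockchain_headers')
# write three fake 80-byte "headers" filled with 0x00, 0x01, 0x02
with open(path, 'wb') as f:
    for i in range(3):
        f.write(bytearray([i]) * HEADER_SIZE)

def read_raw_header(height):
    with open(path, 'rb') as store:
        store.seek(height * HEADER_SIZE)   # jump straight to header N
        data = store.read(HEADER_SIZE)
    return data if len(data) == HEADER_SIZE else None

print(os.path.getsize(path) // HEADER_SIZE)  # 3 headers in the file
print(read_raw_header(1)[:4])                # first bytes of header 1
print(read_raw_header(5))                    # beyond the end -> None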
def verify_chunk(self, index, chunk):
height = index*2016
num = len(chunk)/80
if index == 0:
prev_hash = ("0"*64)
else:
prev_header = self.store.read_header(index*2016-1)
if prev_header is None:
raise
prev_hash = self.hash_header(self.store.header_to_raw(prev_header))
bits, target = self.get_target(index)
for i in range(num):
raw_header = chunk[i*80:(i+1)*80]
header = self.store.header_from_raw(raw_header)
_hash = self.hash_header(raw_header)
assert prev_hash == header.get('prev_block_hash')
try:
assert bits == header.get('bits')
assert int('0x'+_hash, 16) < target
except __HOLE__:
if self.testnet and header.get('timestamp') - prev_header.get('timestamp') > 1200:
assert self.max_bits == header.get('bits')
assert int('0x'+_hash, 16) < self.max_target
else:
raise
prev_header = header
prev_hash = _hash | AssertionError | dataset/ETHPy150Open chromaway/ngcccbase/ngcccbase/blockchain.py/BlockHashingAlgorithm.verify_chunk |
def verify_chain(self, chain):
prev_header = self.store.read_header(chain[0].get('block_height')-1)
prev_hash = self.hash_header(self.store.header_to_raw(prev_header))
for header in chain:
bits, target = self.get_target(header.get('block_height')/2016, chain)
_hash = self.hash_header(self.store.header_to_raw(header))
assert prev_hash == header.get('prev_block_hash')
try:
assert bits == header.get('bits')
assert int('0x'+_hash, 16) < target
except __HOLE__:
if self.testnet and header.get('timestamp') - prev_header.get('timestamp') > 1200:
assert self.max_bits == header.get('bits')
assert int('0x'+_hash, 16) < self.max_target
else:
raise
prev_header = header
prev_hash = _hash | AssertionError | dataset/ETHPy150Open chromaway/ngcccbase/ngcccbase/blockchain.py/BlockHashingAlgorithm.verify_chain |
def testGraphDisplay(self):
try:
import networkx
import matplotlib
except __HOLE__ as error:
logging.debug(error)
return
#Show
numFeatures = 1
numVertices = 20
vList = VertexList(numVertices, numFeatures)
graph = SparseGraph(vList)
ell = 2
m = 2
generator = BarabasiAlbertGenerator(ell, m)
graph = generator.generate(graph)
logging.debug((graph.degreeDistribution()))
nxGraph = graph.toNetworkXGraph()
nodePositions = networkx.spring_layout(nxGraph)
nodesAndEdges = networkx.draw_networkx(nxGraph, pos=nodePositions)
#matplotlib.pyplot.show() | ImportError | dataset/ETHPy150Open charanpald/APGL/apgl/generator/test/BarabasiAlbertGeneratorTest.py/BarabasiAlbertGeneratorTest.testGraphDisplay |
def import_string(import_name, silent=False):
"""Imports an object based on a string. This use useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
items = import_name.split('.')
module = '.'.join(items[:-1])
obj = items[-1]
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (__HOLE__, AttributeError):
if not silent:
raise | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/import_string |
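Both spellings accepted above resolve to the same object, and a bare module name returns the module itself. A quick check against the standard library (assuming jinja2 is importable, per the path above):

from jinja2.utils import import_string

# Dotted notation: the last dotted component is treated as the attribute.
escape_a = import_string('xml.sax.saxutils.escape')
# Colon notation: module before ':', attribute after it.
escape_b = import_string('xml.sax.saxutils:escape')
assert escape_a is escape_b

# silent=True swallows the ImportError/AttributeError and yields None.
assert import_string('no.such.module:thing', silent=True) is None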
def open_if_exists(filename, mode='rb'):
"""Returns a file descriptor for the filename if that file exists,
otherwise `None`.
"""
try:
return open(filename, mode)
except __HOLE__, e:
if e.errno not in (errno.ENOENT, errno.EISDIR):
raise | IOError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/open_if_exists |
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except __HOLE__:
from pprint import pformat
return pformat(obj) | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/pformat |
def get(self, key, default=None):
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except __HOLE__:
return default | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/LRUCache.get |
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
try:
return self[key]
except __HOLE__:
self[key] = default
return default | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/LRUCache.setdefault |
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise an `KeyError` if it does not exist.
"""
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except __HOLE__:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv | ValueError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/LRUCache.__getitem__ |
def __setitem__(self, key, value):
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
self._wlock.acquire()
try:
if key in self._mapping:
try:
self._remove(key)
except __HOLE__:
# __getitem__ is not locked, it might happen
pass
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
finally:
self._wlock.release() | ValueError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/LRUCache.__setitem__ |
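Taken together, these methods give the usual LRU behaviour: reads and writes bump a key to the most-recent end of the queue, and inserting past capacity evicts the least recently used key. A short usage sketch against the jinja2 class these methods come from (per the paths above):

from jinja2.utils import LRUCache

cache = LRUCache(2)          # capacity of two entries
cache['a'] = 1
cache['b'] = 2
cache['a']                   # touching 'a' makes 'b' the oldest entry
cache['c'] = 3               # inserting 'c' evicts 'b'
assert 'b' not in cache and 'a' in cache and 'c' in cache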
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
del self._mapping[key]
try:
self._remove(key)
except __HOLE__:
# __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release() | ValueError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/jinja2-2.6/jinja2/utils.py/LRUCache.__delitem__ |
def acquire_lock(self, name):
"""
Wait for a lock with the given name.
While the lock is held, no other process can acquire a lock with the same name;
they block at the point where they try to acquire it until the process
that holds the lock releases it.
"""
if self._remotelib:
try:
while not self._remotelib.run_keyword('acquire_lock', [name, self._my_id], {}):
time.sleep(0.1)
logger.debug('waiting for lock to release')
return True
except __HOLE__:
logger.warn('no connection')
self.__remotelib = None
return _PabotLib.acquire_lock(self, name, self._my_id) | RuntimeError | dataset/ETHPy150Open mkorpela/pabot/pabot/PabotLib.py/PabotLib.acquire_lock |
def acquire_value_set(self):
"""
Reserve a set of values for this execution.
No other process can reserve the same set of values while the set is reserved.
An acquired value set needs to be released after use so that other processes
can access it.
"""
if self._remotelib:
try:
while True:
value = self._remotelib.run_keyword('acquire_value_set', [self._my_id], {})
if value:
logger.info('Value set "%s" acquired' % value)
return value
time.sleep(0.1)
logger.debug('waiting for a value set')
except __HOLE__:
logger.warn('no connection')
self.__remotelib = None
return _PabotLib.acquire_value_set(self, self._my_id) | RuntimeError | dataset/ETHPy150Open mkorpela/pabot/pabot/PabotLib.py/PabotLib.acquire_value_set |
def get_objects_name(self):
try:
profile = self.profile_list
return "User: %s" % profile.username
except ObjectDoesNotExist:
pass
try:
book = self.book_list
return "Book: \"%s\", author: %s" % (book.name, book.default_author.username)
except ObjectDoesNotExist:
pass
try:
post = self.post_list
return "Post: \"%s\", author: %s" % (post.title, post.author.username)
except __HOLE__:
pass
try:
comment = self.comment_list
return "Comment: \"%s\", author %s" % (comment.comment[:50], comment.user.username)
except ObjectDoesNotExist:
pass
return "<No info>" | ObjectDoesNotExist | dataset/ETHPy150Open ProstoKSI/django-voter/voter/models.py/Rating.get_objects_name |
def action_shell(params):
from nailgun.db import db
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
try:
from IPython import embed
embed()
except __HOLE__:
code.interact(local={'db': db, 'settings': settings}) | ImportError | dataset/ETHPy150Open openstack/fuel-web/nailgun/manage.py/action_shell |
def __init__(self, params, offset=0):
agents.Agent.__init__(self, params, offset)
try:
self.maxprice = self.args[0]
except (AttributeError, IndexError):
raise MissingParameter, 'maxprice'
try:
self.maxbuy = self.args[1]
except __HOLE__:
raise MissingParameter, 'maxbuy'
del self.args
self.mem = 5
# Prices of previous self.mem successfull bids
self.successes = list()
# All bids
self.bids = list() | IndexError | dataset/ETHPy150Open jcbagneris/fms/fms/contrib/coleman/agents/smartmem5trader.py/SmartMem5Trader.__init__ |
def act(self, world=None, market=None):
"""
Return order as a dict with keys in (direction, price, quantity).
If selling, pick a price between the highest successful bid and the next highest bid.
If buying, pick a price between the lowest successful bid and the next lowest bid.
Avoid short selling and levering up (borrowing).
"""
if self.successes:
# Average price of successful bids
stockprice = float(sum(self.successes))/len(self.successes)
else:
stockprice = random.randint(1, self.maxprice*100)/100.
try:
minp = max(self.successes)
except ValueError:
# No successes
minp = 0.01
try:
maxp = min([bid for bid in self.bids if bid > minp])
except:
# No higher bids
maxp = self.maxprice
sellprice = random.randint(int(minp*100), int(maxp*100))/100.
sellquant = self.stocks
try:
maxp = min(self.successes)
except ValueError:
# No successes
maxp = self.maxprice
try:
minp = max([bid for bid in self.bids if bid < maxp])
except __HOLE__:
# No lower bids
minp = 0.01
buyprice = random.randint(int(minp*100), int(maxp*100))/100.
if buyprice <= 0:
buyprice = 0.01
buyquant = int(self.money/buyprice)
# Choose buy or sell, place order
# Wealth if trader sells all his stock
sellwealth = self.money + sellquant*sellprice
# Wealth if trader uses as much money as possible to buy
buywealth = self.money - buyquant*buyprice + \
(self.stocks + buyquant)*stockprice
if sellwealth > buywealth:
direction = SELL
price = sellprice
quantity = sellquant
else:
direction = BUY
price = buyprice
quantity = buyquant
self.bids.append(price)
return {'direction':direction, 'price':price, 'quantity':quantity} | ValueError | dataset/ETHPy150Open jcbagneris/fms/fms/contrib/coleman/agents/smartmem5trader.py/SmartMem5Trader.act |
def wantMethod(self, method):
# only works with unittest compatible functions currently
method = getattr(sys.modules[method.im_class.__module__], method.im_class.__name__)
try:
# check if this test was modified (e.g. added/changed)
filename = inspect.getfile(method)
except __HOLE__:
return None
return self.file_checker[filename] | TypeError | dataset/ETHPy150Open dcramer/quickunit/quickunit/plugin.py/QuickUnitPlugin.wantMethod |
def analyze(self, opts, cluster_stats):
try:
warning = int(opts.warning)
critical = int(opts.critical)
except (__HOLE__, ValueError):
print >>sys.stderr, 'Invalid values for "warning" and "critical".'
return 2
if opts.key is None:
print >>sys.stderr, 'You should specify a key name.'
return 2
warning_state, critical_state, values = [], [], []
for host, stats in cluster_stats.items():
if opts.key in stats:
value = stats[opts.key]
values.append('%s=%s;%s;%s' % (host, value, warning, critical))
if warning >= value > critical or warning <= value < critical:
warning_state.append(host)
elif (warning < critical and critical <= value) or (warning > critical and critical >= value):
critical_state.append(host)
values = ' '.join(values)
if critical_state:
print 'Critical "%s" %s!|%s' % (opts.key, ', '.join(critical_state), values)
return 2
elif warning_state:
print 'Warning "%s" %s!|%s' % (opts.key, ', '.join(warning_state), values)
return 1
else:
print 'Ok "%s"!|%s' % (opts.key, values)
return 0 | TypeError | dataset/ETHPy150Open andreisavu/zookeeper-monitoring/check_zookeeper.py/NagiosHandler.analyze |
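The comparison above is written so the check works whether warning is below critical (higher values are worse) or above it (lower values are worse). Factoring the same comparisons into a tiny helper makes both orderings easy to verify; the function name is only for illustration:

def classify(value, warning, critical):
    """Return 'critical', 'warning' or 'ok' using the same comparisons."""
    if warning >= value > critical or warning <= value < critical:
        return 'warning'
    if (warning < critical <= value) or (warning > critical >= value):
        return 'critical'
    return 'ok'

# warning=10, critical=20: higher is worse (e.g. outstanding requests)
assert classify(5, 10, 20) == 'ok'
assert classify(15, 10, 20) == 'warning'
assert classify(25, 10, 20) == 'critical'

# warning=20, critical=10: lower is worse (e.g. followers still alive)
assert classify(25, 20, 10) == 'ok'
assert classify(15, 20, 10) == 'warning'
assert classify(5, 20, 10) == 'critical'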
def analyze(self, opts, cluster_stats):
if opts.key is None:
print >>sys.stderr, 'The key name is mandatory.'
return 1
if opts.leader is True:
try:
leader = [x for x in cluster_stats.values() \
if x.get('zk_server_state', '') == 'leader'][0]
except __HOLE__:
print >>sys.stderr, 'No leader found.'
return 3
if opts.key in leader:
print leader[opts.key]
return 0
else:
print >>sys.stderr, 'Unknown key: "%s"' % opts.key
return 2
else:
for host, stats in cluster_stats.items():
if opts.key not in stats:
continue
host = host.replace(':', '_')
print '%s:%s' % (host, stats[opts.key]), | IndexError | dataset/ETHPy150Open andreisavu/zookeeper-monitoring/check_zookeeper.py/CactiHandler.analyze |
def analyze(self, opts, cluster_stats):
if len(cluster_stats) != 1:
print >>sys.stderr, 'Only allowed to monitor a single node.'
return 1
for host, stats in cluster_stats.items():
for k, v in stats.items():
try:
self.call([opts.gmetric, '-n', k, '-v', str(int(v)), '-t', 'uint32'])
except (__HOLE__, ValueError):
pass | TypeError | dataset/ETHPy150Open andreisavu/zookeeper-monitoring/check_zookeeper.py/GangliaHandler.analyze |
def _parse(self, data):
""" Parse the output from the 'mntr' 4letter word command """
h = StringIO(data)
result = {}
for line in h.readlines():
try:
key, value = self._parse_line(line)
result[key] = value
except __HOLE__:
pass # ignore broken lines
return result | ValueError | dataset/ETHPy150Open andreisavu/zookeeper-monitoring/check_zookeeper.py/ZooKeeperServer._parse |
def _parse_line(self, line):
try:
key, value = map(str.strip, line.split('\t'))
except ValueError:
raise ValueError('Found invalid line: %s' % line)
if not key:
raise ValueError('The key is mandatory and should not be empty')
try:
value = int(value)
except (__HOLE__, ValueError):
pass
return key, value | TypeError | dataset/ETHPy150Open andreisavu/zookeeper-monitoring/check_zookeeper.py/ZooKeeperServer._parse_line |
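The two parsing helpers above split ZooKeeper 'mntr' output line by line on tabs, coercing numeric values to int and silently skipping malformed lines. A compact standalone version of that parsing (the sample output is made up):

SAMPLE = "zk_version\t3.4.13\nzk_avg_latency\t0\nzk_server_state\tleader\nbroken line\n"

def parse_mntr(data):
    result = {}
    for line in data.splitlines():
        parts = [p.strip() for p in line.split('\t')]
        if len(parts) != 2 or not parts[0]:
            continue                      # ignore broken lines
        key, value = parts
        try:
            value = int(value)            # keep counters as integers
        except ValueError:
            pass                          # leave e.g. version strings alone
        result[key] = value
    return result

print(parse_mntr(SAMPLE))  # {'zk_version': '3.4.13', 'zk_avg_latency': 0, ...}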
def create_handler(name):
""" Return an instance of a platform specific analyzer """
try:
return globals()['%sHandler' % name.capitalize()]()
except __HOLE__:
return None | KeyError | dataset/ETHPy150Open andreisavu/zookeeper-monitoring/check_zookeeper.py/create_handler |
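create_handler is a tiny name-to-class registry built on globals(): 'nagios' maps to NagiosHandler, 'cacti' to CactiHandler, and unknown names fall back to None. The same pattern in isolation, with stub handler classes:

class NagiosHandler(object):
    pass

class CactiHandler(object):
    pass

def create_handler(name):
    """Return an instance of a platform specific analyzer, or None."""
    try:
        return globals()['%sHandler' % name.capitalize()]()
    except KeyError:
        return None

print(type(create_handler('nagios')).__name__)  # NagiosHandler
print(create_handler('unknown'))                # None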
def _solve_n_slack_qp(self, constraints, n_samples):
C = self.C
joint_features = [c[1] for sample in constraints for c in sample]
losses = [c[2] for sample in constraints for c in sample]
joint_feature_matrix = np.vstack(joint_features).astype(np.float)
n_constraints = len(joint_features)
P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T))
# q contains loss from margin-rescaling
q = cvxopt.matrix(-np.array(losses, dtype=np.float))
# constraints are a bit tricky. first, all alpha must be >zero
idy = np.identity(n_constraints)
tmp1 = np.zeros(n_constraints)
# box constraint: sum of all alpha for one example must be <= C
blocks = np.zeros((n_samples, n_constraints))
first = 0
for i, sample in enumerate(constraints):
blocks[i, first: first + len(sample)] = 1
first += len(sample)
# positivity constraints:
if self.negativity_constraint is None:
#empty constraints
zero_constr = np.zeros(0)
joint_features_constr = np.zeros((0, n_constraints))
else:
joint_features_constr = joint_feature_matrix.T[self.negativity_constraint]
zero_constr = np.zeros(len(self.negativity_constraint))
# put together
G = cvxopt.sparse(cvxopt.matrix(np.vstack((-idy, blocks,
joint_features_constr))))
tmp2 = np.ones(n_samples) * C
h = cvxopt.matrix(np.hstack((tmp1, tmp2, zero_constr)))
# solve QP model
cvxopt.solvers.options['feastol'] = 1e-5
try:
solution = cvxopt.solvers.qp(P, q, G, h)
except __HOLE__:
solution = {'status': 'error'}
if solution['status'] != "optimal":
print("regularizing QP!")
P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T)
+ 1e-8 * np.eye(joint_feature_matrix.shape[0]))
solution = cvxopt.solvers.qp(P, q, G, h)
if solution['status'] != "optimal":
raise ValueError("QP solver failed. Try regularizing your QP.")
# Lagrange multipliers
a = np.ravel(solution['x'])
self.prune_constraints(constraints, a)
self.old_solution = solution
# Support vectors have non zero lagrange multipliers
sv = a > self.inactive_threshold * C
box = np.dot(blocks, a)
if self.verbose > 1:
print("%d support vectors out of %d points" % (np.sum(sv),
n_constraints))
# calculate per example box constraint:
print("Box constraints at C: %d" % np.sum(1 - box / C < 1e-3))
print("dual objective: %f" % -solution['primal objective'])
self.w = np.dot(a, joint_feature_matrix)
return -solution['primal objective'] | ValueError | dataset/ETHPy150Open pystruct/pystruct/pystruct/learners/n_slack_ssvm.py/NSlackSSVM._solve_n_slack_qp |
def fit(self, X, Y, constraints=None, warm_start=None, initialize=True):
"""Learn parameters using cutting plane method.
Parameters
----------
X : iterable
Training instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
Training labels. Contains the structured labels for inputs in X.
Needs to have the same length as X.
constraints : iterable
Known constraints for warm-starts. List of same length as X.
Each entry is itself a list of constraints for a given instance x.
Each constraint is of the form [y_hat, delta_joint_feature, loss], where
y_hat is a labeling, ``delta_joint_feature = joint_feature(x, y) - joint_feature(x, y_hat)``
and loss is the loss for predicting y_hat instead of the true label
y.
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this True unless you really know what you are doing.
"""
if self.verbose:
print("Training n-slack dual structural SVM")
cvxopt.solvers.options['show_progress'] = self.verbose > 3
if initialize:
self.model.initialize(X, Y)
self.w = np.zeros(self.model.size_joint_feature)
n_samples = len(X)
stopping_criterion = False
if constraints is None:
# fresh start
constraints = [[] for i in range(n_samples)]
self.last_active = [[] for i in range(n_samples)]
self.objective_curve_ = []
self.primal_objective_curve_ = []
self.timestamps_ = [time()]
else:
# warm start
objective = self._solve_n_slack_qp(constraints, n_samples)
try:
# catch ctrl+c to stop training
# we have to update at least once after going through the dataset
for iteration in range(self.max_iter):
# main loop
self.timestamps_.append(time() - self.timestamps_[0])
if self.verbose > 0:
print("iteration %d" % iteration)
if self.verbose > 2:
print(self)
new_constraints = 0
# generate slices through dataset from batch_size
if self.batch_size < 1 and not self.batch_size == -1:
raise ValueError("batch_size should be integer >= 1 or -1,"
"got %s." % str(self.batch_size))
batch_size = (self.batch_size if self.batch_size != -1 else
len(X))
n_batches = int(np.ceil(float(len(X)) / batch_size))
slices = gen_even_slices(n_samples, n_batches)
indices = np.arange(n_samples)
slack_sum = 0
for batch in slices:
new_constraints_batch = 0
verbose = max(0, self.verbose - 3)
X_b = X[batch]
Y_b = Y[batch]
indices_b = indices[batch]
candidate_constraints = Parallel(
n_jobs=self.n_jobs, verbose=verbose)(
delayed(find_constraint)(self.model, x, y, self.w)
for x, y in zip(X_b, Y_b))
# for each batch, gather new constraints
for i, x, y, constraint in zip(indices_b, X_b, Y_b,
candidate_constraints):
# loop over samples in batch
y_hat, delta_joint_feature, slack, loss = constraint
slack_sum += slack
if self.verbose > 3:
print("current slack: %f" % slack)
if not loss > 0:
# can have y != y_hat but loss = 0 in latent svm.
# we need this here as djoint_feature is then != 0
continue
if self._check_bad_constraint(y_hat, slack,
constraints[i]):
continue
constraints[i].append([y_hat, delta_joint_feature, loss])
new_constraints_batch += 1
# after processing the slice, solve the qp
if new_constraints_batch:
objective = self._solve_n_slack_qp(constraints,
n_samples)
new_constraints += new_constraints_batch
self.objective_curve_.append(objective)
self._compute_training_loss(X, Y, iteration)
primal_objective = (self.C
* slack_sum
+ np.sum(self.w ** 2) / 2)
self.primal_objective_curve_.append(primal_objective)
if self.verbose > 0:
print("new constraints: %d, "
"cutting plane objective: %f primal objective: %f" %
(new_constraints, objective, primal_objective))
if new_constraints == 0:
if self.verbose:
print("no additional constraints")
stopping_criterion = True
if (iteration > 1 and self.objective_curve_[-1]
- self.objective_curve_[-2] < self.tol):
if self.verbose:
print("objective converged.")
stopping_criterion = True
if stopping_criterion:
if (self.switch_to is not None and
self.model.inference_method != self.switch_to):
if self.verbose:
print("Switching to %s inference" %
str(self.switch_to))
self.model.inference_method_ = \
self.model.inference_method
self.model.inference_method = self.switch_to
stopping_criterion = False
continue
else:
break
if self.verbose > 5:
print(self.w)
if self.logger is not None:
self.logger(self, iteration)
except __HOLE__:
pass
self.constraints_ = constraints
if self.verbose and self.n_jobs == 1:
print("calls to inference: %d" % self.model.inference_calls)
if verbose:
print("Computing final objective.")
self.timestamps_.append(time() - self.timestamps_[0])
self.primal_objective_curve_.append(self._objective(X, Y))
self.objective_curve_.append(objective)
if self.logger is not None:
self.logger(self, 'final')
return self | KeyboardInterrupt | dataset/ETHPy150Open pystruct/pystruct/pystruct/learners/n_slack_ssvm.py/NSlackSSVM.fit |
def main(opt):
from aero_kernels import dirichlet, dotPV, dotR, init_cg, res_calc, spMV, \
update, updateP, updateUR
try:
with h5py.File(opt['mesh'], 'r') as f:
# sets
nodes = op2.Set.fromhdf5(f, 'nodes')
bnodes = op2.Set.fromhdf5(f, 'bedges')
cells = op2.Set.fromhdf5(f, 'cells')
# maps
pbnodes = op2.Map.fromhdf5(bnodes, nodes, f, 'pbedge')
pcell = op2.Map.fromhdf5(cells, nodes, f, 'pcell')
pvcell = op2.Map.fromhdf5(cells, nodes, f, 'pcell')
# dats
p_xm = op2.Dat.fromhdf5(nodes ** 2, f, 'p_x')
p_phim = op2.Dat.fromhdf5(nodes, f, 'p_phim')
p_resm = op2.Dat.fromhdf5(nodes, f, 'p_resm')
p_K = op2.Dat.fromhdf5(cells ** 16, f, 'p_K')
p_V = op2.Dat.fromhdf5(nodes, f, 'p_V')
p_P = op2.Dat.fromhdf5(nodes, f, 'p_P')
p_U = op2.Dat.fromhdf5(nodes, f, 'p_U')
except __HOLE__:
import sys
print "Failed reading mesh: Could not read from %s\n" % opt['mesh']
sys.exit(1)
# Constants
gam = 1.4
gm1 = op2.Const(1, gam - 1.0, 'gm1', dtype=np.double)
op2.Const(1, 1.0 / gm1.data, 'gm1i', dtype=np.double)
op2.Const(2, [0.5, 0.5], 'wtg1', dtype=np.double)
op2.Const(2, [0.211324865405187, 0.788675134594813], 'xi1',
dtype=np.double)
op2.Const(4, [0.788675134594813, 0.211324865405187,
0.211324865405187, 0.788675134594813],
'Ng1', dtype=np.double)
op2.Const(4, [-1, -1, 1, 1], 'Ng1_xi', dtype=np.double)
op2.Const(4, [0.25] * 4, 'wtg2', dtype=np.double)
op2.Const(16, [0.622008467928146, 0.166666666666667,
0.166666666666667, 0.044658198738520,
0.166666666666667, 0.622008467928146,
0.044658198738520, 0.166666666666667,
0.166666666666667, 0.044658198738520,
0.622008467928146, 0.166666666666667,
0.044658198738520, 0.166666666666667,
0.166666666666667, 0.622008467928146],
'Ng2', dtype=np.double)
op2.Const(32, [-0.788675134594813, 0.788675134594813,
-0.211324865405187, 0.211324865405187,
-0.788675134594813, 0.788675134594813,
-0.211324865405187, 0.211324865405187,
-0.211324865405187, 0.211324865405187,
-0.788675134594813, 0.788675134594813,
-0.211324865405187, 0.211324865405187,
-0.788675134594813, 0.788675134594813,
-0.788675134594813, -0.211324865405187,
0.788675134594813, 0.211324865405187,
-0.211324865405187, -0.788675134594813,
0.211324865405187, 0.788675134594813,
-0.788675134594813, -0.211324865405187,
0.788675134594813, 0.211324865405187,
-0.211324865405187, -0.788675134594813,
0.211324865405187, 0.788675134594813],
'Ng2_xi', dtype=np.double)
minf = op2.Const(1, 0.1, 'minf', dtype=np.double)
op2.Const(1, minf.data ** 2, 'm2', dtype=np.double)
op2.Const(1, 1, 'freq', dtype=np.double)
op2.Const(1, 1, 'kappa', dtype=np.double)
op2.Const(1, 0, 'nmode', dtype=np.double)
op2.Const(1, 1.0, 'mfan', dtype=np.double)
niter = 20
for i in xrange(1, niter + 1):
op2.par_loop(res_calc, cells,
p_xm(op2.READ, pvcell),
p_phim(op2.READ, pcell),
p_K(op2.WRITE),
p_resm(op2.INC, pcell))
op2.par_loop(dirichlet, bnodes,
p_resm(op2.WRITE, pbnodes[0]))
c1 = op2.Global(1, data=0.0, name='c1')
c2 = op2.Global(1, data=0.0, name='c2')
c3 = op2.Global(1, data=0.0, name='c3')
# c1 = R' * R
op2.par_loop(init_cg, nodes,
p_resm(op2.READ),
c1(op2.INC),
p_U(op2.WRITE),
p_V(op2.WRITE),
p_P(op2.WRITE))
# Set stopping criteria
res0 = sqrt(c1.data)
res = res0
res0 *= 0.1
it = 0
maxiter = 200
while res > res0 and it < maxiter:
# V = Stiffness * P
op2.par_loop(spMV, cells,
p_V(op2.INC, pcell),
p_K(op2.READ),
p_P(op2.READ, pcell))
op2.par_loop(dirichlet, bnodes,
p_V(op2.WRITE, pbnodes[0]))
c2.data = 0.0
# c2 = P' * V
op2.par_loop(dotPV, nodes,
p_P(op2.READ),
p_V(op2.READ),
c2(op2.INC))
alpha = op2.Global(1, data=c1.data / c2.data, name='alpha')
# U = U + alpha * P
# resm = resm - alpha * V
op2.par_loop(updateUR, nodes,
p_U(op2.INC),
p_resm(op2.INC),
p_P(op2.READ),
p_V(op2.RW),
alpha(op2.READ))
c3.data = 0.0
# c3 = resm' * resm
op2.par_loop(dotR, nodes,
p_resm(op2.READ),
c3(op2.INC))
beta = op2.Global(1, data=c3.data / c1.data, name="beta")
# P = beta * P + resm
op2.par_loop(updateP, nodes,
p_resm(op2.READ),
p_P(op2.RW),
beta(op2.READ))
c1.data = c3.data
res = sqrt(c1.data)
it += 1
rms = op2.Global(1, data=0.0, name='rms')
# phim = phim - Stiffness \ Load
op2.par_loop(update, nodes,
p_phim(op2.RW),
p_resm(op2.WRITE),
p_U(op2.READ),
rms(op2.INC))
print "rms = %10.5e iter: %d" % (sqrt(rms.data) / sqrt(nodes.size), it) | IOError | dataset/ETHPy150Open OP2/PyOP2/demo/aero.py/main |
def get_serializer(alias):
alias = alias or MOMENT_SERIALIZER
if hasattr(alias, 'loads'):
return alias
try:
return _serializers[alias]
except __HOLE__:
raise LookupError("Serializer `{}` not configured.".format(alias)) | KeyError | dataset/ETHPy150Open caxap/redis-moment/moment/conf.py/get_serializer |
def get_connection(alias='default'):
global _connections
if isinstance(alias, redis.StrictRedis):
return alias
try:
return _connections[alias]
except __HOLE__:
raise LookupError("Connection `{}` not configured.".format(alias)) | KeyError | dataset/ETHPy150Open caxap/redis-moment/moment/conf.py/get_connection |
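Both lookups share a register-then-fetch pattern over module-level dicts, and get_connection additionally passes an already-built client straight through. A self-contained sketch of that pattern (register_connection here is a made-up helper, not necessarily this library's API):

import redis

_connections = {}

def register_connection(alias='default', **kwargs):
    # hypothetical helper: the real module populates _connections elsewhere
    _connections[alias] = redis.StrictRedis(**kwargs)
    return _connections[alias]

def get_connection(alias='default'):
    if isinstance(alias, redis.StrictRedis):
        return alias            # already a client, pass it through
    try:
        return _connections[alias]
    except KeyError:
        raise LookupError("Connection `{}` not configured.".format(alias))

register_connection('default', host='localhost', port=6379)
conn = get_connection('default')          # fetch by alias
same = get_connection(conn)               # or pass a client straight through
assert conn is same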
def _get_schema(self):
time_now = int(datetime.datetime.utcnow().strftime('%s'))
if ((self.column_families is None or
(time_now - self.last_schema_sync_time < 3600))):
return False
url = 'http://%s:%i/%s/keyspaces' % (self.config['host'],
int(self.config['port']),
self.config['cluster_id'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
try:
result = json.load(response)
column_families = []
for ks in result:
i = []
for cf in result[ks]['column_families']:
i.append("%s.%s" % (ks, cf))
column_families.append(i)
self.column_families = ','.join(sum(column_families, []))
self.log.debug('DseOpsCenterCollector columnfamilies = %s',
self.column_families)
self.last_schema_sync_time = time_now
return True
except (TypeError, __HOLE__):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False | ValueError | dataset/ETHPy150Open python-diamond/Diamond/src/collectors/dseopscenter/dseopscenter.py/DseOpsCenterCollector._get_schema |
def _get(self, start, end, step=60):
self._get_schema()
url = ('http://%s:%i/%s/new-metrics?node_group=%s&columnfamilies=%s'
'&metrics=%s&start=%i&end=%i&step=%i%s') % (
self.config['host'],
int(self.config['port']),
self.config['cluster_id'],
self.config['node_group'],
self.column_families,
self.config['metrics'],
start, end, step,
self.config['default_tail_opts'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
self.log.debug('DseOpsCenterCollector metrics url = %s', url)
try:
return json.load(response)
except (__HOLE__, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False | TypeError | dataset/ETHPy150Open python-diamond/Diamond/src/collectors/dseopscenter/dseopscenter.py/DseOpsCenterCollector._get |
def item_link(self, item):
try:
return item.get_absolute_url()
except __HOLE__:
raise ImproperlyConfigured, "Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class." % item.__class__.__name__ | AttributeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/contrib/syndication/feeds.py/Feed.item_link |
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except __HOLE__:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr | AttributeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/contrib/syndication/feeds.py/Feed.__get_dynamic_attr |
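The argcount check above is what lets Feed subclasses define, say, item_link either as item_link(self) or item_link(self, item): a bound method whose code object counts two positional arguments gets the object passed in, one does not. A stripped-down illustration of that dispatch (not Django's actual Feed class):

class FeedSketch(object):
    def title(self):                 # no extra argument: called as attr()
        return "static title"

    def item_link(self, item):       # one extra argument: called as attr(item)
        return "/items/%s/" % item

    def get_dynamic_attr(self, attname, obj, default=None):
        attr = getattr(self, attname, default)
        if not callable(attr):
            return attr
        # Bound methods count 'self' in co_argcount, so 2 means (self, obj).
        if attr.__code__.co_argcount == 2:
            return attr(obj)
        return attr()

feed = FeedSketch()
print(feed.get_dynamic_attr('title', 42))      # static title
print(feed.get_dynamic_attr('item_link', 42))  # /items/42/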
def get_feed(self, url=None):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
if url:
bits = url.split('/')
else:
bits = []
try:
obj = self.get_object(bits)
except __HOLE__:
raise FeedDoesNotExist
if Site._meta.installed:
current_site = Site.objects.get_current()
else:
current_site = RequestSite(self.request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link)
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE.decode(),
feed_url = add_domain(current_site.domain,
self.__get_dynamic_attr('feed_url', obj)),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
try:
title_tmp = loader.get_template(self.title_template_name)
except TemplateDoesNotExist:
title_tmp = Template('{{ obj }}')
try:
description_tmp = loader.get_template(self.description_template_name)
except TemplateDoesNotExist:
description_tmp = Template('{{ obj }}')
for item in self.__get_dynamic_attr('items', obj):
link = add_domain(current_site.domain, self.__get_dynamic_attr('item_link', item))
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_unicode(enc_url),
length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and not pubdate.tzinfo:
now = datetime.now()
utcnow = datetime.utcnow()
# Must always subtract smaller time from larger time here.
if utcnow > now:
sign = -1
tzDifference = (utcnow - now)
else:
sign = 1
tzDifference = (now - utcnow)
# Round the timezone offset to the nearest half hour.
tzOffsetMinutes = sign * ((tzDifference.seconds / 60 + 15) / 30) * 30
tzOffset = timedelta(minutes=tzOffsetMinutes)
pubdate = pubdate.replace(tzinfo=FixedOffset(tzOffset))
feed.add_item(
title = title_tmp.render(RequestContext(self.request, {'obj': item, 'site': current_site})),
link = link,
description = description_tmp.render(RequestContext(self.request, {'obj': item, 'site': current_site})),
unique_id = self.__get_dynamic_attr('item_guid', item, link),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed | ObjectDoesNotExist | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/contrib/syndication/feeds.py/Feed.get_feed |
def collect(self, device, ip, user, password):
"""
This function collects the metrics for one filer.
"""
sys.path.append(self.config['netappsdkpath'])
try:
import NaServer
except __HOLE__:
self.log.error("Unable to load NetApp SDK from %s" % (
self.config['netappsdkpath']))
return
# Set up the parameters
server = NaServer.NaServer(ip, 1, 3)
server.set_transport_type('HTTPS')
server.set_style('LOGIN')
server.set_admin_user(user, password)
# We're only able to query a single object at a time,
# so we'll loop over the objects.
for na_object in self.METRICS.keys():
# For easy reference later, generate a new dict for this object
LOCALMETRICS = {}
for metric in self.METRICS[na_object]:
metricname, prettyname, multiplier = metric
LOCALMETRICS[metricname] = {}
LOCALMETRICS[metricname]["prettyname"] = prettyname
LOCALMETRICS[metricname]["multiplier"] = multiplier
# Keep track of how long has passed since we checked last
CollectTime = time.time()
time_delta = None
if na_object in self.LastCollectTime.keys():
time_delta = CollectTime - self.LastCollectTime[na_object]
self.LastCollectTime[na_object] = CollectTime
self.log.debug("Collecting metric of object %s" % na_object)
query = NaServer.NaElement("perf-object-get-instances-iter-start")
query.child_add_string("objectname", na_object)
counters = NaServer.NaElement("counters")
for metric in LOCALMETRICS.keys():
counters.child_add_string("counter", metric)
query.child_add(counters)
res = server.invoke_elem(query)
if(res.results_status() == "failed"):
self.log.error("Connection to filer %s failed; %s" % (
device, res.results_reason()))
return
iter_tag = res.child_get_string("tag")
num_records = 1
max_records = 100
# For some metrics there are dependencies between metrics for
# a single object, so we'll need to collect all, so we can do
# calculations later.
raw = {}
while(num_records != 0):
query = NaServer.NaElement(
"perf-object-get-instances-iter-next")
query.child_add_string("tag", iter_tag)
query.child_add_string("maximum", max_records)
res = server.invoke_elem(query)
if(res.results_status() == "failed"):
print "Connection to filer %s failed; %s" % (
device, res.results_reason())
return
num_records = res.child_get_int("records")
if(num_records > 0):
instances_list = res.child_get("instances")
instances = instances_list.children_get()
for instance in instances:
raw_name = unicodedata.normalize(
'NFKD',
instance.child_get_string("name")).encode(
'ascii', 'ignore')
# Shorten the name for disks as they are very long and
# padded with zeroes, eg:
# 5000C500:3A236B0B:00000000:00000000:00000000:...
if na_object is "disk":
non_zero_blocks = [
block for block in raw_name.split(":")
if block != "00000000"
]
raw_name = "".join(non_zero_blocks)
instance_name = re.sub(r'\W', '_', raw_name)
counters_list = instance.child_get("counters")
counters = counters_list.children_get()
for counter in counters:
metricname = unicodedata.normalize(
'NFKD',
counter.child_get_string("name")).encode(
'ascii', 'ignore')
metricvalue = counter.child_get_string("value")
# We'll need a long complete pathname to not
# confuse self.derivative
pathname = ".".join([self.config["path_prefix"],
device, na_object,
instance_name, metricname])
raw[pathname] = int(metricvalue)
# Do the math
self.log.debug("Processing %i metrics for object %s" % (len(raw),
na_object))
# Since the derivative function both returns the derivative
# and saves a new point, we'll need to store all derivatives
# for local reference.
derivative = {}
for key in raw.keys():
derivative[key] = self.derivative(key, raw[key])
for key in raw.keys():
metricname = key.split(".")[-1]
prettyname = LOCALMETRICS[metricname]["prettyname"]
multiplier = LOCALMETRICS[metricname]["multiplier"]
if metricname in self.DROPMETRICS:
continue
elif metricname in self.DIVIDERS.keys():
self._gen_delta_depend(key, derivative, multiplier,
prettyname, device)
else:
self._gen_delta_per_sec(key, derivative[key], time_delta,
multiplier, prettyname, device) | ImportError | dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/netapp/netapp.py/NetAppCollector.collect |