function (string · lengths 79–138k) | label (string · 20 classes) | info (string · lengths 42–261)
---|---|---|
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
    super(Keyword,self).__init__()
    self.match = matchString
    self.matchLen = len(matchString)
    try:
        self.firstMatchChar = matchString[0]
    except __HOLE__:
        warnings.warn("null string passed to Keyword; use Empty() instead",
                      SyntaxWarning, stacklevel=2)
    self.name = '"%s"' % self.match
    self.errmsg = "Expected " + self.name
    self.mayReturnEmpty = False
    #self.myException.msg = self.errmsg
    self.mayIndexError = False
    self.caseless = caseless
    if caseless:
        self.caselessmatch = matchString.upper()
        identChars = identChars.upper()
    self.identChars = _str2dict(identChars) | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/Keyword.__init__ |
def __init__( self, exprs, savelist = False ):
    super(ParseExpression,self).__init__(savelist)
    if isinstance( exprs, list ):
        self.exprs = exprs
    elif isinstance( exprs, str ):
        self.exprs = [ Literal( exprs ) ]
    else:
        try:
            self.exprs = list( exprs )
        except __HOLE__:
            self.exprs = [ exprs ]
    self.callPreparse = False | TypeError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/ParseExpression.__init__ |
def parseImpl( self, instring, loc, doActions=True ):
    # pass False as last arg to _parse for first element, since we already
    # pre-parsed the string as part of our And pre-parsing
    loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
    errorStop = False
    for e in self.exprs[1:]:
        if isinstance(e, And._ErrorStop):
            errorStop = True
            continue
        if errorStop:
            try:
                loc, exprtokens = e._parse( instring, loc, doActions )
            except ParseSyntaxException:
                raise
            except ParseBaseException as pe:
                raise ParseSyntaxException(pe)
            except __HOLE__ as ie:
                raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
        else:
            loc, exprtokens = e._parse( instring, loc, doActions )
        if exprtokens or list(exprtokens.keys()):
            resultlist += exprtokens
    return loc, resultlist | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/And.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    maxExcLoc = -1
    maxMatchLoc = -1
    maxException = None
    for e in self.exprs:
        try:
            loc2 = e.tryParse( instring, loc )
        except ParseException as err:
            if err.loc > maxExcLoc:
                maxException = err
                maxExcLoc = err.loc
        except __HOLE__:
            if len(instring) > maxExcLoc:
                maxException = ParseException(instring,len(instring),e.errmsg,self)
                maxExcLoc = len(instring)
        else:
            if loc2 > maxMatchLoc:
                maxMatchLoc = loc2
                maxMatchExp = e
    if maxMatchLoc < 0:
        if maxException is not None:
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)
    return maxMatchExp._parse( instring, loc, doActions ) | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/Or.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    maxExcLoc = -1
    maxException = None
    for e in self.exprs:
        try:
            ret = e._parse( instring, loc, doActions )
            return ret
        except ParseException as err:
            if err.loc > maxExcLoc:
                maxException = err
                maxExcLoc = err.loc
        except __HOLE__:
            if len(instring) > maxExcLoc:
                maxException = ParseException(instring,len(instring),e.errmsg,self)
                maxExcLoc = len(instring)
    # only got here if no expression matched, raise exception for match that made it the furthest
    else:
        if maxException is not None:
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self) | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/MatchFirst.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    try:
        self.expr.tryParse( instring, loc )
    except (ParseException,__HOLE__):
        pass
    else:
        #~ raise ParseException(instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    return loc, [] | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/NotAny.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    tokens = []
    try:
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
        while 1:
            if hasIgnoreExprs:
                preloc = self._skipIgnorables( instring, loc )
            else:
                preloc = loc
            loc, tmptokens = self.expr._parse( instring, preloc, doActions )
            if tmptokens or list(tmptokens.keys()):
                tokens += tmptokens
    except (ParseException,__HOLE__):
        pass
    return loc, tokens | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/ZeroOrMore.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    # must be at least one
    loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
    try:
        hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
        while 1:
            if hasIgnoreExprs:
                preloc = self._skipIgnorables( instring, loc )
            else:
                preloc = loc
            loc, tmptokens = self.expr._parse( instring, preloc, doActions )
            if tmptokens or list(tmptokens.keys()):
                tokens += tmptokens
    except (ParseException,__HOLE__):
        pass
    return loc, tokens | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/OneOrMore.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    try:
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
    except (ParseException,__HOLE__):
        if self.defaultValue is not _optionalNotMatched:
            if self.expr.resultsName:
                tokens = ParseResults([ self.defaultValue ])
                tokens[self.expr.resultsName] = self.defaultValue
            else:
                tokens = [ self.defaultValue ]
        else:
            tokens = []
    return loc, tokens | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/Optional.parseImpl |
def parseImpl( self, instring, loc, doActions=True ):
    startLoc = loc
    instrlen = len(instring)
    expr = self.expr
    failParse = False
    while loc <= instrlen:
        try:
            if self.failOn:
                try:
                    self.failOn.tryParse(instring, loc)
                except ParseBaseException:
                    pass
                else:
                    failParse = True
                    raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                failParse = False
            if self.ignoreExpr is not None:
                while 1:
                    try:
                        loc = self.ignoreExpr.tryParse(instring,loc)
                        print("found ignoreExpr, advance to", loc)
                    except ParseBaseException:
                        break
            expr._parse( instring, loc, doActions=False, callPreParse=False )
            skipText = instring[startLoc:loc]
            if self.includeMatch:
                loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                if mat:
                    skipRes = ParseResults( skipText )
                    skipRes += mat
                    return loc, [ skipRes ]
                else:
                    return loc, [ skipText ]
            else:
                return loc, [ skipText ]
        except (ParseException,__HOLE__):
            if failParse:
                raise
            else:
                loc += 1
    exc = self.myException
    exc.loc = loc
    exc.pstr = instring
    raise exc | IndexError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/SkipTo.parseImpl |
def traceParseAction(f):
    """Decorator for debugging parse actions."""
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except __HOLE__:
        pass
    return z
#
# global helpers
# | AttributeError | dataset/ETHPy150Open ipython/ipython-py3k/IPython/external/pyparsing/_pyparsing.py/traceParseAction |
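The decorator above logs entry, exit value, and any exception of a parse action to stderr. A minimal usage sketch (the grammar element and action names here are illustrative, assuming the usual pyparsing Word/nums building blocks):

@traceParseAction
def to_int(s, l, tokens):
    # convert the matched digit run to an int; the trace wrapper logs the call
    return int(tokens[0])

number = Word(nums).setParseAction(to_int)
number.parseString("42")   # ">>entering ..." / "<<leaving ..." lines appear on stderr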
def change_file_ext():
    """ read a file and an extension from the command line and produces a copy with its extension changed"""
    if len(sys.argv) < 2:
        print("Usage: change_ext.py filename.old_ext 'new_ext'")
        sys.exit()
    name = os.path.splitext(sys.argv[1])[0] + "." + sys.argv[2]
    print (name)
    try:
        shutil.copyfile(sys.argv[1], name)
    except __HOLE__ as err:
        print (err) | OSError | dataset/ETHPy150Open bt3gl/Python-and-Algorithms-and-Data-Structures/src/USEFUL/useful_with_files/change_ext_file.py/change_file_ext |
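As the usage string above indicates, the script is driven from the command line; for example `python change_ext.py report.txt md` would print `report.md` and write a copy of `report.txt` under that name (file names here are illustrative).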
def get_user(self):
    # django 1.5 uses uidb36, django 1.6 uses uidb64
    uidb36 = self.kwargs.get('uidb36')
    uidb64 = self.kwargs.get('uidb64')
    assert bool(uidb36) ^ bool(uidb64)
    try:
        if uidb36:
            uid = base36_to_int(uidb36)
        else:
            # urlsafe_base64_decode is not available in django 1.5
            from django.utils.http import urlsafe_base64_decode
            uid = urlsafe_base64_decode(uidb64)
        return self.get_queryset().get(pk=uid)
    except (TypeError, __HOLE__, OverflowError, User.DoesNotExist):
        return None | ValueError | dataset/ETHPy150Open fusionbox/django-authtools/authtools/views.py/PasswordResetConfirmView.get_user |
def render(self, context):
    try:
        expire_time = self.expire_time_var.resolve(context)
    except VariableDoesNotExist:
        raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
    try:
        expire_time = int(expire_time)
    except (ValueError, __HOLE__):
        raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
    # Build a unicode key for this fragment and all vary-on's.
    args = md5_constructor(u':'.join([urlquote(resolve_variable(var, context)) for var in self.vary_on]))
    cache_key = 'template.cache.%s.%s' % (self.fragment_name, args.hexdigest())
    value = cache.get(cache_key)
    if value is None:
        value = self.nodelist.render(context)
        cache.set(cache_key, value, expire_time)
    return value | TypeError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/_internal/django/templatetags/cache.py/CacheNode.render |
def run_system_command(self, command):
    """
    Run system command (While hiding the prompt. When finished, all the
    output will scroll above the prompt.)

    :param command: Shell command to be executed.
    """
    def wait_for_enter():
        """
        Create a sub application to wait for the enter key press.
        This has two advantages over using 'input'/'raw_input':
        - This will share the same input/output I/O.
        - This doesn't block the event loop.
        """
        from .shortcuts import create_prompt_application
        registry = Registry()

        @registry.add_binding(Keys.ControlJ)
        @registry.add_binding(Keys.ControlM)
        def _(event):
            event.cli.set_return_value(None)

        application = create_prompt_application(
            message='Press ENTER to continue...',
            key_bindings_registry=registry)
        self.run_sub_application(application)

    def run():
        # Try to use the same input/output file descriptors as the one,
        # used to run this application.
        try:
            input_fd = self.input.fileno()
        except __HOLE__:
            input_fd = sys.stdin.fileno()
        try:
            output_fd = self.output.fileno()
        except AttributeError:
            output_fd = sys.stdout.fileno()

        # Run sub process.
        # XXX: This will still block the event loop.
        p = Popen(command, shell=True,
                  stdin=input_fd, stdout=output_fd)
        p.wait()

        # Wait for the user to press enter.
        wait_for_enter()

    self.run_in_terminal(run) | AttributeError | dataset/ETHPy150Open jonathanslenders/python-prompt-toolkit/prompt_toolkit/interface.py/CommandLineInterface.run_system_command |
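A minimal usage sketch for the method above, assuming an existing prompt_toolkit CommandLineInterface instance named cli (the shell command is illustrative):

# Hides the prompt, runs the command in the terminal, and waits for ENTER
# before the interface is redrawn.
cli.run_system_command('ls -la')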
def main():
    """Parse environment and arguments and call the appropriate action."""
    config.parse_args(sys.argv,
        default_config_files=jsonutils.loads(os.environ['CONFIG_FILE']))
    logging.setup(CONF, "nova")
    global LOG
    LOG = logging.getLogger('nova.dhcpbridge')
    if CONF.action.name == 'old':
        # NOTE(sdague): old is the most frequent message sent, and
        # it's a noop. We should just exit immediately otherwise we
        # can stack up a bunch of requests in dnsmasq. A SIGHUP seems
        # to dump this list, so actions queued up get lost.
        return
    objects.register_all()
    if not CONF.conductor.use_local:
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    else:
        LOG.warning(_LW('Conductor local mode is deprecated and will '
                        'be removed in a subsequent release'))
    if CONF.action.name in ['add', 'del']:
        LOG.debug("Called '%(action)s' for mac '%(mac)s' with IP '%(ip)s'",
                  {"action": CONF.action.name,
                   "mac": CONF.action.mac,
                   "ip": CONF.action.ip})
        CONF.action.func(CONF.action.mac, CONF.action.ip)
    else:
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except __HOLE__:
            LOG.error(_LE("Environment variable 'NETWORK_ID' must be set."))
            return(1)
        print(init_leases(network_id))
    rpc.cleanup() | TypeError | dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/cmd/dhcpbridge.py/main |
def save(self, *args, **kwargs):
    if getattr(self, 'cleanse', None):
        try:
            # Passes the rich text content as first argument because
            # the passed callable has been converted into a bound method
            self.richtext = self.cleanse(self.richtext)
        except __HOLE__:
            # Call the original callable, does not pass the rich richtext
            # content instance along
            self.richtext = self.cleanse.im_func(self.richtext)
    super(SectionContent, self).save(*args, **kwargs) | TypeError | dataset/ETHPy150Open feincms/feincms/feincms/content/section/models.py/SectionContent.save |
def mkdir_p(path):
    try:
        os.makedirs(path)
    except __HOLE__ as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise | OSError | dataset/ETHPy150Open eallik/spinoff/spinoff/contrib/filetransfer/util.py/mkdir_p |
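The helper above is the usual "create the directory tree, tolerate an already-existing directory, re-raise anything else" idiom. A small usage sketch (the path is illustrative); on Python 3.2+ the same effect is available with os.makedirs(path, exist_ok=True):

mkdir_p('build/artifacts/logs')   # safe to call repeatedly; only real failures raise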
def get_by_natural_key(self, app_label, model):
    try:
        ct = self.__class__._cache[self.db][(app_label, model)]
    except __HOLE__:
        ct = self.get(app_label=app_label, model=model)
        self._add_to_cache(self.db, ct)
    return ct | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/contenttypes/models.py/ContentTypeManager.get_by_natural_key |
def get_for_model(self, model, for_concrete_model=True):
    """
    Returns the ContentType object for a given model, creating the
    ContentType if necessary. Lookups are cached so that subsequent lookups
    for the same model don't hit the database.
    """
    opts = self._get_opts(model, for_concrete_model)
    try:
        ct = self._get_from_cache(opts)
    except __HOLE__:
        # Load or create the ContentType entry. The smart_text() is
        # needed around opts.verbose_name_raw because name_raw might be a
        # django.utils.functional.__proxy__ object.
        ct, created = self.get_or_create(
            app_label = opts.app_label,
            model = opts.object_name.lower(),
            defaults = {'name': smart_text(opts.verbose_name_raw)},
        )
        self._add_to_cache(self.db, ct)
    return ct | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/contenttypes/models.py/ContentTypeManager.get_for_model |
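As the docstring notes, lookups are cached per database, so repeated calls for the same model skip the query. A minimal usage sketch (the model name is illustrative):

from django.contrib.contenttypes.models import ContentType
ct = ContentType.objects.get_for_model(Article)        # may hit the database once
ct_again = ContentType.objects.get_for_model(Article)  # served from the shared cache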
def get_for_models(self, *models, **kwargs):
    """
    Given *models, returns a dictionary mapping {model: content_type}.
    """
    for_concrete_models = kwargs.pop('for_concrete_models', True)
    # Final results
    results = {}
    # models that aren't already in the cache
    needed_app_labels = set()
    needed_models = set()
    needed_opts = set()
    for model in models:
        opts = self._get_opts(model, for_concrete_models)
        try:
            ct = self._get_from_cache(opts)
        except __HOLE__:
            needed_app_labels.add(opts.app_label)
            needed_models.add(opts.object_name.lower())
            needed_opts.add(opts)
        else:
            results[model] = ct
    if needed_opts:
        cts = self.filter(
            app_label__in=needed_app_labels,
            model__in=needed_models
        )
        for ct in cts:
            model = ct.model_class()
            if model._meta in needed_opts:
                results[model] = ct
                needed_opts.remove(model._meta)
            self._add_to_cache(self.db, ct)
    for opts in needed_opts:
        # These weren't in the cache, or the DB, create them.
        ct = self.create(
            app_label=opts.app_label,
            model=opts.object_name.lower(),
            name=smart_text(opts.verbose_name_raw),
        )
        self._add_to_cache(self.db, ct)
        results[ct.model_class()] = ct
    return results | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/contenttypes/models.py/ContentTypeManager.get_for_models |
def get_for_id(self, id):
    """
    Lookup a ContentType by ID. Uses the same shared cache as get_for_model
    (though ContentTypes are obviously not created on-the-fly by get_by_id).
    """
    try:
        ct = self.__class__._cache[self.db][id]
    except __HOLE__:
        # This could raise a DoesNotExist; that's correct behavior and will
        # make sure that only correct ctypes get stored in the cache dict.
        ct = self.get(pk=id)
        self._add_to_cache(self.db, ct)
    return ct | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/contenttypes/models.py/ContentTypeManager.get_for_id |
def decode(stream):
    try:
        return stream.decode('utf-8')
    except __HOLE__:
        return stream.decode('iso-8859-1', errors='ignore') | UnicodeDecodeError | dataset/ETHPy150Open mesonbuild/meson/mesonbuild/scripts/meson_test.py/decode |
def run_tests(datafilename):
    global options
    logfile_base = 'meson-logs/testlog'
    if options.wrapper is None:
        wrap = []
        logfilename = logfile_base + '.txt'
        jsonlogfilename = logfile_base+ '.json'
    else:
        wrap = [options.wrapper]
        logfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.txt'
        jsonlogfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.json'
    logfile = open(logfilename, 'w')
    jsonlogfile = open(jsonlogfilename, 'w')
    logfile.write('Log of Meson test suite run on %s.\n\n' % datetime.datetime.now().isoformat())
    tests = pickle.load(open(datafilename, 'rb'))
    if len(tests) == 0:
        print('No tests defined.')
        return
    numlen = len('%d' % len(tests))
    varname = 'MESON_TESTTHREADS'
    if varname in os.environ:
        try:
            num_workers = int(os.environ[varname])
        except __HOLE__:
            print('Invalid value in %s, using 1 thread.' % varname)
            num_workers = 1
    else:
        num_workers = multiprocessing.cpu_count()
    executor = conc.ThreadPoolExecutor(max_workers=num_workers)
    futures = []
    filtered_tests = filter_tests(options.suite, tests)
    for i, test in enumerate(filtered_tests):
        if test.suite[0] == '':
            visible_name = test.name
        else:
            if options.suite is not None:
                visible_name = options.suite + ' / ' + test.name
            else:
                visible_name = test.suite[0] + ' / ' + test.name
        if not test.is_parallel:
            drain_futures(futures)
            futures = []
            res = run_single_test(wrap, test)
            print_stats(numlen, filtered_tests, visible_name, res, i, logfile, jsonlogfile)
        else:
            f = executor.submit(run_single_test, wrap, test)
            futures.append((f, numlen, filtered_tests, visible_name, i, logfile, jsonlogfile))
    drain_futures(futures)
    return logfilename | ValueError | dataset/ETHPy150Open mesonbuild/meson/mesonbuild/scripts/meson_test.py/run_tests |
def set_fec_id(this_fec_id):
    try:
        ftpcandidate = Candidate.objects.filter(cand_id = this_fec_id).order_by('-cycle')[0]
    except __HOLE__:
        print "No candidate found in master file for id=%s" % (this_fec_id)
        return
    print "Got cycle: %s" % (ftpcandidate.cycle)
    this_incumbent, created = Incumbent.objects.get_or_create(fec_id=this_fec_id)
    this_incumbent.cycle='2016'
    this_incumbent.name = ftpcandidate.cand_name
    this_incumbent.fec_id = this_fec_id
    this_incumbent.state = ftpcandidate.cand_office_st
    this_incumbent.office_district = ftpcandidate.cand_office_district
    this_incumbent.office = ftpcandidate.cand_office
    this_incumbent.is_incumbent=True
    this_incumbent.save()
    # Now set the incumbent flag in candidate_overlay
    try:
        this_co = Candidate_Overlay.objects.get(fec_id=this_fec_id, cycle='2016')
        this_co.is_incumbent = True
        this_co.save()
        #print "set incumbent %s, %s" % (ftpcandidate.cand_name, this_fec_id)
    except Candidate_Overlay.DoesNotExist:
        pass
        print "Missing candidate: %s, %s" % (ftpcandidate.cand_name, this_fec_id) | IndexError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/summary_data/management/commands/set_incumbent.py/set_fec_id |
def get_template_sources(self, template_name, template_dirs=None):
    """
    Returns the absolute paths to "template_name", when appended to each
    directory in "template_dirs". Any paths that don't lie inside one of the
    template dirs are excluded from the result set, for security reasons.
    """
    if not template_dirs:
        template_dirs = app_template_dirs
    dirs = template_name.split('/')
    if len(dirs) >= 2 and dirs[0] == 'layouts':
        template_name = u'/'.join([dirs[1].replace('.html', ''), 'layout.html'])
    for template_dir in template_dirs:
        try:
            yield safe_join(template_dir, template_name)
        except __HOLE__:
            # The template dir name was a bytestring that wasn't valid UTF-8.
            raise
        except ValueError:
            # The joined path was located outside of template_dir.
            pass | UnicodeDecodeError | dataset/ETHPy150Open ionyse/ionyweb/ionyweb/loaders/layouts_templates.py/Loader.get_template_sources |
def load_template_source(self, template_name, template_dirs=None):
    for filepath in self.get_template_sources(template_name, template_dirs):
        try:
            file = open(filepath)
            try:
                return (file.read().decode(settings.FILE_CHARSET), filepath)
            finally:
                file.close()
        except __HOLE__:
            pass
    raise TemplateDoesNotExist(template_name) | IOError | dataset/ETHPy150Open ionyse/ionyweb/ionyweb/loaders/layouts_templates.py/Loader.load_template_source |
def DownloadActivityList(self, serviceRecord, exhaustive=False):
    """
    GET List of Activities as JSON File

    URL: http://app.velohero.com/export/workouts/json
    Parameters:
    user = username
    pass = password
    date_from = YYYY-MM-DD
    date_to = YYYY-MM-DD
    """
    activities = []
    exclusions = []
    discoveredWorkoutIds = []
    params = self._add_auth_params({}, record=serviceRecord)
    limitDateFormat = "%Y-%m-%d"
    if exhaustive:
        listEnd = datetime.now() + timedelta(days=1.5) # Who knows which TZ it's in
        listStart = datetime(day=1, month=1, year=1980) # The beginning of time
    else:
        listEnd = datetime.now() + timedelta(days=1.5) # Who knows which TZ it's in
        listStart = listEnd - timedelta(days=20) # Doesn't really matter
    params.update({"date_from": listStart.strftime(limitDateFormat), "date_to": listEnd.strftime(limitDateFormat)})
    logger.debug("Requesting %s to %s" % (listStart, listEnd))
    res = requests.get(self._urlRoot + "/export/workouts/json", params=params)
    if res.status_code != 200:
        if res.status_code == 403:
            raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
        raise APIException("Unable to retrieve activity list")
    res.raise_for_status()
    try:
        res = res.json()
    except __HOLE__:
        raise APIException("Could not decode activity list")
    if "workouts" not in res:
        raise APIException("No activities")
    for workout in res["workouts"]:
        workoutId = int(workout["id"])
        if workoutId in discoveredWorkoutIds:
            continue # There's the possibility of query overlap
        discoveredWorkoutIds.append(workoutId)
        if workout["file"] is not "1":
            logger.debug("Skip workout with ID: " + str(workoutId) + " (no file)")
            continue # Skip activity without samples (no PWX export)
        activity = UploadedActivity()
        logger.debug("Workout ID: " + str(workoutId))
        # Duration (dur_time)
        duration = self._durationToSeconds(workout["dur_time"])
        activity.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=duration)
        # Start time (date_ymd, start_time)
        startTimeStr = workout["date_ymd"] + " " + workout["start_time"]
        activity.StartTime = self._parseDateTime(startTimeStr)
        # End time (date_ymd, start_time) + dur_time
        activity.EndTime = self._parseDateTime(startTimeStr) + timedelta(seconds=duration)
        # Sport (sport_id)
        if workout["sport_id"] in self._reverseActivityMappings:
            activity.Type = self._reverseActivityMappings[workout["sport_id"]]
        else:
            activity.Type = ActivityType.Other
        # Distance (dist_km)
        activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=float(workout["dist_km"]))
        # Workout is hidden
        activity.Private = workout["hide"] == "1"
        activity.ServiceData = {"workoutId": workoutId}
        activity.CalculateUID()
        activities.append(activity)
    return activities, exclusions | ValueError | dataset/ETHPy150Open cpfair/tapiriik/tapiriik/services/VeloHero/velohero.py/VeloHeroService.DownloadActivityList |
def UploadActivity(self, serviceRecord, activity):
    """
    POST a Multipart-Encoded File

    URL: http://app.velohero.com/upload/file
    Parameters:
    user = username
    pass = password
    view = json
    file = multipart-encodes file (fit, tcx, pwx, gpx, srm, hrm...)

    Maximum file size per file is 16 MB.
    """
    has_location = has_distance = has_speed = False
    for lap in activity.Laps:
        for wp in lap.Waypoints:
            if wp.Location and wp.Location.Latitude and wp.Location.Longitude:
                has_location = True
            if wp.Distance:
                has_distance = True
            if wp.Speed:
                has_speed = True
    if has_location and has_distance and has_speed:
        format = "fit"
        data = FITIO.Dump(activity)
    elif has_location and has_distance:
        format = "tcx"
        data = TCXIO.Dump(activity)
    elif has_location:
        format = "gpx"
        data = GPXIO.Dump(activity)
    else:
        format = "fit"
        data = FITIO.Dump(activity)
    # Upload
    files = {"file": ("tap-sync-" + str(os.getpid()) + "-" + activity.UID + "." + format, data)}
    params = self._add_auth_params({"view":"json"}, record=serviceRecord)
    res = requests.post(self._urlRoot + "/upload/file", files=files, params=params)
    if res.status_code != 200:
        if res.status_code == 403:
            raise APIException("Invalid login", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
        raise APIException("Unable to upload activity")
    res.raise_for_status()
    try:
        res = res.json()
    except __HOLE__:
        raise APIException("Could not decode activity list")
    if "error" in res:
        raise APIException(res["error"])
    # Set date, start time, comment and sport
    if "id" in res:
        workoutId = res["id"]
        params = self._add_auth_params({
            "workout_date" : activity.StartTime.strftime("%Y-%m-%d"),
            "workout_start_time" : activity.StartTime.strftime("%H:%M:%S"),
            "workout_comment" : activity.Notes,
            "sport_id" : self._activityMappings[activity.Type],
            "workout_hide": "yes" if activity.Private else "no"
        }, record=serviceRecord)
        res = requests.get(self._urlRoot + "/workouts/change/{}".format(workoutId), params=params)
        if res.status_code != 200:
            if res.status_code == 403:
                raise APIException("No authorization to change activity with workout ID: {}".format(workoutId), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
            raise APIException("Unable to change activity with workout ID: {}".format(workoutId))
    return workoutId | ValueError | dataset/ETHPy150Open cpfair/tapiriik/tapiriik/services/VeloHero/velohero.py/VeloHeroService.UploadActivity |
def __init__(self, host='localhost', port=61613, user='', passcode='', ver='1.1', prompt='> ', verbose=True, use_ssl=False, heartbeats=(0, 0), stdin=sys.stdin, stdout=sys.stdout):
    Cmd.__init__(self, 'Tab', stdin, stdout)
    ConnectionListener.__init__(self)
    self.prompt = prompt
    self.verbose = verbose
    self.user = user
    self.passcode = passcode
    self.__quit = False
    if ver == '1.0':
        self.conn = StompConnection10([(host, port)], wait_on_receipt=True)
    elif ver == '1.1':
        self.conn = StompConnection11([(host, port)], wait_on_receipt=True, heartbeats=heartbeats)
    elif ver == '1.2':
        self.conn = StompConnection12([(host, port)], wait_on_receipt=True, heartbeats=heartbeats)
    elif ver == 'multicast':
        self.conn = MulticastConnection()
    else:
        raise RuntimeError('Unknown version')
    if use_ssl:
        self.conn.set_ssl([(host, port)])
    self.conn.set_listener('', self)
    self.conn.start()
    self.conn.connect(self.user, self.passcode, wait=True)
    self.transaction_id = None
    self.version = ver
    try:
        self.nversion = float(ver)
    except __HOLE__:
        self.nversion = 1.0
    self.__subscriptions = {}
    self.__subscription_id = 1 | ValueError | dataset/ETHPy150Open jasonrbriggs/stomp.py/stomp/__main__.py/StompCLI.__init__ |
def main():
    parser = OptionParser(version=stomppy_version)
    parser.add_option('-H', '--host', type='string', dest='host', default='localhost',
                      help='Hostname or IP to connect to. Defaults to localhost if not specified.')
    parser.add_option('-P', '--port', type=int, dest='port', default=61613,
                      help='Port providing stomp protocol connections. Defaults to 61613 if not specified.')
    parser.add_option('-U', '--user', type='string', dest='user', default=None,
                      help='Username for the connection')
    parser.add_option('-W', '--password', type='string', dest='password', default=None,
                      help='Password for the connection')
    parser.add_option('-F', '--file', type='string', dest='filename',
                      help='File containing commands to be executed, instead of prompting from the command prompt.')
    parser.add_option('-S', '--stomp', type='string', dest='stomp', default='1.1',
                      help='Set the STOMP protocol version.')
    parser.add_option('-L', '--listen', type='string', dest='listen', default=None,
                      help='Listen for messages on a queue/destination')
    parser.add_option("-V", "--verbose", dest="verbose", default='on',
                      help='Verbose logging "on" or "off" (if on, full headers from stomp server responses are printed)')
    parser.add_option('--ssl', action='callback', callback=optional_arg(True), dest='ssl',
                      help='Enable SSL connection')
    parser.add_option('--heartbeats', type='string', dest='heartbeats', default="0,0",
                      help='Heartbeats to request when connecting with protocol >= 1.1, two comma separated integers.')
    parser.set_defaults()
    (options, _) = parser.parse_args()
    if options.verbose == 'on':
        verbose = True
    else:
        verbose = False
    if options.ssl is None:
        options.ssl = False
    if options.listen:
        prompt = ''
    else:
        prompt = '> '
    heartbeats = tuple(map(int, options.heartbeats.split(",")))
    st = StompCLI(options.host, options.port, options.user, options.password, options.stomp, prompt, verbose, options.ssl, heartbeats)
    if options.listen:
        st.do_subscribe(options.listen)
        try:
            while 1:
                time.sleep(10)
        except:
            print("\n")
    elif options.filename:
        st.do_run(options.filename)
    else:
        # disable CTRL-C, since can't guarantee correct handling of disconnect
        import signal
        def signal_handler(signal, frame):
            pass
        signal.signal(signal.SIGINT, signal_handler)
        try:
            try:
                st.cmdloop()
            except __HOLE__:
                st.do_quit()
        finally:
            st.conn.disconnect()
#
# command line access
# | KeyboardInterrupt | dataset/ETHPy150Open jasonrbriggs/stomp.py/stomp/__main__.py/main |
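Given the options defined above, one possible invocation of this CLI is `python -m stomp -H broker.example.com -P 61613 -U guest -W guest -L /queue/test`, which connects and listens on a destination (host, credentials, and queue name here are illustrative).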
def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
    """Compute if instance meets the specs of ipolicy.

    @type ipolicy: dict
    @param ipolicy: The ipolicy to verify against
    @type instance: L{objects.Instance}
    @param instance: The instance to verify
    @type cfg: L{config.ConfigWriter}
    @param cfg: Cluster configuration
    @param _compute_fn: The function to verify ipolicy (unittest only)
    @see: L{ComputeIPolicySpecViolation}

    """
    ret = []
    be_full = cfg.GetClusterInfo().FillBE(instance)
    mem_size = be_full[constants.BE_MAXMEM]
    cpu_count = be_full[constants.BE_VCPUS]
    inst_nodes = cfg.GetInstanceNodes(instance.uuid)
    es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
    disks = cfg.GetInstanceDisks(instance.uuid)
    if any(es_flags.values()):
        # With exclusive storage use the actual spindles
        try:
            spindle_use = sum([disk.spindles for disk in disks])
        except __HOLE__:
            ret.append("Number of spindles not configured for disks of instance %s"
                       " while exclusive storage is enabled, try running gnt-cluster"
                       " repair-disk-sizes" % instance.name)
            # _ComputeMinMaxSpec ignores 'None's
            spindle_use = None
    else:
        spindle_use = be_full[constants.BE_SPINDLE_USE]
    disk_count = len(disks)
    disk_sizes = [disk.size for disk in disks]
    nic_count = len(instance.nics)
    disk_types = [d.dev_type for d in disks]
    return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                             disk_sizes, spindle_use, disk_types) | TypeError | dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/common.py/ComputeIPolicyInstanceViolation |
def GetUpdatedParams(old_params, update_dict,
                     use_default=True, use_none=False):
    """Return the new version of a parameter dictionary.

    @type old_params: dict
    @param old_params: old parameters
    @type update_dict: dict
    @param update_dict: dict containing new parameter values, or
        constants.VALUE_DEFAULT to reset the parameter to its default
        value
    @param use_default: boolean
    @type use_default: whether to recognise L{constants.VALUE_DEFAULT}
        values as 'to be deleted' values
    @param use_none: boolean
    @type use_none: whether to recognise C{None} values as 'to be
        deleted' values
    @rtype: dict
    @return: the new parameter dictionary

    """
    params_copy = copy.deepcopy(old_params)
    for key, val in update_dict.iteritems():
        if ((use_default and val == constants.VALUE_DEFAULT) or
                (use_none and val is None)):
            try:
                del params_copy[key]
            except __HOLE__:
                pass
        else:
            params_copy[key] = val
    return params_copy | KeyError | dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/common.py/GetUpdatedParams |
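A worked example of the behaviour described in the docstring (values are illustrative): keys set to constants.VALUE_DEFAULT are removed from the copy, everything else is overwritten or added.

old = {"vcpus": 2, "memory": 512}
update = {"memory": constants.VALUE_DEFAULT, "spindle_use": 1}
GetUpdatedParams(old, update)
# -> {"vcpus": 2, "spindle_use": 1}   (memory falls back to its default, spindle_use is added)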
def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
    """Return the new version of an instance policy.

    @param group_policy: whether this policy applies to a group and thus
        we should support removal of policy entries

    """
    ipolicy = copy.deepcopy(old_ipolicy)
    for key, value in new_ipolicy.items():
        if key not in constants.IPOLICY_ALL_KEYS:
            raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                       errors.ECODE_INVAL)
        if (not value or value == [constants.VALUE_DEFAULT] or
                value == constants.VALUE_DEFAULT):
            if group_policy:
                if key in ipolicy:
                    del ipolicy[key]
            else:
                raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                           " on the cluster'" % key,
                                           errors.ECODE_INVAL)
        else:
            if key in constants.IPOLICY_PARAMETERS:
                # FIXME: we assume all such values are float
                try:
                    ipolicy[key] = float(value)
                except (__HOLE__, ValueError), err:
                    raise errors.OpPrereqError("Invalid value for attribute"
                                               " '%s': '%s', error: %s" %
                                               (key, value, err), errors.ECODE_INVAL)
            elif key == constants.ISPECS_MINMAX:
                for minmax in value:
                    for k in minmax.keys():
                        utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
                ipolicy[key] = value
            elif key == constants.ISPECS_STD:
                if group_policy:
                    msg = "%s cannot appear in group instance specs" % key
                    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
                ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                                use_none=False, use_default=False)
                utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
            else:
                # FIXME: we assume all others are lists; this should be redone
                # in a nicer way
                ipolicy[key] = list(value)
    try:
        objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
    except errors.ConfigurationError, err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)
    return ipolicy | TypeError | dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/common.py/GetUpdatedIPolicy |
def _SetOpEarlyRelease(early_release, op):
    """Sets C{early_release} flag on opcodes if available.

    """
    try:
        op.early_release = early_release
    except __HOLE__:
        assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
    return op | AttributeError | dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/common.py/_SetOpEarlyRelease |
def test_paths_unequal(self):
    try:
        self.assertEqual(self.path1, self.path2)
    except __HOLE__ as e:
        if not str(e).startswith("Path data not almost equal to 6 decimals"):
            raise self.failureException("Path mismatch error not raised.") | AssertionError | dataset/ETHPy150Open ioam/holoviews/tests/testcomparisonpath.py/PathComparisonTest.test_paths_unequal |
def test_contours_unequal(self):
    try:
        self.assertEqual(self.contours1, self.contours2)
    except __HOLE__ as e:
        if not str(e).startswith("Contours data not almost equal to 6 decimals"):
            raise self.failureException("Contours mismatch error not raised.") | AssertionError | dataset/ETHPy150Open ioam/holoviews/tests/testcomparisonpath.py/PathComparisonTest.test_contours_unequal |
def test_contour_levels_unequal(self):
    try:
        self.assertEqual(self.contours1, self.contours3)
    except __HOLE__ as e:
        if not str(e).startswith("Contour levels are mismatched"):
            raise self.failureException("Contour level are mismatch error not raised.") | AssertionError | dataset/ETHPy150Open ioam/holoviews/tests/testcomparisonpath.py/PathComparisonTest.test_contour_levels_unequal |
def test_bounds_unequal(self):
    try:
        self.assertEqual(self.bounds1, self.bounds2)
    except __HOLE__ as e:
        if not str(e).startswith("Bounds data not almost equal to 6 decimals"):
            raise self.failureException("Bounds mismatch error not raised.") | AssertionError | dataset/ETHPy150Open ioam/holoviews/tests/testcomparisonpath.py/PathComparisonTest.test_bounds_unequal |
def test_boxs_unequal(self):
    try:
        self.assertEqual(self.box1, self.box2)
    except __HOLE__ as e:
        if not str(e).startswith("Box data not almost equal to 6 decimals"):
            raise self.failureException("Box mismatch error not raised.") | AssertionError | dataset/ETHPy150Open ioam/holoviews/tests/testcomparisonpath.py/PathComparisonTest.test_boxs_unequal |
def test_ellipses_unequal(self):
    try:
        self.assertEqual(self.ellipse1, self.ellipse2)
    except __HOLE__ as e:
        if not str(e).startswith("Ellipse data not almost equal to 6 decimals"):
            raise self.failureException("Ellipse mismatch error not raised.") | AssertionError | dataset/ETHPy150Open ioam/holoviews/tests/testcomparisonpath.py/PathComparisonTest.test_ellipses_unequal |
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
    try:
        to._meta.object_name.lower()
    except __HOLE__: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
        assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
    else:
        # For backwards compatibility purposes, we need to *try* and set
        # the to_field during FK construction. It won't be guaranteed to
        # be correct until contribute_to_class is called. Refs #12190.
        to_field = to_field or (to._meta.pk and to._meta.pk.name)
    kwargs['verbose_name'] = kwargs.get('verbose_name', None)
    kwargs['rel'] = rel_class(to, to_field,
                              related_name=kwargs.pop('related_name', None),
                              limit_choices_to=kwargs.pop('limit_choices_to', None),
                              parent_link=kwargs.pop('parent_link', False))
    super(ForeignKey, self).__init__(**kwargs)
    self.db_index = True | AttributeError | dataset/ETHPy150Open disqus/sharding-example/sqlshards/db/shards/models.py/PartitionedForeignKey.__init__ |
def __get__(self, instance, instance_type=None):
    if instance is None:
        return self
    cache_name = self.field.get_cache_name()
    try:
        return getattr(instance, cache_name)
    except __HOLE__:
        val = getattr(instance, self.field.attname)
        if val is None:
            # If NULL is an allowed value, return it.
            if self.field.null:
                return None
            raise self.field.rel.to.DoesNotExist
        other_field = self.field.rel.get_related_field()
        relname = self.field.rel.field_name
        if other_field.rel:
            params = {'%s__pk' % relname: val}
        else:
            params = {'%s__exact' % relname: val}
        # Ensure key is sent to the manager
        for field_name in instance._shards.key:
            params[field_name] = getattr(instance, field_name)
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        rel_mgr = self.field.rel.to._default_manager
        rel_obj = rel_mgr.get(**params)
        setattr(instance, cache_name, rel_obj)
        return rel_obj | AttributeError | dataset/ETHPy150Open disqus/sharding-example/sqlshards/db/shards/models.py/PartitionedReverseRelatedObjectDescriptor.__get__ |
def __init__(self, url, protocols=None, extensions=None,
             heartbeat_freq=None, ssl_options=None, headers=None):
    """
    A websocket client that implements :rfc:`6455` and provides a simple
    interface to communicate with a websocket server.

    This class works on its own but will block if not run in
    its own thread.

    When an instance of this class is created, a :py:mod:`socket`
    is created. If the connection is a TCP socket,
    the nagle's algorithm is disabled.

    The address of the server will be extracted from the given
    websocket url.

    The websocket key is randomly generated, reset the
    `key` attribute if you want to provide yours.

    For instance to create a TCP client:

    .. code-block:: python

       >>> from websocket.client import WebSocketBaseClient
       >>> ws = WebSocketBaseClient('ws://localhost/ws')

    Here is an example for a TCP client over SSL:

    .. code-block:: python

       >>> from websocket.client import WebSocketBaseClient
       >>> ws = WebSocketBaseClient('wss://localhost/ws')

    Finally an example of a Unix-domain connection:

    .. code-block:: python

       >>> from websocket.client import WebSocketBaseClient
       >>> ws = WebSocketBaseClient('ws+unix:///tmp/my.sock')

    Note that in this case, the initial Upgrade request
    will be sent to ``/``. You may need to change this
    by setting the resource explicitely before connecting:

    .. code-block:: python

       >>> from websocket.client import WebSocketBaseClient
       >>> ws = WebSocketBaseClient('ws+unix:///tmp/my.sock')
       >>> ws.resource = '/ws'
       >>> ws.connect()

    You may provide extra headers by passing a list of tuples
    which must be unicode objects.

    """
    self.url = url
    self.host = None
    self.scheme = None
    self.port = None
    self.unix_socket_path = None
    self.resource = None
    self.ssl_options = ssl_options or {}
    self.extra_headers = headers or []
    self._parse_url()
    if self.unix_socket_path:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
    else:
        # Let's handle IPv4 and IPv6 addresses
        # Simplified from CherryPy's code
        try:
            family, socktype, proto, canonname, sa = socket.getaddrinfo(self.host, self.port,
                                                                        socket.AF_UNSPEC,
                                                                        socket.SOCK_STREAM,
                                                                        0, socket.AI_PASSIVE)[0]
        except socket.gaierror:
            family = socket.AF_INET
            if self.host.startswith('::'):
                family = socket.AF_INET6
            socktype = socket.SOCK_STREAM
            proto = 0
            canonname = ""
            sa = (self.host, self.port, 0, 0)
        sock = socket.socket(family, socktype, proto)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and \
           self.host.startswith('::'):
            try:
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except (__HOLE__, socket.error):
                pass
    WebSocket.__init__(self, sock, protocols=protocols,
                       extensions=extensions,
                       heartbeat_freq=heartbeat_freq)
    self.stream.always_mask = True
    self.stream.expect_masking = False
    self.key = b64encode(os.urandom(16))
    # Adpated from: https://github.com/liris/websocket-client/blob/master/websocket.py#L105 | AttributeError | dataset/ETHPy150Open Lawouach/WebSocket-for-Python/ws4py/client/__init__.py/WebSocketBaseClient.__init__ |
def call_highest_priority(method_name):
    """A decorator for binary special methods to handle _op_priority.

    Binary special methods in Expr and its subclasses use a special attribute
    '_op_priority' to determine whose special method will be called to
    handle the operation. In general, the object having the highest value of
    '_op_priority' will handle the operation. Expr and subclasses that define
    custom binary special methods (__mul__, etc.) should decorate those
    methods with this decorator to add the priority logic.

    The ``method_name`` argument is the name of the method of the other class
    that will be called. Use this decorator in the following manner::

        # Call other.__rmul__ if other._op_priority > self._op_priority
        @call_highest_priority('__rmul__')
        def __mul__(self, other):
            ...

        # Call other.__mul__ if other._op_priority > self._op_priority
        @call_highest_priority('__mul__')
        def __rmul__(self, other):
            ...
    """
    def priority_decorator(func):
        @wraps(func)
        def binary_op_wrapper(self, other):
            if hasattr(other, '_op_priority'):
                if other._op_priority > self._op_priority:
                    try:
                        f = getattr(other, method_name)
                    except __HOLE__:
                        pass
                    else:
                        return f(self)
            return func(self, other)
        return binary_op_wrapper
    return priority_decorator | AttributeError | dataset/ETHPy150Open sympy/sympy/sympy/core/decorators.py/call_highest_priority |
def matchCall(func_name, args, star_list_arg, star_dict_arg, num_defaults,
              positional, pairs, improved = False):
    # This is of incredible code complexity, but there really is no other way to
    # express this with less statements, branches, or variables.
    # pylint: disable=R0912,R0914,R0915
    assert type(positional) is tuple, positional
    assert type(pairs) in (tuple, list), pairs

    # Make a copy, we are going to modify it.
    pairs = list(pairs)
    result = {}
    assigned_tuple_params = []

    def assign(arg, value):
        if type(arg) is str:
            # Normal case:
            result[ arg ] = value
        else:
            # Tuple argument case:
            assigned_tuple_params.append(arg)
            value = iter(value.getIterationValues())
            for i, subarg in enumerate(arg):
                try:
                    subvalue = next(value)
                except StopIteration:
                    raise TooManyArguments(
                        ValueError(
                            "need more than %d %s to unpack" % (
                                i,
                                "values" if i > 1 else "value"
                            )
                        )
                    )
                # Recurse into tuple argument values, could be more tuples.
                assign(subarg, subvalue)
            # Check that not too many values we provided.
            try:
                next(value)
            except __HOLE__:
                pass
            else:
                raise TooManyArguments(
                    ValueError("too many values to unpack")
                )

    def isAssigned(arg):
        if type(arg) is str:
            return arg in result
        return arg in assigned_tuple_params

    num_pos = len(positional)
    num_total = num_pos + len(pairs)
    num_args = len(args)

    for arg, value in zip(args, positional):
        assign(arg, value)

    # Python3 does this check earlier.
    if python_version >= 300 and not star_dict_arg:
        for pair in pairs:
            if pair[0] not in args:
                message = "'%s' is an invalid keyword argument for this function" % pair[0]
                raise TooManyArguments(
                    TypeError(message)
                )

    if star_list_arg:
        if num_pos > num_args:
            assign(star_list_arg, positional[ -(num_pos-num_args) : ])
        else:
            assign(star_list_arg, ())
    elif 0 < num_args < num_total:
        if num_defaults == 0:
            if num_args == 1:
                raise TooManyArguments(
                    TypeError(
                        "%s() takes exactly one argument (%d given)" % (
                            func_name,
                            num_total
                        )
                    )
                )
            else:
                raise TooManyArguments(
                    TypeError(
                        "%s expected %d arguments, got %d" % (
                            func_name,
                            num_args,
                            num_total
                        )
                    )
                )
        else:
            raise TooManyArguments(
                TypeError(
                    "%s() takes at most %d %s (%d given)" % (
                        func_name,
                        num_args,
                        "argument" if num_args == 1 else "arguments",
                        num_total
                    )
                )
            )
    elif num_args == 0 and num_total:
        if star_dict_arg:
            if num_pos:
                # Could use num_pos, but Python also uses num_total.
                raise TooManyArguments(
                    TypeError(
                        "%s() takes exactly 0 arguments (%d given)" % (
                            func_name,
                            num_total
                        )
                    )
                )
        else:
            raise TooManyArguments(
                TypeError(
                    "%s() takes no arguments (%d given)" % (
                        func_name,
                        num_total
                    )
                )
            )

    named_argument_names = [
        pair[0]
        for pair in
        pairs
    ]

    for arg in args:
        if type(arg) is str and arg in named_argument_names:
            if isAssigned(arg):
                raise TooManyArguments(
                    TypeError(
                        "%s() got multiple values for keyword argument '%s'" % (
                            func_name,
                            arg
                        )
                    )
                )
            else:
                new_pairs = []
                for pair in pairs:
                    if arg == pair[0]:
                        assign(arg, pair[1])
                    else:
                        new_pairs.append(pair)
                assert len(new_pairs) == len(pairs) - 1
                pairs = new_pairs

    # Fill in any missing values with the None to indicate "default".
    if num_defaults > 0:
        for arg in args[ -num_defaults : ]:
            if not isAssigned(arg):
                assign(arg, None)

    if star_dict_arg:
        assign(star_dict_arg, pairs)
    elif pairs:
        unexpected = next(iter(dict(pairs)))
        if improved:
            message = "%s() got an unexpected keyword argument '%s'" % (
                func_name,
                unexpected
            )
        else:
            message = "'%s' is an invalid keyword argument for this function" % unexpected
        raise TooManyArguments(
            TypeError(message)
        )

    unassigned = num_args - len(
        [
            arg
            for arg in args
            if isAssigned(arg)
        ]
    )

    if unassigned:
        num_required = num_args - num_defaults
        if num_required > 0 or improved:
            if num_defaults == 0 and num_args != 1:
                raise TooManyArguments(
                    TypeError(
                        "%s expected %d arguments, got %d" % (
                            func_name,
                            num_args,
                            num_total
                        )
                    )
                )
            if num_required == 1:
                arg_desc = "1 argument" if python_version < 350 else "one argument"
            else:
                arg_desc = "%d arguments" % num_required
            raise TooManyArguments(
                TypeError(
                    "%s() takes %s %s (%d given)" % (
                        func_name,
                        "at least" if num_defaults > 0 else "exactly",
                        arg_desc,
                        num_total
                    )
                )
            )
        else:
            raise TooManyArguments(
                TypeError(
                    "%s expected %s%s, got %d" % (
                        func_name,
                        ( "at least " if python_version < 300 else "" )
                        if num_defaults > 0
                        else "exactly ",
                        "%d arguments" % num_required,
                        num_total
                    )
                )
            )

    return result | StopIteration | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/nodes/ParameterSpecs.py/matchCall |
def render(self, name, value, *args, **kwargs):
    try:
        lat = value.lat
        lng = value.lng
    except __HOLE__:
        lat = settings.DEFAULT_LATITUDE
        lng = settings.DEFAULT_LONGITUDE
    js = '''
</script>
<script type="text/javascript">
//<![CDATA[
    var %(name)s_marker ;
    $(document).ready(function () {
        if (GBrowserIsCompatible()) {
            var map = new GMap2(document.getElementById("map_%(name)s"));
            map.setCenter(new GLatLng(%(default_lat)s,%(default_lng)s), 13);
            %(name)s_marker = new GMarker(new GLatLng(%(default_lat)s,%(default_lng)s), {draggable: true});
            map.addOverlay(%(name)s_marker);
            map.addControl(new GLargeMapControl());
            map.addControl(new GMapTypeControl());
            $('#%(name)s_id')[0].value = %(name)s_marker.getLatLng().lat() + "," + %(name)s_marker.getLatLng().lng();
            GEvent.addListener(%(name)s_marker, "dragend", function() {
                var point = %(name)s_marker.getLatLng();
                $('#%(name)s_id')[0].value = point.lat() + "," + point.lng();
            });
        }});
    $(document).unload(function () {GUnload()});
//]]>
</script>
''' % {'name': name, 'default_lat': lat, 'default_lng': lng}
    # % dict(name=name)
    html = self.inner_widget.render("%s" % name, None, dict(id='%s_id' % name))
    html += "<div id=\"map_%s\" style=\"width: 500px; height: 500px\"></div>" % name
    return mark_safe(js+html) | AttributeError | dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/mapping/fields.py/LocationWidget.render |
def _add_plugin(self, plugindir):
    """
    If this is a Jig plugin, add it.

    ``plugindir`` should be the full path to a directory containing all the
    files required for a jig plugin.
    """
    # Is this a plugins?
    config_filename = join(plugindir, PLUGIN_CONFIG_FILENAME)
    if not isfile(config_filename):
        raise PluginError('The plugin file {0} is missing.'.format(
            config_filename))
    config = SafeConfigParser()
    with open(config_filename, 'r') as fh:
        try:
            config.readfp(fh)
        except ConfigParserError as e:
            raise PluginError(e)
    try:
        settings = OrderedDict(config.items('settings'))
    except NoSectionError:
        settings = []
    try:
        plugin_info = OrderedDict(config.items('plugin'))
    except NoSectionError:
        raise PluginError(
            'The plugin config does not contain a '
            '[plugin] section.')
    try:
        bundle = plugin_info['bundle']
        name = plugin_info['name']
    except __HOLE__:
        raise PluginError(
            'Could not find the bundle or name of '
            'the plugin.')
    new_section = 'plugin:{bundle}:{name}'.format(
        bundle=bundle, name=name)
    if self.config.has_section(new_section):
        raise PluginError('The plugin is already installed.')
    self.config.add_section(new_section)
    self.config.set(new_section, 'path', plugindir)
    for setting in settings:
        option, value = setting, settings[setting]
        self.config.set(new_section, option, value)
    # Re-initialize the self.plugins list
    self._plugins = self._init_plugins(self.config)
    # And return the plugin once we find it
    for plugin in self._plugins:  # pragma: no branch
        if plugin.name == name and plugin.bundle == bundle:
            return plugin | KeyError | dataset/ETHPy150Open robmadole/jig/src/jig/plugins/manager.py/PluginManager._add_plugin |
def pre_commit(self, git_diff_index):
    """
    Runs the plugin's pre-commit script, passing in the diff.

    ``git_diff_index`` is a :py:class:`jig.diffconvert.GitDiffIndex`
    object.

    The pre-commit script will receive JSON data as standard input (stdin).
    The JSON data is comprised of two main attributes: config and diff.

    The ``config`` attribute represents the configuration for this plugin.
    This is up to the plugin author but the values can be changed by the
    user.

    The ``diff`` attribute is a list of files and changes that have
    occurred to them. See :py:module:`jig.diffconvert` for
    information on what this object provides.
    """
    # Grab this plugin's settings
    data_in = {
        'config': self.config,
        'files': git_diff_index}
    script = join(self.path, PLUGIN_PRE_COMMIT_SCRIPT)
    ph = Popen([script], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # Send the data to the script
    stdin = json.dumps(data_in, indent=2, cls=PluginDataJSONEncoder)
    retcode = None
    stdout = ''
    stderr = ''
    try:
        stdout, stderr = ph.communicate(stdin)
        # Convert to unicode
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        retcode = ph.returncode
    except __HOLE__ as ose:
        # Generic non-zero retcode that indicates an error
        retcode = 1
        if ose.errno == 32:
            stderr = u'Error: received SIGPIPE from the command'
        else:
            stderr = unicode(ose)
    # And return the relevant stuff
    return retcode, stdout, stderr | OSError | dataset/ETHPy150Open robmadole/jig/src/jig/plugins/manager.py/Plugin.pre_commit |
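As the docstring and the data_in dictionary above show, the plugin script receives one JSON document on stdin with a 'config' key (the plugin's settings) and a 'files' key carrying the diff (the docstring calls it 'diff', but the code sends it as 'files'). A sketch of how a pre-commit script could read it, with the exact structure of each file entry depending on how GitDiffIndex is serialized by PluginDataJSONEncoder:

import json, sys
data = json.load(sys.stdin)
settings = data['config']   # this plugin's user-configurable settings
changes = data['files']     # the serialized diff information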
def __init__(self, *args, **kwargs):
    super(FluentCommentForm, self).__init__(*args, **kwargs)
    # Remove fields from the form.
    # This has to be done in the constructor, because the ThreadedCommentForm
    # inserts the title field in the __init__, instead of the static form definition.
    for name in appsettings.FLUENT_COMMENTS_EXCLUDE_FIELDS:
        try:
            self.fields.pop(name)
        except __HOLE__:
            raise ImproperlyConfigured("Field name '{0}' in FLUENT_COMMENTS_EXCLUDE_FIELDS is invalid, it does not exist in the comment form.") | KeyError | dataset/ETHPy150Open edoburu/django-fluent-comments/fluent_comments/forms.py/FluentCommentForm.__init__ |
def callable_year(dt_value):
    try:
        return dt_value.year
    except __HOLE__:
        return None | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/tests/regressiontests/admin_views/admin.py/callable_year |
def run_interaction(f, args, kwargs):
    """
    Run an interaction function, which may yield FrontEndOperation
    objects to interact with the front end.
    """
    g = f(*args, **kwargs)
    def advance(to_send=None):
        try:
            op = g.send(to_send)
        except __HOLE__:
            return
        except InteractionError as e:
            ShowModal("Error", [widgets.Latex(e.message)]).submit()
        else:
            op.submit(advance)
    advance() | StopIteration | dataset/ETHPy150Open Microsoft/ivy/ivy/ui_extensions_api.py/run_interaction |
def run(self):
    while True:
        try:
            msg = self.child_conn.recv()
        except __HOLE__:
            continue
        if msg == 'shutdown':
            break
        elif msg == 'start':
            self._collect()
    self.child_conn.close() | KeyboardInterrupt | dataset/ETHPy150Open dask/dask/dask/diagnostics/profile.py/_Tracker.run |
def _get_remote_branch(self):
    """Return the remote branch assoicated with this repository.

    If the remote branch is not defined, the parent branch of the
    repository is returned.
    """
    remote = getattr(self.options, 'tracking', None)
    if not remote:
        try:
            remote = self._remote_path[0]
        except __HOLE__:
            remote = None
    if not remote:
        die('Could not determine remote branch to use for diff creation. '
            'Specify --tracking-branch to continue.')
    return remote | IndexError | dataset/ETHPy150Open reviewboard/rbtools/rbtools/clients/mercurial.py/MercurialClient._get_remote_branch |
def test_frozen(self):
    with captured_stdout() as stdout:
        try:
            import __hello__
        except ImportError as x:
            self.fail("import __hello__ failed:" + str(x))
        self.assertEqual(__hello__.initialized, True)
        expect = set(self.module_attrs)
        expect.add('initialized')
        self.assertEqual(set(dir(__hello__)), expect)
    self.assertEqual(stdout.getvalue(), 'Hello world!\n')

    with captured_stdout() as stdout:
        try:
            import __phello__
        except ImportError as x:
            self.fail("import __phello__ failed:" + str(x))
        self.assertEqual(__phello__.initialized, True)
        expect = set(self.package_attrs)
        expect.add('initialized')
        if not "__phello__.spam" in sys.modules:
            self.assertEqual(set(dir(__phello__)), expect)
        else:
            expect.add('spam')
            self.assertEqual(set(dir(__phello__)), expect)
        self.assertEqual(__phello__.__path__, [__phello__.__name__])
    self.assertEqual(stdout.getvalue(), 'Hello world!\n')

    with captured_stdout() as stdout:
        try:
            import __phello__.spam
        except ImportError as x:
            self.fail("import __phello__.spam failed:" + str(x))
        self.assertEqual(__phello__.spam.initialized, True)
        spam_expect = set(self.module_attrs)
        spam_expect.add('initialized')
        self.assertEqual(set(dir(__phello__.spam)), spam_expect)
        phello_expect = set(self.package_attrs)
        phello_expect.add('initialized')
        phello_expect.add('spam')
        self.assertEqual(set(dir(__phello__)), phello_expect)
    self.assertEqual(stdout.getvalue(), 'Hello world!\n')

    try:
        import __phello__.foo
    except ImportError:
        pass
    else:
        self.fail("import __phello__.foo should have failed")

    try:
        import __phello__.foo
    except __HOLE__:
        pass
    else:
        self.fail("import __phello__.foo should have failed")

    del sys.modules['__hello__']
    del sys.modules['__phello__']
    del sys.modules['__phello__.spam'] | ImportError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_frozen.py/FrozenTests.test_frozen |
def save(self, commit=True):
    instance = super(OrgForm, self).save(commit=False)
    if 'issues' in self.fields:
        old_issues = instance.issues.all()
        new_issues = self.cleaned_data['issues']
        to_delete = set(old_issues) - set(new_issues)
        to_create = set(new_issues) - set(old_issues)
        OrgIssueRelationship.objects.filter(org=instance, issue__in=to_delete).delete()
        for issue in to_create:
            relationship = OrgIssueRelationship(org=instance, issue=issue)
            try:
                relationship.full_clean()
                relationship.save()
            except __HOLE__, e:
                pass
        del(self.cleaned_data['issues'])
    if commit:
        instance.save()
        self.save_m2m()
    return instance | ValidationError | dataset/ETHPy150Open jumoconnect/openjumo/jumodjango/org/forms.py/OrgForm.save |
def encode(self, media):
"""
Encode media into a temporary file for the current
encoding profile.
"""
temp_file, encode_path = mkstemp(suffix=".%s" % self.container,
dir=settings.FILE_UPLOAD_TEMP_DIR)
os.close(temp_file)
command = self.shell_command(media.file.path, encode_path)
try:
subprocess.check_call(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except subprocess.CalledProcessError as e:
logger.error("Encoding command returned %d while executing '%s'" %
(e.returncode, ' '.join(e.cmd)))
raise
except __HOLE__:
logger.error("Could not find encoding command '%s'" % command[0])
raise
return encode_path | OSError | dataset/ETHPy150Open jbittel/django-multimedia/multimedia/models.py/EncodeProfile.encode |
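The two except clauses separate "the encoder ran and failed" (CalledProcessError carries the exit code) from "the encoder binary could not be started at all" (OSError). A minimal sketch of that split, with a hypothetical command runner:

import subprocess

def run_encoder(command):
    try:
        subprocess.check_call(command, stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as e:
        # the program started but exited with a non-zero status
        print("encoding command returned %d" % e.returncode)
        raise
    except OSError:
        # the executable itself could not be found or launched
        print("could not find encoding command %r" % command[0])
        raise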
def upload(self, local_path):
"""
Upload an encoded file to remote storage and remove the
local file.
"""
new_hash = self.generate_content_hash(local_path)
if new_hash != self.content_hash:
self.unlink()
self.content_hash = new_hash
if not self.exists():
try:
logger.info("Uploading %s to %s" % (local_path, self.remote_path))
self.get_storage().save(self.remote_path, open(local_path))
except Exception as e:
logger.error("Error saving '%s' to remote storage: %s" % (local_path, e))
raise
try:
os.unlink(local_path)
except __HOLE__ as e:
logger.error("Error removing file '%s': %s" % (local_path, e)) | OSError | dataset/ETHPy150Open jbittel/django-multimedia/multimedia/models.py/RemoteStorage.upload |
def unlink(self):
"""Delete the remote file if it is no longer referenced."""
refs = RemoteStorage.objects.filter(content_hash=self.content_hash)
if refs.count() == 1:
try:
logger.info("Deleting %s" % self.remote_path)
self.get_storage().delete(self.remote_path)
except __HOLE__ as e:
logger.error("Error removing file '%s': %s" %
(self.remote_path, e)) | IOError | dataset/ETHPy150Open jbittel/django-multimedia/multimedia/models.py/RemoteStorage.unlink |
def Filter(self, container, unused_args):
"""Filter method that writes the content of the file into a file.
Args:
container: Container object which holds all information for one definition
file. See Container class for details.
unused_args: No extra arguments required by this filter implementation.
Returns:
Container object that has been passed in.
"""
try:
f = file(container.absolute_path, 'w')
except __HOLE__ as e:
raise FileError('File "%s" could not be opened: %s' % (
container.absolute_path, e))
try:
f.write('\n'.join(container.lines))
except IOError as e:
raise FileError('File "%s" could not be written: %s' % (
container.absolute_path, e))
else:
f.close()
logging.info('Wrote file: %s', container.absolute_path)
return container | IOError | dataset/ETHPy150Open google/capirca/definate/file_filter.py/WriteFileFilter.Filter |
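The write path opens and writes in separate try blocks so each failure gets its own FileError message, and closes the handle only on the success path via else. A condensed sketch of the same structure, with a stand-in FileError (a with-statement would be the usual modern alternative):

class FileError(Exception):
    """Stand-in for the module's own FileError."""

def write_lines(path, lines):
    try:
        f = open(path, 'w')
    except IOError as e:
        raise FileError('File "%s" could not be opened: %s' % (path, e))
    try:
        f.write('\n'.join(lines))
    except IOError as e:
        raise FileError('File "%s" could not be written: %s' % (path, e))
    else:
        f.close()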
def run(
func, argv=None, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr
):
argv = argv or sys.argv[1:]
include_func_name_in_errors = False
# Handle multiple functions
if isinstance(func, (tuple, list)):
funcs = dict([
(fn.__name__, fn) for fn in func
])
try:
func_name = argv.pop(0)
except __HOLE__:
func_name = None
if func_name not in funcs:
names = ["'%s'" % fn.__name__ for fn in func]
s = ', '.join(names[:-1])
if len(names) > 1:
s += ' or %s' % names[-1]
stderr.write("Unknown command: try %s\n" % s)
return
func = funcs[func_name]
include_func_name_in_errors = True
if inspect.isfunction(func):
resolved, errors = resolve_args(func, argv)
elif inspect.isclass(func):
if hasattr(func, '__init__'):
resolved, errors = resolve_args(func.__init__, argv)
else:
resolved, errors = {}, []
else:
raise TypeError('arg is not a Python function or class')
# Special case for stdin/stdout/stderr
for pipe in ('stdin', 'stdout', 'stderr'):
if resolved.pop('optfunc_use_%s' % pipe, False):
resolved[pipe] = locals()[pipe]
if not errors:
try:
return func(**resolved)
except Exception, e:
if include_func_name_in_errors:
stderr.write('%s: ' % func.__name__)
stderr.write(str(e) + '\n')
else:
if include_func_name_in_errors:
stderr.write('%s: ' % func.__name__)
stderr.write("%s\n" % '\n'.join(errors)) | IndexError | dataset/ETHPy150Open simonw/optfunc/optfunc.py/run |
def validate_unique_prez_2012_general():
"""Should only be a single contest for 2012 prez general"""
count = Contest.objects.filter(election_id='md-2012-11-06-general', slug='president').count()
expected = 1
try:
assert count == expected
print "PASS: %s general prez contest found for 2012" % count
except __HOLE__:
raise AssertionError("expected 2012 general prez contest count (%s) did not match actual count (%s)" % (expected, count)) | AssertionError | dataset/ETHPy150Open openelections/openelections-core/openelex/us/md/validate/__init__.py/validate_unique_prez_2012_general |
def validate_obama_candidacies_2012():
"""Should only be two Obama candidacies in 2012 (primary and general)"""
kwargs = {
'election_id__startswith': 'md-2012',
'slug': 'barack-obama',
}
count = Candidate.objects.filter(**kwargs).count()
expected = 2
try:
assert count == expected
print "PASS: %s obama candidacies found for %s" % (count, '2012')
except __HOLE__:
raise AssertionError("expected obama 2012 candidacies (%s) did not match actual count(%s)" % (expected, count)) | AssertionError | dataset/ETHPy150Open openelections/openelections-core/openelex/us/md/validate/__init__.py/validate_obama_candidacies_2012 |
def validate_unique_contests():
"""Should have a unique set of contests for all elections"""
# Get all election ids
election_ids = list(Contest.objects.distinct('election_id'))
for elec_id in election_ids:
contests = Contest.objects.filter(election_id=elec_id)
# compare the number of contest records to unique set of contests for that election
count = contests.count()
expected = len(list(contests.distinct('slug')))
try:
assert expected == count
except __HOLE__:
raise AssertionError("%s contests expected for elec_id '%s', but %s found" % (expected, elec_id, count))
print "PASS: unique contests counts found for all elections" | AssertionError | dataset/ETHPy150Open openelections/openelections-core/openelex/us/md/validate/__init__.py/validate_unique_contests |
def main():
with Input() as input:
with FullscreenWindow() as window:
b = Board()
while True:
window.render_to_terminal(b.display())
if b.turn == 9 or b.winner():
c = input.next() # hit any key
sys.exit()
while True:
c = input.next()
if c == '':
sys.exit()
try:
if int(c) in range(9):
b = b.move(int(c))
except __HOLE__:
continue
window.render_to_terminal(b.display())
break
if b.turn == 9 or b.winner():
c = input.next() # hit any key
sys.exit()
b = ai(b, 'o') | ValueError | dataset/ETHPy150Open thomasballinger/curtsies/examples/tictactoeexample.py/main |
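Move input is validated by letting int() raise ValueError for non-numeric keys, which simply restarts the prompt. The same loop in isolation, fed from a plain sequence instead of a terminal:

def read_move(keys):
    for c in keys:                     # stand-in for keystrokes from the user
        try:
            move = int(c)
        except ValueError:
            continue                   # not a digit, wait for the next key
        if move in range(9):
            return move
    return None

assert read_move(['x', ' ', '7']) == 7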
def initial(self, request, *args, **kwargs):
if hasattr(self, 'PaidControl') and self.action and self.action not in ('list', 'retrieve', 'create'):
if django_settings.NODECONDUCTOR.get('SUSPEND_UNPAID_CUSTOMERS'):
entity = self.get_object()
def get_obj(name):
try:
args = getattr(self.PaidControl, '%s_path' % name).split('__')
except __HOLE__:
return None
return reduce(getattr, args, entity)
self._check_paid_status(get_obj('settings'), get_obj('customer'))
return super(UpdateOnlyByPaidCustomerMixin, self).initial(request, *args, **kwargs) | AttributeError | dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/structure/views.py/UpdateOnlyByPaidCustomerMixin.initial |
def create(self, request, *args, **kwargs):
if django_settings.NODECONDUCTOR.get('SUSPEND_UNPAID_CUSTOMERS'):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
def get_obj(name):
try:
args = getattr(self.PaidControl, '%s_path' % name).split('__')
except __HOLE__:
return None
obj = serializer.validated_data[args[0]]
if len(args) > 1:
obj = reduce(getattr, args[1:], obj)
return obj
self._check_paid_status(get_obj('settings'), get_obj('customer'))
return super(UpdateOnlyByPaidCustomerMixin, self).create(request, *args, **kwargs) | AttributeError | dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/structure/views.py/UpdateOnlyByPaidCustomerMixin.create |
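Both mixin methods resolve a dotted '__' path by splitting it and folding the parts with getattr, treating a missing *_path attribute as "no check". A small sketch of that traversal with made-up classes standing in for the real models:

from functools import reduce

class Customer:
    name = 'acme'

class Project:
    customer = Customer()

class Instance:
    project = Project()

class Control:
    customer_path = 'project__customer'   # no settings_path defined on purpose

def resolve(control, name, entity):
    try:
        parts = getattr(control, '%s_path' % name).split('__')
    except AttributeError:
        return None                        # this control does not define the path
    return reduce(getattr, parts, entity)

assert resolve(Control, 'customer', Instance()).name == 'acme'
assert resolve(Control, 'settings', Instance()) is None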
def get_stats_for_scope(self, quota_name, scope, dates):
stats_data = []
try:
quota = scope.quotas.get(name=quota_name)
except Quota.DoesNotExist:
return stats_data
versions = reversion\
.get_for_object(quota)\
.select_related('revision')\
.filter(revision__date_created__lte=dates[0][0])\
.iterator()
version = None
for end, start in dates:
try:
while version is None or version.revision.date_created > end:
version = versions.next()
stats_data.append((version.object_version.object.limit,
version.object_version.object.usage))
except __HOLE__:
break
return stats_data | StopIteration | dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/structure/views.py/QuotaTimelineStatsView.get_stats_for_scope |
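The version walk advances a newest-first iterator until the current version is no longer newer than the window end, and stops collecting once the iterator is exhausted (StopIteration). The same merge logic reduced to plain numbers:

def stats_for_windows(window_ends, snapshots):
    it = iter(snapshots)               # snapshots sorted newest first, like the versions
    current = None
    collected = []
    for end in window_ends:            # window ends also sorted newest first
        try:
            while current is None or current > end:
                current = next(it)
            collected.append(current)
        except StopIteration:
            break                      # nothing old enough for this or later windows
    return collected

assert stats_for_windows([10, 5, 1], [9, 7, 4]) == [9, 4]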
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
try:
json_data = webapp2_extras.json.decode(req.body)
except __HOLE__:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True) | ValueError | dataset/ETHPy150Open sloria/webargs/webargs/webapp2parser.py/Webapp2Parser.parse_json |
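An unparseable request body is mapped to the missing sentinel instead of an error, so downstream argument handling can fall back to defaults. A stripped-down sketch with a stand-in sentinel:

import json

MISSING = object()                     # stand-in for the library's missing marker

def pull_json_value(body, name):
    try:
        data = json.loads(body)
    except ValueError:
        return MISSING                 # unparseable body means "no value supplied"
    return data.get(name, MISSING)

assert pull_json_value('{"page": 2}', 'page') == 2
assert pull_json_value('not json', 'page') is MISSING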
@property
def overrides(self):
warnings.warn("`Settings.overrides` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='cmdline')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._overrides
except __HOLE__:
self._overrides = o = _DictProxy(self, 'cmdline')
return o | AttributeError | dataset/ETHPy150Open scrapy/scrapy/scrapy/settings/__init__.py/BaseSettings.overrides |
@property
def defaults(self):
warnings.warn("`Settings.defaults` attribute is deprecated and won't "
"be supported in Scrapy 0.26, use "
"`Settings.set(name, value, priority='default')` instead",
category=ScrapyDeprecationWarning, stacklevel=2)
try:
o = self._defaults
except __HOLE__:
self._defaults = o = _DictProxy(self, 'default')
return o | AttributeError | dataset/ETHPy150Open scrapy/scrapy/scrapy/settings/__init__.py/BaseSettings.defaults |
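Both properties use the lazy-attribute idiom: the first access raises AttributeError, which is caught to build and cache the proxy on the instance. The idiom in miniature, with a plain dict standing in for the proxy:

class Settings:
    @property
    def overrides(self):
        try:
            o = self._overrides
        except AttributeError:         # first access: nothing cached yet
            self._overrides = o = {}   # plain dict standing in for the proxy object
        return o

s = Settings()
s.overrides['KEY'] = 'value'
assert s.overrides == {'KEY': 'value'}     # later access returns the cached object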
@csrf_exempt
def rest(request, *pargs):
"""
Calls the Python function corresponding to the HTTP method name.
Calls with incomplete arguments will return HTTP 400.
"""
if request.method == 'GET':
rest_function = get
elif request.method == 'POST':
rest_function = post
elif request.method == 'PUT':
rest_function = put
elif request.method == 'DELETE':
rest_function = delete
else:
return JsonResponse({"error": "HTTP METHOD UNKNOWN"})
try:
return rest_function(request, *pargs)
except __HOLE__ as e:
print e
return HttpResponseBadRequest("unknown method") | TypeError | dataset/ETHPy150Open emccode/heliosburn/heliosburn/django/hbproject/api/views/user.py/rest |
@RequireLogin(role='admin')
def post(request):
"""
Create a new user.
"""
try:
new = json.loads(request.body)
assert "username" in new
assert "password" in new
assert "email" in new
except AssertionError:
return HttpResponseBadRequest("argument mismatch")
except __HOLE__:
return HttpResponseBadRequest("invalid JSON")
from api.models import db_model
dbc = db_model.connect()
user = dbc.hbuser.find_one({"username": new['username']})
if user is not None:
return HttpResponseBadRequest("user already exists")
else:
m = hashlib.sha512()
m.update(new['password'])
roles = []
if "roles" in new:
roles = new['roles']
dbc.hbuser.save({
'username': new['username'],
'email': new['email'],
'password': m.hexdigest(),
'createdAt': datetime.isoformat(datetime.now()),
'updatedAt': datetime.isoformat(datetime.now()),
'roles': roles,
})
r = HttpResponse(status=200)
r['location'] = "/api/user/%s" % new['username']
logger.info("user '%s' created by '%s'" % (new['username'], request.user['username']))
return r | ValueError | dataset/ETHPy150Open emccode/heliosburn/heliosburn/django/hbproject/api/views/user.py/post |
@RequireLogin()
def put(request, username):
"""
Update existing user based on username.
"""
# Users can only update their own account, unless admin
if (request.user['username'] != username) and (auth.is_admin(request.user) is False):
return HttpResponseForbidden()
try:
in_json = json.loads(request.body)
except __HOLE__:
return HttpResponseBadRequest("invalid JSON")
from api.models import db_model
dbc = db_model.connect()
user = dbc.hbuser.find_one({"username": username})
if user is None:
return HttpResponseNotFound()
else:
if "password" in in_json:
m = hashlib.sha512()
m.update(in_json['password'])
user['password'] = m.hexdigest()
if "email" in in_json:
user['email'] = in_json['email']
if "roles" in in_json:
user['roles'] = in_json['roles']
user['updatedAt'] = datetime.isoformat(datetime.now())
dbc.hbuser.save(user)
logger.info("user '%s' updated by '%s'" % (username, request.user['username']))
return HttpResponse() | ValueError | dataset/ETHPy150Open emccode/heliosburn/heliosburn/django/hbproject/api/views/user.py/put |
def __init__(self, subject, decimate=False):
self.subject = subject
self.types = []
left, right = db.get_surf(subject, "fiducial")
try:
fleft, fright = db.get_surf(subject, "flat", nudge=True, merge=False)
except IOError:
fleft = None
if decimate:
try:
pleft, pright = db.get_surf(subject, "pia")
self.left = DecimatedHemi(left[0], left[1], fleft[1], pia=pleft[0])
self.right = DecimatedHemi(right[0], right[1], fright[1], pia=pright[0])
self.addSurf("wm", name="wm", addtype=False, renorm=False)
except IOError:
self.left = DecimatedHemi(left[0], left[1], fleft[1])
self.right = DecimatedHemi(right[0], right[1], fright[1])
else:
try:
pleft, pright = db.get_surf(subject, "pia")
wleft, wright = db.get_surf(subject, "wm")
self.left = Hemi(pleft[0], left[1])
self.right = Hemi(pright[0], right[1])
self.addSurf("wm", name="wm", addtype=False, renorm=False)
except __HOLE__:
self.left = Hemi(left[0], left[1])
self.right = Hemi(right[0], right[1])
if fleft is not None:
#set medial wall
for hemi, ptpoly in ([self.left, fleft], [self.right, fright]):
fidpolys = set(tuple(f) for f in polyutils.sort_polys(hemi.polys))
flatpolys = set(tuple(f) for f in polyutils.sort_polys(ptpoly[1]))
hemi.aux[np.array(list(fidpolys - flatpolys)).astype(int), 0] = 1
#Find the flatmap limits
if fleft is not None:
flatmerge = np.vstack([fleft[0][:,:2], fright[0][:,:2]])
fmin, fmax = flatmerge.min(0), flatmerge.max(0)
self.flatlims = map(float, -fmin), map(float, fmax-fmin)
self.left.setFlat(fleft[0])
self.right.setFlat(fright[0])
else:
self.flatlims = None | IOError | dataset/ETHPy150Open gallantlab/pycortex/cortex/brainctm.py/BrainCTM.__init__ |
def addCurvature(self, **kwargs):
npz = db.get_surfinfo(self.subject, type='curvature', **kwargs)
try:
self.left.aux[:,1] = npz.left[self.left.mask]
self.right.aux[:,1] = npz.right[self.right.mask]
except __HOLE__:
self.left.aux[:,1] = npz.left
self.right.aux[:,1] = npz.right | AttributeError | dataset/ETHPy150Open gallantlab/pycortex/cortex/brainctm.py/BrainCTM.addCurvature |
def __init__(self, **kwargs): # pylint: disable=W0613
super(Device, self).__init__(**kwargs)
if not self.path_module:
raise NotImplementedError('path_module must be specified by the deriving classes.')
libpath = os.path.dirname(os.__file__)
modpath = os.path.join(libpath, self.path_module)
if not modpath.lower().endswith('.py'):
modpath += '.py'
try:
self.path = imp.load_source('device_path', modpath)
except __HOLE__:
raise DeviceError('Unsupported path module: {}'.format(self.path_module)) | IOError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/core/device.py/Device.__init__ |
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
items = [(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]]
try:
items.sort(key=lambda item: item[0])
except TypeError:
# Python 2.3 had no 'key' arg, but that doesn't matter
# since it could sort dissimilar types just fine.
items.sort()
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except __HOLE__:
raise
except SystemExit:
e = sys.exc_info()[1]
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log("Error in %r listener %r" % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output | KeyboardInterrupt | dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/process/wspbus.py/Bus.publish |
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (__HOLE__, SystemExit):
raise
except:
self.log("Shutting down due to error in start listener:",
level=40, traceback=True)
e_info = sys.exc_info()[1]
try:
self.exit()
except:
# Any stop/exit errors will be logged inside publish().
pass
# Re-raise the original error
raise e_info | KeyboardInterrupt | dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/process/wspbus.py/Bus.start |
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all threads
to terminate, and then calls os.execv if self.execv is True. This
design allows another thread to call bus.restart, yet have the main
thread perform the actual execv call (required on some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (__HOLE__, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See http://www.cherrypy.org/ticket/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See http://www.cherrypy.org/ticket/751.
self.log("Waiting for child threads to terminate...")
for t in threading.enumerate():
if t != threading.currentThread() and t.isAlive():
# Note that any dummy (external) threads are always daemonic.
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
d = t.daemon
else:
d = t.isDaemon()
if not d:
self.log("Waiting for thread %s." % t.getName())
t.join()
if self.execv:
self._do_execv() | KeyboardInterrupt | dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/process/wspbus.py/Bus.block |
def wait(self, state, interval=0.1, channel=None):
"""Poll for the given state(s) at intervals; publish to channel."""
if isinstance(state, (tuple, list)):
states = state
else:
states = [state]
def _wait():
while self.state not in states:
time.sleep(interval)
self.publish(channel)
# From http://psyco.sourceforge.net/psycoguide/bugs.html:
# "The compiled machine code does not include the regular polling
# done by Python, meaning that a KeyboardInterrupt will not be
# detected before execution comes back to the regular Python
# interpreter. Your program cannot be interrupted if caught
# into an infinite Psyco-compiled loop."
try:
sys.modules['psyco'].cannotcompile(_wait)
except (__HOLE__, AttributeError):
pass
_wait() | KeyError | dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/process/wspbus.py/Bus.wait |
def _set_cloexec(self):
"""Set the CLOEXEC flag on all open files (except stdin/out/err).
If self.max_cloexec_files is an integer (the default), then on
platforms which support it, it represents the max open files setting
for the operating system. This function will be called just before
the process is restarted via os.execv() to prevent open files
from persisting into the new process.
Set self.max_cloexec_files to 0 to disable this behavior.
"""
for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except __HOLE__:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) | IOError | dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/process/wspbus.py/Bus._set_cloexec |
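The sweep asks each descriptor for its flags and skips the ones that are not open (IOError), adding FD_CLOEXEC for the rest. A condensed, import-guarded version of the same sweep:

try:
    import fcntl
except ImportError:                    # not available on Windows
    fcntl = None

def set_cloexec(max_files=256):
    if fcntl is None:
        return
    for fd in range(3, max_files):     # skip stdin/stdout/stderr
        try:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        except (IOError, OSError):
            continue                   # descriptor not open, nothing to mark
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)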
def get_slave_instances(self, ):
instances = self.get_database_instances()
master = self.get_master_instance()
try:
instances.remove(master)
except __HOLE__:
raise Exception("Master could not be detected")
return instances | ValueError | dataset/ETHPy150Open globocom/database-as-a-service/dbaas/drivers/base.py/BaseDriver.get_slave_instances |
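list.remove raises ValueError when the element is absent, which here doubles as the "master was never detected" check. The same check with plain strings standing in for instances:

def slaves_only(instances, master):
    instances = list(instances)
    try:
        instances.remove(master)
    except ValueError:                 # master not in the list at all
        raise Exception("Master could not be detected")
    return instances

assert slaves_only(['a', 'b', 'c'], 'b') == ['a', 'c']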
def execute_command(server_config, command, **kwargs):
before_fds = _get_open_fds() # see the comment in the finally clause below
if isinstance(command, basestring):
command = Command(command, **kwargs)
timeout = 300 if command.name in ['stop', 'pull'] else 10
factory = CommandExecutorFactory(server_config, command, timeout)
# reactors aren't designed to be re-startable. In order to be
# able to call execute_command multiple times, we need to force
# re-installation of the reactor; hence this hackery.
# TODO: look into implementing restartable reactors. According to the
# Twisted FAQ, there is no good reason why there isn't one:
# http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted
from twisted.internet import default
del sys.modules['twisted.internet.reactor']
default.install()
global reactor # pylint: disable=W0603
reactor = sys.modules['twisted.internet.reactor']
try:
reactor.connectTCP(server_config.host, server_config.port, factory)
reactor.run()
return factory.result
finally:
# re-startable reactor hack part 2.
# twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor
# stops. So we have to do it for it *rolls eye*.
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
# OK, the reactor is also leaking file descriptors. Tracking down all
# of them is non trivial, so instead we're just comparing the before
# and after lists of open FDs for the current process, and closing all
# new ones, as execute_command should never leave anything open after
# it exits (even when downloading data files from the server).
# TODO: This is way too hacky even compared to the rest of this function.
# Additionally, the current implementation ties this to UNIX,
# so in the long run, we need to do this properly and get the FDs
# from the reactor.
after_fds = _get_open_fds()
for fd in after_fds - before_fds:
try:
os.close(int(fd[1:]))
except __HOLE__:
pass
# Below is the alternative code that gets FDs from the reactor, however
# at the moment it doesn't seem to get everything, which is why code
# above is used instead.
#for fd in reactor._selectables:
# os.close(fd)
#reactor._poller.close() | OSError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/external/daq_server/src/daqpower/client.py/execute_command |
def test_exception_record(self):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
try:
raise ValueError('Something')
except __HOLE__:
attribs = dict(self.SAMPLE_RECORD)
attribs.update({'exc_info': sys.exc_info()})
record = self.create_log_record(**attribs)
log_output = json.loads(formatter.format(record))
# Check if exception type, its message, and stack trace information are in.
exn_output = log_output.pop('exception')
self.assertNotEqual(exn_output.find('ValueError: Something'), -1)
self.assertNotEqual(exn_output.find('logger_test.py'), -1)
self.assertEqual(log_output, self.SAMPLE_OUTPUT) | ValueError | dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/google/cloud/dataflow/worker/logger_test.py/JsonLogFormatterTest.test_exception_record |
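The test relies on sys.exc_info() being populated only inside the except block, where the log record is built with the live traceback. A small standalone illustration of that capture:

import sys
import traceback

def capture_exception_text():
    try:
        raise ValueError('Something')
    except ValueError:
        exc_type, exc_value, exc_tb = sys.exc_info()   # only populated inside except
        return ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))

assert 'ValueError: Something' in capture_exception_text()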
def process(fn):
try:
f = open(fn)
except __HOLE__ as err:
print(err)
return
try:
for i, line in enumerate(f):
line = line.rstrip('\n')
sline = line.rstrip()
if len(line) >= 80 or line != sline or not isascii(line):
print('{}:{:d}:{}{}'.format(
fn, i+1, sline, '_' * (len(line) - len(sline))))
finally:
f.close() | IOError | dataset/ETHPy150Open python/asyncio/check.py/process |
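process() reports lines that are overlong, carry trailing whitespace, or contain non-ASCII characters, and always closes the file in the finally clause. An equivalent self-contained sketch (the isascii helper is inlined here):

def audit(path, max_len=80):
    try:
        f = open(path)
    except IOError as err:
        print(err)
        return
    try:
        for i, line in enumerate(f):
            line = line.rstrip('\n')
            stripped = line.rstrip()
            if (len(line) >= max_len or line != stripped
                    or not all(ord(ch) < 128 for ch in line)):
                print('{}:{:d}:{}'.format(path, i + 1, stripped))
    finally:
        f.close()                      # runs on early exit and on exceptions alike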
def validate(self, modelXbrl, parameters=None):
if not hasattr(modelXbrl.modelDocument, "xmlDocument"): # not parsed
return
self._isStandardUri = {}
modelXbrl.modelManager.disclosureSystem.loadStandardTaxonomiesDict()
# find typedDomainRefs before validateXBRL pass
if modelXbrl.modelManager.disclosureSystem.SBRNL:
for pluginXbrlMethod in pluginClassMethods("Validate.SBRNL.Start"):
pluginXbrlMethod(self, modelXbrl)
self.qnSbrLinkroleorder = ModelValue.qname("http://www.nltaxonomie.nl/5.0/basis/sbr/xbrl/xbrl-syntax-extension","linkroleOrder")
self.typedDomainQnames = set()
self.typedDomainElements = set()
for modelConcept in modelXbrl.qnameConcepts.values():
if modelConcept.isTypedDimension:
typedDomainElement = modelConcept.typedDomainElement
if isinstance(typedDomainElement, ModelConcept):
self.typedDomainQnames.add(typedDomainElement.qname)
self.typedDomainElements.add(typedDomainElement)
# note that some XFM tests are done by ValidateXbrl to prevent multiple node walks
super(ValidateFiling,self).validate(modelXbrl, parameters)
xbrlInstDoc = modelXbrl.modelDocument.xmlDocument.getroot()
disclosureSystem = self.disclosureSystem
disclosureSystemVersion = disclosureSystem.version
modelXbrl.modelManager.showStatus(_("validating {0}").format(disclosureSystem.name))
self.modelXbrl.profileActivity()
conceptsUsed = {} # key=concept object value=True if has presentation label
labelsRelationshipSet = modelXbrl.relationshipSet(XbrlConst.conceptLabel)
if self.validateSBRNL: # include generic labels in a (new) set
genLabelsRelationshipSet = modelXbrl.relationshipSet(XbrlConst.elementLabel)
presentationRelationshipSet = modelXbrl.relationshipSet(XbrlConst.parentChild)
referencesRelationshipSetWithProhibits = modelXbrl.relationshipSet(XbrlConst.conceptReference, includeProhibits=True)
self.modelXbrl.profileActivity("... cache lbl, pre, ref relationships", minTimeToShow=1.0)
validateInlineXbrlGFM = (modelXbrl.modelDocument.type == ModelDocument.Type.INLINEXBRL and
self.validateGFM)
validateEFMpragmatic = disclosureSystem.names and "efm-pragmatic" in disclosureSystem.names
self.validateLoggingSemantic = validateLoggingSemantic = (
modelXbrl.isLoggingEffectiveFor(level="WARNING-SEMANTIC") or
modelXbrl.isLoggingEffectiveFor(level="ERROR-SEMANTIC"))
if self.validateEFM:
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Start"):
pluginXbrlMethod(self)
# instance checks
self.fileNameBasePart = None # prevent testing on fileNameParts if not instance or invalid
self.fileNameDate = None
self.entityRegistrantName = None
self.requiredContext = None
self.standardNamespaceConflicts = defaultdict(set)
self.exhibitType = None # e.g., EX-101, EX-201
if modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE or \
modelXbrl.modelDocument.type == ModelDocument.Type.INLINEXBRL:
instanceName = modelXbrl.modelDocument.basename
# parameter-provided CIKs and registrant names
paramFilerIdentifier = None
paramFilerIdentifiers = None
paramFilerNames = None
submissionType = None
if self.validateEFM and self.parameters:
p = self.parameters.get(ModelValue.qname("CIK",noPrefixIsNoNamespace=True))
if p and len(p) == 2 and p[1] not in ("null", "None"):
paramFilerIdentifier = p[1]
p = self.parameters.get(ModelValue.qname("cikList",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
paramFilerIdentifiers = p[1].split(",")
p = self.parameters.get(ModelValue.qname("cikNameList",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
paramFilerNames = p[1].split("|Edgar|")
if paramFilerIdentifiers and len(paramFilerIdentifiers) != len(paramFilerNames):
self.modelXbrl.error(("EFM.6.05.24.parameters", "GFM.3.02.02"),
_("parameters for cikList and cikNameList different list entry counts: %(cikList)s, %(cikNameList)s"),
modelXbrl=modelXbrl, cikList=paramFilerIdentifiers, cikNameList=paramFilerNames)
p = self.parameters.get(ModelValue.qname("submissionType",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
submissionType = p[1]
p = self.parameters.get(ModelValue.qname("exhibitType",noPrefixIsNoNamespace=True))
if p and len(p) == 2:
self.exhibitType = p[1]
#6.3.3 filename check
m = instanceFileNamePattern.match(instanceName)
if (modelXbrl.modelDocument.type == ModelDocument.Type.INLINEXBRL
and any(name.startswith('efm') for name in disclosureSystem.names)):
m = htmlFileNamePattern.match(instanceName)
if m:
self.fileNameBasePart = None # html file name not necessarily parseable.
self.fileNameDatePart = None
else:
modelXbrl.error(self.EFM60303,
_('Invalid inline xbrl document in "{base}.htm": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=instanceName,
messageCodes=("EFM.6.03.03",))
elif m:
self.fileNameBasePart = m.group(1)
self.fileNameDatePart = m.group(2)
if not self.fileNameBasePart:
modelXbrl.error((self.EFM60303, "GFM.1.01.01"),
_('Invalid instance document base name part (ticker or mnemonic name) in "{base}-{yyyymmdd}.xml": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=modelXbrl.modelDocument.basename,
messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
else:
try:
self.fileNameDate = datetime.datetime.strptime(self.fileNameDatePart,"%Y%m%d").date()
except __HOLE__:
modelXbrl.error((self.EFM60303, "GFM.1.01.01"),
_('Invalid instance document base name part (date) in "{base}-{yyyymmdd}.xml": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=modelXbrl.modelDocument.basename,
messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
else:
modelXbrl.error((self.EFM60303, "GFM.1.01.01"),
_('Invalid instance document name, must match "{base}-{yyyymmdd}.xml": %(filename)s'),
modelObject=modelXbrl.modelDocument, filename=modelXbrl.modelDocument.basename,
messageCodes=("EFM.6.03.03", "EFM.6.23.01", "GFM.1.01.01"))
#6.5.1 scheme, 6.5.2, 6.5.3 identifier
entityIdentifierValue = None
entityIdentifierValueElt = None
if disclosureSystem.identifierValueName: # omit if no checks
for entityIdentifierElt in xbrlInstDoc.iterdescendants("{http://www.xbrl.org/2003/instance}identifier"):
if isinstance(entityIdentifierElt,ModelObject):
schemeAttr = entityIdentifierElt.get("scheme")
entityIdentifier = XmlUtil.text(entityIdentifierElt)
if not disclosureSystem.identifierSchemePattern.match(schemeAttr):
try:
contextId = entityIdentifierElt.getparent().getparent().id
except AttributeError:
contextId = "not available"
modelXbrl.error(("EFM.6.05.01", "GFM.1.02.01"),
_("Invalid entity identifier scheme %(scheme)s in context %(context)s for identifier %(identifier)s"),
modelObject=entityIdentifierElt, scheme=schemeAttr,
context=contextId, identifier=entityIdentifier)
if not disclosureSystem.identifierValuePattern.match(entityIdentifier):
modelXbrl.error(("EFM.6.05.02", "GFM.1.02.02"),
_("Invalid entity identifier %(entityIdentifierName)s: %(entityIdentifer)s"),
modelObject=entityIdentifierElt,
entityIdentifierName=disclosureSystem.identifierValueName,
entityIdentifer=entityIdentifier)
if not entityIdentifierValue:
entityIdentifierValue = entityIdentifier
entityIdentifierValueElt = entityIdentifierElt
if self.validateEFM and not efmCIKpattern.match(entityIdentifierValue):
self.modelXbrl.error("EFM.6.05.23.cikValue",
_("EntityIdentifier %(entityIdentifer)s must be 10 digits."),
modelObject=entityIdentifierElt, entityIdentifer=entityIdentifierValue)
elif entityIdentifier != entityIdentifierValue:
modelXbrl.error(("EFM.6.05.03", "GFM.1.02.03"),
_("Multiple %(entityIdentifierName)ss: %(entityIdentifer)s, %(entityIdentifer2)s"),
modelObject=(entityIdentifierElt, entityIdentifierValueElt),
entityIdentifierName=disclosureSystem.identifierValueName,
entityIdentifer=entityIdentifierValue,
entityIdentifer2=entityIdentifier,
filerIdentifier=",".join(paramFilerIdentifiers or []))
self.modelXbrl.profileActivity("... filer identifier checks", minTimeToShow=1.0)
#6.5.7 duplicated contexts
contexts = modelXbrl.contexts.values()
contextIDs = set()
uniqueContextHashes = {}
contextsWithDisallowedOCEs = []
contextsWithDisallowedOCEcontent = []
for context in contexts:
contextID = context.id
contextIDs.add(contextID)
h = context.contextDimAwareHash
if h in uniqueContextHashes:
if context.isEqualTo(uniqueContextHashes[h]):
modelXbrl.error(("EFM.6.05.07", "GFM.1.02.07"),
_("Context ID %(context)s is equivalent to context ID %(context2)s"),
modelObject=(context, uniqueContextHashes[h]), context=contextID, context2=uniqueContextHashes[h].id)
else:
uniqueContextHashes[h] = context
#GFM no time in contexts
if self.validateGFM:
for dateElt in XmlUtil.children(context, XbrlConst.xbrli, ("startDate", "endDate", "instant")):
dateText = XmlUtil.text(dateElt)
if not GFMcontextDatePattern.match(dateText):
modelXbrl.error("GFM.1.02.25",
_("Context id %(context)s %(elementName)s invalid content %(value)s"),
modelObject=dateElt, context=contextID,
elementName=dateElt.prefixedName, value=dateText)
#6.5.4 scenario
hasSegment = XmlUtil.hasChild(context, XbrlConst.xbrli, "segment")
hasScenario = XmlUtil.hasChild(context, XbrlConst.xbrli, "scenario")
notAllowed = None
if disclosureSystem.contextElement == "segment" and hasScenario:
notAllowed = _("Scenario")
elif disclosureSystem.contextElement == "scenario" and hasSegment:
notAllowed = _("Segment")
elif disclosureSystem.contextElement == "either" and hasSegment and hasScenario:
notAllowed = _("Both segment and scenario")
elif disclosureSystem.contextElement == "none" and (hasSegment or hasScenario):
notAllowed = _("Neither segment nor scenario")
if notAllowed:
if validateEFMpragmatic:
contextsWithDisallowedOCEs.append(context)
else:
modelXbrl.error(("EFM.6.05.04", "GFM.1.02.04", "SBR.NL.2.3.5.06"),
_("%(elementName)s element not allowed in context Id: %(context)s"),
modelObject=context, elementName=notAllowed, context=contextID, count=1)
#6.5.5 segment only explicit dimensions
for contextName in {"segment": ("{http://www.xbrl.org/2003/instance}segment",),
"scenario": ("{http://www.xbrl.org/2003/instance}scenario",),
"either": ("{http://www.xbrl.org/2003/instance}segment","{http://www.xbrl.org/2003/instance}scenario"),
"both": ("{http://www.xbrl.org/2003/instance}segment","{http://www.xbrl.org/2003/instance}scenario"),
"none": [], None:[]
}[disclosureSystem.contextElement]:
for segScenElt in context.iterdescendants(contextName):
if isinstance(segScenElt,ModelObject):
childTags = ", ".join([child.prefixedName for child in segScenElt.iterchildren()
if isinstance(child,ModelObject) and
child.tag != "{http://xbrl.org/2006/xbrldi}explicitMember"])
if len(childTags) > 0:
if validateEFMpragmatic:
contextsWithDisallowedOCEcontent.append(context)
else:
modelXbrl.error(("EFM.6.05.05", "GFM.1.02.05"),
_("%(elementName)s of context Id %(context)s has disallowed content: %(content)s"),
modelObject=context, context=contextID, content=childTags,
elementName=contextName.partition("}")[2].title())
#6.5.38 period forever
if context.isForeverPeriod:
self.modelXbrl.error("EFM.6.05.38",
_("Context %(contextID)s has a forever period."),
modelObject=context, contextID=contextID)
if validateEFMpragmatic: # output combined count message
if contextsWithDisallowedOCEs:
modelXbrl.error(("EFM.6.05.04", "GFM.1.02.04"),
_("%(count)s contexts contain disallowed %(elementName)s: %(context)s"),
modelObject=contextsWithDisallowedOCEs, elementName=notAllowed,
count=len(contextsWithDisallowedOCEs), context=', '.join(c.id for c in contextsWithDisallowedOCEs))
if contextsWithDisallowedOCEcontent:
modelXbrl.error(("EFM.6.05.05", "GFM.1.02.05"),
_("%(count)s contexts contain disallowed %(elementName)s content: %(context)s"),
modelObject=contextsWithDisallowedOCEcontent, elementName=disclosureSystem.contextElement,
count=len(contextsWithDisallowedOCEcontent), context=', '.join(c.id for c in contextsWithDisallowedOCEcontent))
del uniqueContextHashes, contextsWithDisallowedOCEs, contextsWithDisallowedOCEcontent
self.modelXbrl.profileActivity("... filer context checks", minTimeToShow=1.0)
#fact items from standard context (no dimension)
amendmentDescription = None
amendmentDescriptionFact = None
amendmentFlag = None
amendmentFlagFact = None
documentPeriodEndDate = None
documentPeriodEndDateFact = None
documentType = None
documentTypeFact = None
deiItems = {}
deiFacts = {}
commonSharesItemsByStockClass = defaultdict(list)
commonSharesClassMembers = None
hasDefinedStockAxis = False
hasCommonSharesOutstandingDimensionedFactWithDefaultStockClass = False
commonSharesClassUndefinedMembers = None
commonStockMeasurementDatetime = None
deiCheckLocalNames = {
"EntityRegistrantName",
"EntityCommonStockSharesOutstanding",
"EntityCurrentReportingStatus",
"EntityVoluntaryFilers",
disclosureSystem.deiCurrentFiscalYearEndDateElement,
"EntityFilerCategory",
"EntityWellKnownSeasonedIssuer",
"EntityPublicFloat",
disclosureSystem.deiDocumentFiscalYearFocusElement,
"DocumentFiscalPeriodFocus",
"EntityReportingCurrencyISOCode", # for SD
}
#6.5.8 unused contexts
for f in modelXbrl.facts:
factContextID = f.contextID
contextIDs.discard(factContextID)
context = f.context
factInDeiNamespace = None
factQname = f.qname # works for both inline and plain instances
if factQname: # may be none in error situations
factElementName = factQname.localName
if disclosureSystem.deiNamespacePattern is not None:
factInDeiNamespace = disclosureSystem.deiNamespacePattern.match(factQname.namespaceURI)
# standard dei items from required context
if context is not None: # tests do not apply to tuples
if not context.hasSegment and not context.hasScenario:
#default context
if factInDeiNamespace:
value = f.value
if factElementName == disclosureSystem.deiAmendmentFlagElement:
amendmentFlag = value
amendmentFlagFact = f
elif factElementName == "AmendmentDescription":
amendmentDescription = value
amendmentDescriptionFact = f
elif factElementName == disclosureSystem.deiDocumentPeriodEndDateElement:
documentPeriodEndDate = value
documentPeriodEndDateFact = f
commonStockMeasurementDatetime = context.endDatetime
elif factElementName == "DocumentType":
documentType = value
documentTypeFact = f
elif factElementName == disclosureSystem.deiFilerIdentifierElement:
deiItems[factElementName] = value
deiFilerIdentifierFact = f
elif factElementName == disclosureSystem.deiFilerNameElement:
deiItems[factElementName] = value
deiFilerNameFact = f
elif factElementName in deiCheckLocalNames:
deiItems[factElementName] = value
deiFacts[factElementName] = f
if (self.requiredContext is None and context.isStartEndPeriod and
context.startDatetime is not None and context.endDatetime is not None):
self.requiredContext = context
else:
# segment present
isEntityCommonStockSharesOutstanding = factElementName == "EntityCommonStockSharesOutstanding"
hasClassOfStockMember = False
# note all concepts used in explicit dimensions
for dimValue in context.qnameDims.values():
if dimValue.isExplicit:
dimConcept = dimValue.dimension
memConcept = dimValue.member
for dConcept in (dimConcept, memConcept):
if dConcept is not None:
conceptsUsed[dConcept] = False
if (isEntityCommonStockSharesOutstanding and
dimConcept is not None and
dimConcept.name == "StatementClassOfStockAxis"):
commonSharesItemsByStockClass[memConcept.qname].append(f)
''' per discussion with Dean R, remove use of LB defined members from this test
if commonSharesClassMembers is None:
commonSharesClassMembers, hasDefinedStockAxis = self.getDimMembers(dimConcept)
if not hasDefinedStockAxis: # no def LB for stock axis, note observed members
commonSharesClassMembers.add(memConcept.qname)
#following is replacement:'''
if commonSharesClassMembers is None:
commonSharesClassMembers = set()
commonSharesClassMembers.add(memConcept.qname) # only note the actually used members, not any defined members
#end of replacement
hasClassOfStockMember = True
if isEntityCommonStockSharesOutstanding and not hasClassOfStockMember:
hasCommonSharesOutstandingDimensionedFactWithDefaultStockClass = True # absent dimension, may be no def LB
if self.validateEFM: # note that this is in the "if context is not None" region
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Fact"):
pluginXbrlMethod(self, f)
#6.5.17 facts with precision
concept = f.concept
if concept is None:
modelXbrl.error(("EFM.6.04.03", "GFM.2.01.01"),
_("Fact %(fact)s of context %(contextID)s has an XBRL error"),
modelObject=f, fact=f.qname, contextID=factContextID)
else:
# note fact concepts used
conceptsUsed[concept] = False
if concept.isNumeric:
if f.precision:
modelXbrl.error(("EFM.6.05.17", "GFM.1.02.16"),
_("Numeric fact %(fact)s of context %(contextID)s has a precision attribute '%(precision)s'"),
modelObject=f, fact=f.qname, contextID=factContextID, precision=f.precision)
#6.5.25 domain items as facts
if self.validateEFM and concept.type is not None and concept.type.isDomainItemType:
modelXbrl.error("EFM.6.05.25",
_("Domain item %(fact)s in context %(contextID)s may not appear as a fact"),
modelObject=f, fact=f.qname, contextID=factContextID)
if validateInlineXbrlGFM:
if f.localName == "nonFraction" or f.localName == "fraction":
syms = signOrCurrencyPattern.findall(f.text)
if syms:
modelXbrl.error(("EFM.N/A", "GFM.1.10.18"),
'ix-numeric Fact %(fact)s of context %(contextID)s has a sign or currency symbol "%(value)s" in "%(text)s"',
modelObject=f, fact=f.qname, contextID=factContextID,
value="".join(s for t in syms for s in t), text=f.text)
self.entityRegistrantName = deiItems.get("EntityRegistrantName") # used for name check in 6.8.6
# 6.05.23,24 check (after dei facts read)
if not (entityIdentifierValue == "0000000000" and self.validateEFM and documentType == "L SDR"):
if disclosureSystem.deiFilerIdentifierElement in deiItems:
value = deiItems[disclosureSystem.deiFilerIdentifierElement]
if entityIdentifierValue != value:
self.modelXbrl.error(("EFM.6.05.23", "GFM.3.02.02"),
_("dei:%(elementName)s %(value)s must match the context entity identifier %(entityIdentifier)s"),
modelObject=deiFilerIdentifierFact, elementName=disclosureSystem.deiFilerIdentifierElement,
value=value, entityIdentifier=entityIdentifierValue)
if paramFilerIdentifiers:
if value not in paramFilerIdentifiers:
self.modelXbrl.error(("EFM.6.05.23.submissionIdentifier", "GFM.3.02.02"),
_("dei:%(elementName)s %(value)s must match submission: %(filerIdentifier)s"),
modelObject=deiFilerIdentifierFact, elementName=disclosureSystem.deiFilerIdentifierElement,
value=value, filerIdentifier=",".join(paramFilerIdentifiers))
elif paramFilerIdentifier and value != paramFilerIdentifier:
self.modelXbrl.error(("EFM.6.05.23.submissionIdentifier", "GFM.3.02.02"),
_("dei:%(elementName)s %(value)s must match submission: %(filerIdentifier)s"),
modelObject=deiFilerIdentifierFact, elementName=disclosureSystem.deiFilerIdentifierElement,
value=value, filerIdentifier=paramFilerIdentifier)
if disclosureSystem.deiFilerNameElement in deiItems:
value = deiItems[disclosureSystem.deiFilerNameElement]
if paramFilerIdentifiers and paramFilerNames and entityIdentifierValue in paramFilerIdentifiers:
prefix = paramFilerNames[paramFilerIdentifiers.index(entityIdentifierValue)]
if not value.lower().startswith(prefix.lower()):
self.modelXbrl.error(("EFM.6.05.24", "GFM.3.02.02"),
_("dei:%(elementName)s %(prefix)s should be a case-insensitive prefix of: %(value)s"),
modelObject=deiFilerNameFact, elementName=disclosureSystem.deiFilerNameElement,
prefix=prefix, value=value)
self.modelXbrl.profileActivity("... filer fact checks", minTimeToShow=1.0)
if len(contextIDs) > 0: # check if contextID is on any undefined facts
for undefinedFact in modelXbrl.undefinedFacts:
contextIDs.discard(undefinedFact.get("contextRef"))
if len(contextIDs) > 0:
modelXbrl.error(("EFM.6.05.08", "GFM.1.02.08"),
_("The instance document contained a context(s) %(contextIDs)s that was(are) not used in any fact."),
modelXbrl=modelXbrl, contextIDs=", ".join(str(c) for c in contextIDs))
#6.5.9, .10 start-end durations
if disclosureSystem.GFM or \
disclosureSystemVersion[0] >= 27 or \
documentType in {
'20-F', '40-F', '10-Q', '10-QT', '10-K', '10-KT', '10', 'N-CSR', 'N-CSRS', 'N-Q',
'20-F/A', '40-F/A', '10-Q/A', '10-QT/A', '10-K/A', '10-KT/A', '10/A', 'N-CSR/A', 'N-CSRS/A', 'N-Q/A'}:
'''
for c1 in contexts:
if c1.isStartEndPeriod:
end1 = c1.endDatetime
start1 = c1.startDatetime
for c2 in contexts:
if c1 != c2 and c2.isStartEndPeriod:
duration = end1 - c2.startDatetime
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
modelXbrl.error(("EFM.6.05.09", "GFM.1.2.9"),
_("Context {0} endDate and {1} startDate have a duration of one day; that is inconsistent with document type {2}."),
c1.id, c2.id, documentType),
"err", )
if self.validateEFM and c1 != c2 and c2.isInstantPeriod:
duration = c2.endDatetime - start1
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
modelXbrl.error(
_("Context {0} startDate and {1} end (instant) have a duration of one day; that is inconsistent with document type {2}."),
c1.id, c2.id, documentType),
"err", "EFM.6.05.10")
'''
durationCntxStartDatetimes = defaultdict(set)
for cntx in contexts:
if cntx.isStartEndPeriod and cntx.startDatetime is not None:
durationCntxStartDatetimes[cntx.startDatetime].add(cntx)
probStartEndCntxsByEnd = defaultdict(set)
startEndCntxsByEnd = defaultdict(set)
probInstantCntxsByEnd = defaultdict(set)
probCntxs = set()
for cntx in contexts:
end = cntx.endDatetime
if end is not None:
if cntx.isStartEndPeriod:
thisStart = cntx.startDatetime
for otherStart, otherCntxs in durationCntxStartDatetimes.items():
duration = end - otherStart
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
if disclosureSystemVersion[0] < 27:
probCntxs |= otherCntxs - {cntx}
elif thisStart is not None and end - thisStart > datetime.timedelta(1):
for otherCntx in otherCntxs:
if otherCntx is not cntx and otherCntx.endDatetime != end and otherStart != cntx.startDatetime:
probCntxs.add(otherCntx)
if probCntxs:
probStartEndCntxsByEnd[end] |= probCntxs
startEndCntxsByEnd[end] |= {cntx}
probCntxs.clear()
if self.validateEFM and cntx.isInstantPeriod:
for otherStart, otherCntxs in durationCntxStartDatetimes.items():
duration = end - otherStart
if duration > datetime.timedelta(0) and duration <= datetime.timedelta(1):
probCntxs |= otherCntxs
if probCntxs:
probInstantCntxsByEnd[end] |= ( probCntxs | {cntx} )
probCntxs.clear()
del probCntxs
for end, probCntxs in probStartEndCntxsByEnd.items():
endCntxs = startEndCntxsByEnd[end]
modelXbrl.error(("EFM.6.05.09", "GFM.1.2.9"),
_("Context endDate %(endDate)s, and startDate(s) have a duration of one day, for end context(s): %(endContexts)s and start context(s): %(startContexts)s; that is inconsistent with document type %(documentType)s."),
modelObject=probCntxs, endDate=XmlUtil.dateunionValue(end, subtractOneDay=True),
endContexts=', '.join(sorted(c.id for c in endCntxs)),
startContexts=', '.join(sorted(c.id for c in probCntxs)),
documentType=documentType)
if disclosureSystemVersion[0] < 27:
for end, probCntxs in probInstantCntxsByEnd.items():
modelXbrl.error("EFM.6.05.10",
_("Context instant date %(endDate)s startDate has a duration of one day,with end (instant) of context(s): %(contexts)s; that is inconsistent with document type %(documentType)s."),
modelObject=probCntxs, endDate=XmlUtil.dateunionValue(end, subtractOneDay=True),
contexts=', '.join(sorted(c.id for c in probCntxs)),
documentType=documentType)
del probStartEndCntxsByEnd, startEndCntxsByEnd, probInstantCntxsByEnd
del durationCntxStartDatetimes
self.modelXbrl.profileActivity("... filer instant-duration checks", minTimeToShow=1.0)
#6.5.19 required context
foundRequiredContext = False
for c in contexts:
if c.isStartEndPeriod:
if not c.hasSegment:
foundRequiredContext = True
break
if not foundRequiredContext:
modelXbrl.error(("EFM.6.05.19", "GFM.1.02.18"),
_("Required context (no segment) not found for document type %(documentType)s."),
modelObject=documentTypeFact, documentType=documentType)
#6.5.11 equivalent units
uniqueUnitHashes = {}
for unit in self.modelXbrl.units.values():
h = unit.hash
if h in uniqueUnitHashes:
if unit.isEqualTo(uniqueUnitHashes[h]):
modelXbrl.error(("EFM.6.05.11", "GFM.1.02.10"),
_("Units %(unitID)s and %(unitID2)s are equivalent."),
modelObject=(unit, uniqueUnitHashes[h]), unitID=unit.id, unitID2=uniqueUnitHashes[h].id)
else:
uniqueUnitHashes[h] = unit
if self.validateEFM: # 6.5.38
for measureElt in unit.iterdescendants(tag="{http://www.xbrl.org/2003/instance}measure"):
if isinstance(measureElt.xValue, ModelValue.QName) and len(measureElt.xValue.localName) > 65:
l = len(measureElt.xValue.localName.encode("utf-8"))
if l > 200:
modelXbrl.error("EFM.6.05.36",
_("Unit has a measure with localName length (%(length)s) over 200 bytes long in utf-8, %(measure)s."),
modelObject=measureElt, unitID=unit.id, measure=measureElt.xValue.localName, length=l)
del uniqueUnitHashes
self.modelXbrl.profileActivity("... filer unit checks", minTimeToShow=1.0)
# EFM.6.05.14, GFM.1.02.13 xml:lang tests, as of v-17, full default lang is compared
#if self.validateEFM:
# factLangStartsWith = disclosureSystem.defaultXmlLang[:2]
#else:
# factLangStartsWith = disclosureSystem.defaultXmlLang
requiredFactLang = disclosureSystem.defaultXmlLang
#6.5.12 equivalent facts
factsForLang = {}
factForConceptContextUnitLangHash = {}
keysNotDefaultLang = {}
iF1 = 1
for f1 in modelXbrl.facts:
# build keys table for 6.5.14
if not f1.isNil:
langTestKey = "{0},{1},{2}".format(f1.qname, f1.contextID, f1.unitID)
factsForLang.setdefault(langTestKey, []).append(f1)
lang = f1.xmlLang
if lang and lang != requiredFactLang: # not lang.startswith(factLangStartsWith):
keysNotDefaultLang[langTestKey] = f1
# 6.5.37 test (insignificant digits due to rounding)
if f1.isNumeric and f1.decimals and f1.decimals != "INF" and not f1.isNil and getattr(f1,"xValid", 0) == 4:
try:
insignificance = insignificantDigits(f1.xValue, decimals=f1.decimals)
if insignificance: # if not None, returns (truncatedDigits, insignificantDigits)
modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s has nonzero digits in insignificant portion %(insignificantDigits)s."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals,
value=f1.xValue, truncatedDigits=insignificance[0], insignificantDigits=insignificance[1])
except (ValueError,TypeError):
modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s causes Value Error exception."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals, value=f1.value)
# 6.5.12 test
h = f1.conceptContextUnitLangHash
if h in factForConceptContextUnitLangHash:
f2 = factForConceptContextUnitLangHash[h]
if f1.qname == f2.qname and \
f1.contextID == f2.contextID and \
f1.unitID == f2.unitID and \
f1.xmlLang == f2.xmlLang:
modelXbrl.error(("EFM.6.05.12", "GFM.1.02.11"),
"Facts %(fact)s of context %(contextID)s and %(contextID2)s are equivalent.",
modelObject=(f1, f2), fact=f1.qname, contextID=f1.contextID, contextID2=f2.contextID)
else:
factForConceptContextUnitLangHash[h] = f1
iF1 += 1
del factForConceptContextUnitLangHash
self.modelXbrl.profileActivity("... filer fact checks", minTimeToShow=1.0)
#6.5.14 facts without english text
for keyNotDefaultLang, factNotDefaultLang in keysNotDefaultLang.items():
anyDefaultLangFact = False
for fact in factsForLang[keyNotDefaultLang]:
if fact.xmlLang == requiredFactLang: #.startswith(factLangStartsWith):
anyDefaultLangFact = True
break
if not anyDefaultLangFact:
self.modelXbrl.error(("EFM.6.05.14", "GFM.1.02.13"),
_("Fact %(fact)s of context %(contextID)s has text of xml:lang '%(lang)s' without corresponding %(lang2)s text"),
modelObject=factNotDefaultLang, fact=factNotDefaultLang.qname, contextID=factNotDefaultLang.contextID,
lang=factNotDefaultLang.xmlLang, lang2=requiredFactLang) # factLangStartsWith)
#label validations
if not labelsRelationshipSet:
self.modelXbrl.error(("EFM.6.10.01.missingLabelLinkbase", "GFM.1.05.01"),
_("A label linkbase is required but was not found"),
modelXbrl=modelXbrl)
elif disclosureSystem.defaultXmlLang: # cannot check if no defaultXmlLang specified
for concept in conceptsUsed.keys():
self.checkConceptLabels(modelXbrl, labelsRelationshipSet, disclosureSystem, concept)
#6.5.15 facts with xml in text blocks
if self.validateEFMorGFM:
ValidateFilingText.validateTextBlockFacts(modelXbrl)
if amendmentFlag is None:
modelXbrl.log("WARNING" if validateEFMpragmatic else "ERROR",
("EFM.6.05.20.missingAmendmentFlag", "GFM.3.02.01"),
_("%(elementName)s is not found in the default context"),
modelXbrl=modelXbrl, elementName=disclosureSystem.deiAmendmentFlagElement)
if not documentPeriodEndDate:
modelXbrl.error(("EFM.6.05.20.missingDocumentPeriodEndDate", "GFM.3.02.01"),
_("%(elementName)s is required and was not found in the default context"),
modelXbrl=modelXbrl, elementName=disclosureSystem.deiDocumentPeriodEndDateElement)
else:
dateMatch = datePattern.match(documentPeriodEndDate)
if not dateMatch or dateMatch.lastindex != 3:
modelXbrl.error(("EFM.6.05.20", "GFM.3.02.01"),
_("%(elementName)s is in the default context is incorrect '%(date)s'"),
modelXbrl=modelXbrl, elementName=disclosureSystem.deiDocumentPeriodEndDateElement,
date=documentPeriodEndDate)
self.modelXbrl.profileActivity("... filer label and text checks", minTimeToShow=1.0)
if self.validateEFM:
if amendmentFlag == "true" and amendmentDescription is None:
modelXbrl.log("WARNING" if validateEFMpragmatic else "ERROR",
"EFM.6.05.20.missingAmendmentDescription",
_("AmendmentFlag is true in context %(contextID)s so AmendmentDescription is also required"),
modelObject=amendmentFlagFact, contextID=amendmentFlagFact.contextID if amendmentFlagFact is not None else "unknown")
if amendmentDescription is not None and amendmentFlag != "true":
modelXbrl.log("WARNING" if validateEFMpragmatic else "ERROR",
"EFM.6.05.20.extraneous",
_("AmendmentDescription can not be provided when AmendmentFlag is not true in context %(contextID)s"),
modelObject=amendmentDescriptionFact, contextID=amendmentDescriptionFact.contextID)
if documentType is None:
modelXbrl.error("EFM.6.05.20.missingDocumentType",
_("DocumentType is required and was not found in the default context"),
modelXbrl=modelXbrl)
elif documentType not in {
"497",
"10-12B",
"10-12B/A",
"10-12G",
"10-12G/A",
"10-K/A",
"10-KT",
"10-K",
"10-KT/A",
"10-Q/A",
"10-QT",
"10-Q",
"10-QT/A",
"20-F",
"20-F/A",
"20FR12B",
"20FR12B/A",
"20FR12G",
"20FR12G/A",
"40-F",
"40-F/A",
"40FR12B",
"40FR12B/A",
"40FR12G",
"40FR12G/A",
"485BPOS",
"6-K",
"6-K/A",
"8-K",
"8-K/A",
"8-K12B",
"8-K12B/A",
"8-K12G3",
"8-K12G3/A",
"8-K15D5",
"8-K15D5/A",
"F-1/A",
"F-10",
"F-10/A",
"F-10EF",
"F-10POS",
"F-3/A",
"F-3ASR",
"F-3D",
"F-3DPOS",
"F-4 POS",
"F-4/A",
"F-4EF",
"F-9 POS",
"F-9/A",
"F-9",
"F-9EF",
"N-1A",
"N-1A/A",
"N-CSR",
"N-CSR/A",
"N-CSRS",
"N-CSRS/A",
"N-Q",
"N-Q/A",
"F-1",
"F-6",
"POS AM",
"SD",
"SD/A",
"S-20",
"S-B",
"F-4",
"POS EX",
"F-1MEF",
"F-3MEF",
"F-4MEF",
"K SDR",
"L SDR",
"POS462B",
"POS462C",
"S-BMEF",
"F-3",
"Other",
"POSASR",
"S-1",
"S-1/A",
"S-11",
"S-11/A",
"S-11MEF",
"S-1MEF",
"S-3/A",
"S-3ASR",
"S-3D",
"S-3",
"S-3DPOS",
"S-3MEF",
"S-4 POS",
"S-4/A",
"S-4",
"S-4EF",
"S-4MEF",
"SD",
"SD/A",
"SP 15D2",
"SP 15D2/A"
}:
modelXbrl.error("EFM.6.05.20.documentTypeValue",
_("DocumentType '%(documentType)s' of context %(contextID)s was not recognized"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType)
elif submissionType:
expectedDocumentTypes = {
"10-12B": ("10-12B", "Other"),
"10-12B/A": ("10-12B/A", "Other"),
"10-12G": ("10-12G", "Other"),
"10-12G/A": ("10-12G/A", "Other"),
"10-K": ("10-K",),
"10-K/A": ("10-K", "10-K/A"),
"10-KT": ("10-K","10-KT","Other"),
"10-KT/A": ("10-K", "10-KT", "10-KT/A", "Other"),
"10-Q": ("10-Q",),
"10-Q/A": ("10-Q", "10-Q/A"),
"10-QT": ("10-Q", "10-QT", "Other"),
"10-QT/A": ("10-Q", "10-QT", "10-QT/A", "Other"),
"20-F": ("20-F",),
"20-F/A": ("20-F", "20-F/A"),
"20FR12B": ("20FR12B", "Other"),
"20FR12B/A": ("20FR12B/A", "Other"),
"20FR12G": ("20FR12G", "Other"),
"20FR12G/A": ("20FR12G/A", "Other"),
"40-F": ("40-F",),
"40-F/A": ("40-F", "40-F/A"),
"40FR12B": ("40FR12B", "Other"),
"40FR12B/A": ("40FR12B/A", "Other"),
"40FR12G": ("40FR12G", "Other"),
"40FR12G/A": ("40FR12G/A", "Other"),
"485BPOS": ("485BPOS",),
"497": ("497", "Other"),
"6-K": ("6-K",),
"6-K/A": ("6-K", "6-K/A"),
"8-K": ("8-K",),
"8-K/A": ("8-K", "8-K/A"),
"8-K12B": ("8-K12B", "Other"),
"8-K12B/A": ("8-K12B/A", "Other"),
"8-K12G3": ("8-K12G3", "Other"),
"8-K12G3/A": ("8-K12G3/A", "Other"),
"8-K15D5": ("8-K15D5", "Other"),
"8-K15D5/A": ("8-K15D5/A", "Other"),
"F-1": ("F-1",),
"F-1/A": ("F-1", "F-1/A"),
"F-10": ("F-10",),
"F-10/A": ("F-10", "F-10/A"),
"F-10EF": ("F-10EF", "Other"),
"F-10POS": ("F-10POS", "Other"),
"F-1MEF": ("F-1MEF",),
"F-3": ("F-3",),
"F-3/A": ("F-3", "F-3/A"),
"F-3ASR": ("F-3", "F-3ASR"),
"F-3D": ("F-3", "F-3D"),
"F-3DPOS": ("F-3", "F-3DPOS"),
"F-3MEF": ("F-3MEF",),
"F-4": ("F-4",),
"F-4 POS": ("F-4", "F-4 POS"),
"F-4/A": ("F-4", "F-4/A"),
"F-4EF": ("F-4", "F-4EF"),
"F-4MEF": ("F-4MEF",),
"F-9": ("F-9",),
"F-9 POS": ("F-9", "F-9 POS"),
"F-9/A": ("F-9", "F-9/A"),
"F-9EF": ("F-9", "F-9EF"),
"N-1A": ("N-1A",),
"N-1A/A": ("N-1A/A", "Other"),
"N-CSR": ("N-CSR",),
"N-CSR/A": ("N-CSR/A",),
"N-CSRS": ("N-CSRS",),
"N-CSRS/A": ("N-CSRS/A",),
"N-Q": ("N-Q",),
"N-Q/A": ("N-Q/A",),
"POS AM": ("F-1", "F-3", "F-4", "F-6", "Other",
"POS AM", "S-1", "S-11", "S-20", "S-3", "S-4", "S-B"),
"POS EX": ("F-3", "F-4", "Other",
"POS EX", "S-1", "S-3", "S-4"),
"POS462B": ("F-1MEF", "F-3MEF", "F-4MEF", "Other",
"POS462B", "POS462C", "S-11MEF", "S-1MEF", "S-3MEF", "S-BMEF"),
"POSASR": ("F-3", "Other", "POSASR", "S-3"),
"S-1": ("S-1",),
"S-1/A": ("S-1", "S-1/A"),
"S-11": ("S-11",),
"S-11/A": ("S-11/A",),
"S-11MEF": ("S-11MEF",),
"S-1MEF": ("S-1MEF",),
"S-3": ("S-3",),
"S-3/A": ("S-3", "S-3/A"),
"S-3ASR": ("S-3", "S-3ASR"),
"S-3D": ("S-3", "S-3D"),
"S-3DPOS": ("S-3", "S-3DPOS"),
"S-3MEF": ("S-3MEF",),
"S-4": ("S-4",),
"S-4 POS": ("S-4", "S-4 POS"),
"S-4/A": ("S-4", "S-4/A"),
"S-4EF": ("S-4", "S-4EF"),
"S-4MEF": ("S-4MEF",),
"SD": ("SD",),
"SD/A": ("SD/A",),
"SP 15D2": ("SP 15D2",),
"SP 15D2/A": ("SP 15D2/A",),
"SDR": ("K SDR", "L SDR"),
"SDR/A": ("K SDR", "L SDR"),
"SDR-A": ("K SDR", "L SDR"),
"SDR/W ": ("K SDR", "L SDR")
}.get(submissionType)
if expectedDocumentTypes and documentType not in expectedDocumentTypes:
modelXbrl.error("EFM.6.05.20.submissionDocumentType" if self.exhibitType != "EX-2.01" else "EFM.6.23.03",
_("DocumentType '%(documentType)s' of context %(contextID)s inapplicable to submission form %(submissionType)s"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType, submissionType=submissionType,
messageCodes=("EFM.6.05.20.submissionDocumentType", "EFM.6.23.03"))
if self.exhibitType and documentType is not None:
if (documentType in ("SD", "SD/A")) != (self.exhibitType == "EX-2.01"):
modelXbrl.error({"EX-100":"EFM.6.23.04",
"EX-101":"EFM.6.23.04",
"EX-99.K SDR.INS":"EFM.6.23.04",
"EX-99.L SDR.INS":"EFM.6.23.04",
"EX-2.01":"EFM.6.23.05"}.get(self.exhibitType,"EX-101"),
_("The value for dei:DocumentType, %(documentType)s, is not allowed for %(exhibitType)s attachments."),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType, exhibitType=self.exhibitType,
messageCodes=("EFM.6.23.04", "EFM.6.23.04", "EFM.6.23.05"))
elif (((documentType == "K SDR") != (val.exhibitType in ("EX-99.K SDR", "EX-99.K SDR.INS"))) or
((documentType == "L SDR") != (val.exhibitType in ("EX-99.L SDR", "EX-99.L SDR.INS")))):
modelXbrl.error("EFM.6.05.20.exhibitDocumentType",
_("The value for dei:DocumentType, '%(documentType)s' is not allowed for %(exhibitType)s attachments."),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType, exhibitType=val.exhibitType)
# 6.5.21
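# each tuple below pairs a group of document types with the dei items that must be present and non-empty for those types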
for doctypesRequired, deiItemsRequired in (
(("10-K", "10-KT", "10-Q", "10-QT", "20-F", "40-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A", "40-F/A",
"6-K", "NCSR", "N-CSR", "N-CSRS", "N-Q",
"6-K/A", "NCSR/A", "N-CSR/A", "N-CSRS/A", "N-Q/A",
"10", "S-1", "S-3", "S-4", "S-11", "POS AM",
"10/A", "S-1/A", "S-3/A", "S-4/A", "S-11/A",
"8-K", "F-1", "F-3", "F-10", "497", "485BPOS",
"8-K/A", "F-1/A", "F-3/A", "F-10/A", "K SDR", "L SDR",
"Other"),
("EntityRegistrantName", "EntityCentralIndexKey")),
(("10-K", "10-KT", "20-F", "40-F",
"10-K/A", "10-KT/A", "20-F/A", "40-F/A"),
("EntityCurrentReportingStatus",)),
(("10-K", "10-KT", "10-K/A", "10-KT/A",),
("EntityVoluntaryFilers", "EntityPublicFloat")),
(("10-K", "10-KT", "10-Q", "10-QT", "20-F", "40-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A", "40-F/A",
"6-K", "NCSR", "N-CSR", "N-CSRS", "N-Q",
"6-K/A", "NCSR/A", "N-CSR/A", "N-CSRS/A", "N-Q/A", "K SDR", "L SDR"),
("CurrentFiscalYearEndDate", "DocumentFiscalYearFocus", "DocumentFiscalPeriodFocus")),
(("10-K", "10-KT", "10-Q", "10-QT", "20-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A",
"10", "S-1", "S-3", "S-4", "S-11", "POS AM",
"10/A", "S-1/A", "S-3/A", "S-4/A", "S-11/A", "K SDR", "L SDR"),
("EntityFilerCategory",)),
(("10-K", "10-KT", "20-F", "10-K/A", "10-KT/A", "20-F/A"),
("EntityWellKnownSeasonedIssuer",)),
(("SD", "SD/A"),
("EntityReportingCurrencyISOCode", ))
):
if documentType in doctypesRequired:
for deiItem in deiItemsRequired:
if deiItem not in deiItems or not deiItems[deiItem]: #must exist and value must be non-empty (incl not nil)
modelXbrl.log(("WARNING" if validateEFMpragmatic and deiItem in {
"CurrentFiscalYearEndDate", "DocumentFiscalPeriodFocus", "DocumentFiscalYearFocus",
"EntityCurrentReportingStatus", "EntityFilerCategory", "EntityPublicFloat",
"EntityVoluntaryFilers", "EntityWellKnownSeasonedIssuer"
} else "ERROR"),
("EFM.6.05.21.{0}".format(deiItem) if validateEFMpragmatic and deiItem in {
"CurrentFiscalYearEndDate", "DocumentFiscalPeriodFocus", "DocumentFiscalYearFocus",
"EntityRegistrantName", "EntityCentralIndexKey",
"EntityCurrentReportingStatus", "EntityFilerCategory", "EntityPublicFloat",
"EntityVoluntaryFilers", "EntityWellKnownSeasonedIssuer"
} else "EFM.6.23.36" if deiItem == "EntityReportingCurrencyISOCode"
else "EFM.6.05.21"),
_("dei:%(elementName)s is required for DocumentType '%(documentType)s' of context %(contextID)s"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType,
elementName=deiItem,
messageCodes=("EFM.6.05.21.CurrentFiscalYearEndDate", "EFM.6.05.21.DocumentFiscalPeriodFocus", "EFM.6.05.21.DocumentFiscalYearFocus",
"EFM.6.05.21.EntityRegistrantName", "EFM.6.05.21.EntityCentralIndexKey",
"EFM.6.05.21.EntityCurrentReportingStatus", "EFM.6.05.21.EntityFilerCategory", "EFM.6.05.21.EntityPublicFloat",
"EFM.6.05.21.EntityVoluntaryFilers", "EFM.6.05.21.EntityWellKnownSeasonedIssuer",
"EFM.6.23.36", "EFM.6.05.21"))
if documentType in {"10-K", "10-KT", "10-Q", "10-QT", "20-F", "40-F",
"10-K/A", "10-KT/A", "10-Q/A", "10-QT/A", "20-F/A", "40-F/A"}:
defaultContextSharesOutstandingValue = deiItems.get("EntityCommonStockSharesOutstanding")
errLevel = "WARNING" if validateEFMpragmatic else "ERROR"
if commonSharesClassMembers:
if defaultContextSharesOutstandingValue: # checks that it exists and is not empty or nil
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but not in the default context because there are multiple classes of common shares"),
modelObject=documentTypeFact, contextID=documentTypeFact.contextID, documentType=documentType)
elif len(commonSharesClassMembers) == 1: # and not hasDefinedStockAxis:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but but a default-context because only one class of stock"),
modelObject=documentTypeFact, documentType=documentType)
''' per Dean R, this test no longer makes sense because we don't check against def LB defined members
missingClasses = commonSharesClassMembers - _DICT_SET(commonSharesItemsByStockClass.keys())
if missingClasses:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but missing in these stock classes: %(stockClasses)s"),
modelObject=documentTypeFact, documentType=documentType, stockClasses=", ".join([str(c) for c in missingClasses]))
'''
for mem, facts in commonSharesItemsByStockClass.items():
if len(facts) != 1:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but only one per stock class %(stockClass)s"),
modelObject=documentTypeFact, documentType=documentType, stockClass=mem)
''' removed per ARELLE-124 (should check measurement date vs report date)
elif facts[0].context.instantDatetime != commonStockMeasurementDatetime:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' in stock class %(stockClass)s with measurement date %(date)s"),
modelObject=documentTypeFact, documentType=documentType, stockClass=mem, date=commonStockMeasurementDatetime)
'''
elif hasCommonSharesOutstandingDimensionedFactWithDefaultStockClass and not defaultContextSharesOutstandingValue:
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' but missing for a non-default-context fact"),
modelObject=documentTypeFact, documentType=documentType)
elif not defaultContextSharesOutstandingValue: # missing, empty, or nil
modelXbrl.log(errLevel, "EFM.6.05.26",
_("dei:EntityCommonStockSharesOutstanding is required for DocumentType '%(documentType)s' in the default context because there are not multiple classes of common shares"),
modelObject=documentTypeFact, documentType=documentType)
if documentType in ("SD", "SD/A"): # SD documentType
self.modelXbrl.profileActivity("... filer required facts checks (other than SD)", minTimeToShow=1.0)
rxdNs = None # find RXD schema
rxdDoc = None
hasRxdPre = hasRxdDef = False
for rxdLoc in disclosureSystem.familyHrefs["RXD"]:
rxdUri = rxdLoc.href
if rxdUri in modelXbrl.urlDocs:
if rxdUri.endswith(".xsd") and rxdLoc.elements == "1":
if rxdNs is None:
rxdDoc = modelXbrl.urlDocs[rxdUri]
rxdNs = rxdDoc.targetNamespace
else:
modelXbrl.error("EFM.6.23.10",
_("The DTS of must use only one version of the RXD schema"),
modelObject=(rxdDoc, modelXbrl.urlDocs[rxdUri]), instance=instanceName)
elif "/rxd-pre-" in rxdUri:
hasRxdPre = True
elif "/rxd-def-" in rxdUri:
hasRxdDef = True
if not hasRxdPre:
modelXbrl.error("EFM.6.23.08",
_("The DTS must use a standard presentation linkbase from Family RXD in edgartaxonomies.xml."),
modelObject=modelXbrl, instance=instanceName)
if not hasRxdDef:
modelXbrl.error("EFM.6.23.09",
_("The DTS must use a standard definition linkbase from Family RXD in edgartaxonomies.xml."),
modelObject=modelXbrl, instance=instanceName)
countryNs = None
deiNS = None
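# locate the single country schema namespace and the dei namespace in use; more than one country schema version is an error (6.23.11)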
for url, doc in modelXbrl.urlDocs.items():
if doc.type == ModelDocument.Type.SCHEMA:
if url.startswith("http://xbrl.sec.gov/country/"):
if countryNs is None:
countryNs = doc.targetNamespace
else:
modelXbrl.error("EFM.6.23.11",
_("The DTS must use must use only one version of the COUNTRY schema."),
modelObject=(doc
for url,doc in modelXbrl.urlDocs.items()
if url.startswith("http://xbrl.sec.gov/country/")), instance=instanceName)
if disclosureSystem.deiNamespacePattern.match(doc.targetNamespace):
deiNS = doc.targetNamespace
if rxdNs:
qn = ModelValue.qname(rxdNs, "AmendmentNumber")
if amendmentFlag == "true" and (
qn not in modelXbrl.factsByQname or not any(
f.context is not None and not f.context.hasSegment
for f in modelXbrl.factsByQname[qn])):
modelXbrl.error("EFM.6.23.06",
_("The value for dei:DocumentType, %(documentType)s, requires a value for rxd:AmendmentNumber in the Required Context."),
modelObject=modelXbrl, documentType=documentType)
else:
modelXbrl.error("EFM.6.23.07",
_("The DTS must use a standard schema from Family RXD in edgartaxonomies.xml."),
modelObject=modelXbrl, instance=instanceName)
class Rxd(): # fake class of rxd qnames based on discovered rxd namespace
def __init__(self):
for name in ("CountryAxis", "GovernmentAxis", "PaymentTypeAxis", "ProjectAxis","PmtAxis",
"AllGovernmentsMember", "AllProjectsMember","BusinessSegmentAxis", "EntityDomain",
"A", "Cm", "Co", "Cu", "D", "Gv", "E", "K", "Km", "P", "Payments", "Pr", "Sm"):
setattr(self, name, ModelValue.qname(rxdNs, "rxd:" + name))
rxd = Rxd()
f1 = deiFacts.get(disclosureSystem.deiCurrentFiscalYearEndDateElement)
if f1 is not None and documentPeriodEndDateFact is not None and f1.xValid >= VALID and documentPeriodEndDateFact.xValid >= VALID:
d = ModelValue.dateunionDate(documentPeriodEndDateFact.xValue)# is an end date, convert back to a start date without midnight part
if f1.xValue.month != d.month or f1.xValue.day != d.day:
modelXbrl.error("EFM.6.23.26",
_("The dei:CurrentFiscalYearEndDate, %(fyEndDate)s does not match the dei:DocumentReportingPeriod %(reportingPeriod)s"),
modelObject=(f1,documentPeriodEndDateFact), fyEndDate=f1.value, reportingPeriod=documentPeriodEndDateFact.value)
if (documentPeriodEndDateFact is not None and documentPeriodEndDateFact.xValid >= VALID and
not any(f2.xValue == documentPeriodEndDateFact.xValue
for f2 in modelXbrl.factsByQname[rxd.D]
if f2.xValid >= VALID)):
modelXbrl.error("EFM.6.23.27",
_("The dei:DocumentPeriodEndDate %(reportingPeriod)s has no corresponding rxd:D fact."),
modelObject=documentPeriodEndDateFact, reportingPeriod=documentPeriodEndDateFact.value)
for url,doc in modelXbrl.urlDocs.items():
if (url not in disclosureSystem.standardTaxonomiesDict and
doc.type == ModelDocument.Type.SCHEMA):
for concept in XmlUtil.children(doc.xmlRootElement, XbrlConst.xsd, "element"):
name = concept.name
if not concept.isAbstract and not concept.isTextBlock:
modelXbrl.error("EFM.6.23.12",
_("Extension concept %(concept)s is non-abstract and not a Text Block."),
modelObject=concept, schemaName=doc.basename, name=concept.name, concept=concept.qname)
elif name.endswith("Table") or name.endswith("Axis") or name.endswith("Domain"):
modelXbrl.error("EFM.6.23.13",
_("Extension concept %(concept)s is not allowed in an extension schema."),
modelObject=concept, schemaName=doc.basename, name=concept.name, concept=concept.qname)
self.modelXbrl.profileActivity("... SD checks 6-13, 26-27", minTimeToShow=1.0)
dimDefRelSet = modelXbrl.relationshipSet(XbrlConst.dimensionDefault)
dimDomRelSet = modelXbrl.relationshipSet(XbrlConst.dimensionDomain)
hypDimRelSet = modelXbrl.relationshipSet(XbrlConst.hypercubeDimension)
hasHypRelSet = modelXbrl.relationshipSet(XbrlConst.all)
for rel in dimDomRelSet.modelRelationships:
if (isinstance(rel.fromModelObject, ModelConcept) and isinstance(rel.toModelObject, ModelConcept) and
not dimDefRelSet.isRelated(rel.fromModelObject, "child", rel.toModelObject)):
modelXbrl.error("EFM.6.23.14",
_("The target of the dimension-domain relationship in role %(linkrole)s from %(source)s to %(target)s must be the default member of %(source)s."),
modelObject=(rel, rel.fromModelObject, rel.toModelObject),
linkbaseName=rel.modelDocument.basename, linkrole=rel.linkrole,
source=rel.fromModelObject.qname, target=rel.toModelObject.qname)
domMemRelSet = modelXbrl.relationshipSet(XbrlConst.domainMember)
memDim = {}
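# recursively walk from a member through domain-member, dimension-domain, hypercube-dimension and has-hypercube relationships to find its dimension(s); flags members reached from more than one dimension (6.23.16) and member namespace restrictions (6.23.17, 6.23.18)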
def checkMemMultDims(memRel, dimRel, elt, ELR, visited):
if elt not in visited:
visited.add(elt)
for rel in domMemRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR and isinstance(rel.fromModelObject, ModelConcept):
checkMemMultDims(memRel, None, rel.fromModelObject, rel.linkrole, visited)
for rel in dimDomRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR:
dim = rel.fromModelObject
mem = memRel.toModelObject
if isinstance(dim, ModelConcept) and isinstance(mem, ModelConcept):
if dim.qname == rxd.PaymentTypeAxis and not mem.modelDocument.targetNamespace.startswith("http://xbrl.sec.gov/rxd/"):
modelXbrl.error("EFM.6.23.17",
_("The member %(member)s in dimension rxd:PaymentTypeAxis in linkrole %(linkrole)s must be a QName with namespace that begins with \"http://xbrl.sec.gov/rxd/\". "),
modelObject=(rel, memRel, dim, mem), member=mem.qname, linkrole=rel.linkrole)
if dim.qname == rxd.CountryAxis and not mem.modelDocument.targetNamespace.startswith("http://xbrl.sec.gov/country/"):
modelXbrl.error("EFM.6.23.18",
_("The member %(member)s in dimension rxd:CountryAxis in linkrole %(linkrole)s must be a QName with namespace that begins with \"http://xbrl.sec.gov/country//\". "),
modelObject=(rel, memRel, dim, mem), member=mem.qname, linkrole=rel.linkrole)
checkMemMultDims(memRel, rel, rel.fromModelObject, rel.linkrole, visited)
for rel in hypDimRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR and isinstance(rel.fromModelObject, ModelConcept):
checkMemMultDims(memRel, dimRel, rel.fromModelObject, rel.linkrole, visited)
for rel in hasHypRelSet.toModelObject(elt):
if rel.consecutiveLinkrole == ELR and isinstance(rel.fromModelObject, ModelConcept):
linkrole = rel.linkrole
mem = memRel.toModelObject
if (mem,linkrole) not in memDim:
memDim[mem,linkrole] = (dimRel, memRel)
else:
otherDimRel, otherMemRel = memDim[mem,linkrole]
modelXbrl.error("EFM.6.23.16",
_("The member %(member)s has two dimensions, %(dimension1)s in linkrole %(linkrole1)s and %(dimension2)s in linkrole %(linkrole2)s. "),
modelObject=(dimRel, otherDimRel, memRel, otherMemRel, dimRel.fromModelObject, otherDimRel.fromModelObject),
member=mem.qname, dimension1=dimRel.fromModelObject.qname, linkrole1=linkrole,
dimension2=otherDimRel.fromModelObject.qname, linkrole2=otherDimRel.linkrole)
visited.discard(elt)
for rel in domMemRelSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept) and isinstance(rel.toModelObject, ModelConcept):
for rel2 in modelXbrl.relationshipSet(XbrlConst.domainMember, rel.consecutiveLinkrole).fromModelObject(rel.toModelObject):
if rel2.fromModelObject is not None and rel2.toModelObject is not None:
modelXbrl.error("EFM.6.23.15",
_("The domain-member relationship in %(linkrole)s from %(source)s to %(target)s is consecutive with domain-member relationship in %(linkrole2)s to %(target2)s. "),
modelObject=(rel, rel.fromModelObject, rel.toModelObject),
linkrole=rel.linkrole, linkrole2=rel2.linkrole,
source=rel.fromModelObject.qname, target=rel.toModelObject.qname, target2=rel2.toModelObject.qname)
checkMemMultDims(rel, None, rel.fromModelObject, rel.linkrole, set())
self.modelXbrl.profileActivity("... SD checks 14-18", minTimeToShow=1.0)
qnDeiEntityDomain = ModelValue.qname(deiNS, "dei:EntityDomain")
for relSet, dom, priItem, errCode in ((domMemRelSet, rxd.AllProjectsMember, rxd.Pr, "EFM.6.23.30"),
(domMemRelSet, rxd.AllGovernmentsMember, rxd.Gv, "EFM.6.23.31"),
(dimDomRelSet, rxd.BusinessSegmentAxis, rxd.Sm, "EFM.6.23.33"),
(domMemRelSet, qnDeiEntityDomain, rxd.E, "EFM.6.23.34")):
for f in modelXbrl.factsByQname[priItem]:
if (not f.isNil and f.xValid >= VALID and
not relSet.isRelated(dom, "descendant", f.xValue, isDRS=True)):
modelXbrl.error(errCode,
_("The %(fact)s %(value)s in context %(context)s is not a %(domain)s."),
modelObject=f, fact=priItem, value=f.xValue, context=f.context.id, domain=dom,
messageCodes=("EFM.6.23.30", "EFM.6.23.31", "EFM.6.23.33", "EFM.6.23.34"))
self.modelXbrl.profileActivity("... SD checks 30, 31, 33, 34", minTimeToShow=1.0)
cntxEqualFacts = defaultdict(list)
for f in modelXbrl.facts:
if f.context is not None:
cntxEqualFacts[f.context.contextDimAwareHash].append(f)
self.modelXbrl.profileActivity("... SD prepare facts by context", minTimeToShow=1.0)
qnCurrencyMeasure = XbrlConst.qnIsoCurrency(deiItems.get("EntityReportingCurrencyISOCode"))
currencyMeasures = ([qnCurrencyMeasure],[])
qnAllCountriesDomain = ModelValue.qname(countryNs, "country:AllCountriesDomain")
for cntxFacts in cntxEqualFacts.values():
qnameFacts = dict((f.qname,f) for f in cntxFacts)
context = cntxFacts[0].context
contextDims = cntxFacts[0].context.qnameDims
# required priItem values based on context dimension
for dim, priItem, errCode in ((rxd.PmtAxis, rxd.P, "EFM.6.23.20"),
(rxd.GovernmentAxis, rxd.Payments, "EFM.6.23.22")):
if context.hasDimension(dim) and (priItem not in qnameFacts or qnameFacts[priItem].isNil):
modelXbrl.error(errCode,
_("The Context %(context)s has dimension %(dimension)s member %(member)s but is missing required fact %(fact)s"),
modelObject=context, context=context.id, dimension=dim, member=context.dimMemberQname(dim), fact=priItem,
messageCodes=("EFM.6.23.20", "EFM.6.23.22"))
if (rxd.Co in qnameFacts and not qnameFacts[rxd.Co].isNil and
not domMemRelSet.isRelated(qnAllCountriesDomain, "descendant", qnameFacts[rxd.Co].xValue, isDRS=True)):
modelXbrl.error("EFM.6.23.44",
_("Fact rxd:Co value %(value)s in context %(context)s is not in the domain of country:AllCountriesDomain"),
modelObject=f, context=context.id, value=qnameFacts[rxd.Co].value)
# required present facts based on other present fact
for qnF, fNilOk, qnG, gNilOk, errCode in ((rxd.A, True, rxd.Cu, False, "EFM.6.23.24"),
(rxd.A, True, rxd.D, False, "EFM.6.23.25"),
(rxd.A, False, rxd.Gv, False, "EFM.6.23.28"),
(rxd.A, False, rxd.Co, False, "EFM.6.23.29"),
(rxd.Km, False, rxd.K, False, "EFM.6.23.35"),
(rxd.K, False, rxd.Km, False, "EFM.6.23.35"),
(rxd.Cm, False, rxd.Cu, False, "EFM.6.23.39"),
(rxd.K, False, rxd.A, False, "EFM.6.23.42"),
(rxd.Pr, False, rxd.A, False, "EFM.6.23.43")):
if (qnF in qnameFacts and (fNilOk or not qnameFacts[qnF].isNil) and
(qnG not in qnameFacts or (not gNilOk and qnameFacts[qnG].isNil))):
modelXbrl.error(errCode,
_("The Context %(context)s has a %(fact1)s and is missing required %(fact2NotNil)sfact %(fact2)s"),
modelObject=qnameFacts[qnF], context=context.id, fact1=qnF, fact2=qnG, fact2NotNil="" if gNilOk else "non-nil ",
messageCodes=("EFM.6.23.24", "EFM.6.23.25", "EFM.6.23.28", "EFM.6.23.29", "EFM.6.23.35",
"EFM.6.23.35", "EFM.6.23.39", "EFM.6.23.42", "EFM.6.23.43"))
for f in cntxFacts:
if (not context.hasDimension(rxd.PmtAxis) and f.isNumeric and
f.unit is not None and f.unit.measures != currencyMeasures):
modelXbrl.error("EFM.6.23.37",
_("Fact %(fact)s in context %(context)s has unit %(unit)s not matching dei:EntityReportingCurrencyISOCode %(currency)s"),
modelObject=f, fact=f.qname, context=context.id, unit=f.unit.value, currency=qnCurrencyMeasure)
if (rxd.A in qnameFacts and not qnameFacts[rxd.A].isNil and
rxd.Cm in qnameFacts and not qnameFacts[rxd.Cm].isNil and
qnameFacts[rxd.A].unit is not None and qnameFacts[rxd.A].unit.measures == currencyMeasures):
modelXbrl.error("EFM.6.23.38",
_("A value cannot be given for rxd:Cm in context %(context)s because the payment is in the reporting currency %(currency)s."),
modelObject=(qnameFacts[rxd.A],qnameFacts[rxd.Cm]), context=context.id, currency=qnCurrencyMeasure)
if (rxd.A in qnameFacts and
rxd.Cu in qnameFacts and not qnameFacts[rxd.Cu].isNil and
qnameFacts[rxd.A].unit is not None and qnameFacts[rxd.A].unit.measures != ([XbrlConst.qnIsoCurrency(qnameFacts[rxd.Cu].xValue)],[])):
modelXbrl.error("EFM.6.23.41",
_("The unit %(unit)s of rxd:A in context %(context)s is not consistent with the value %(currency)s of rxd:Cu."),
modelObject=(qnameFacts[rxd.A],qnameFacts[rxd.Cu]), context=context.id, unit=qnameFacts[rxd.A].unit.value, currency=qnameFacts[rxd.Cu].value)
if (context.hasDimension(rxd.ProjectAxis) and
not any(f.xValue == m
for m in (contextDims[rxd.ProjectAxis].memberQname,)
for f in modelXbrl.factsByQname[rxd.Pr]
if f.context is not None)):
modelXbrl.error("EFM.6.23.19",
_("The Context %(context)s has dimension %(dimension)s but is missing any payment."),
modelObject=context, context=context.id, dimension=rxd.ProjectAxis)
if (context.hasDimension(rxd.GovernmentAxis) and
not any(f.xValue == m and f.context.hasDimension(rxd.PmtAxis)
for m in (contextDims[rxd.GovernmentAxis].memberQname,)
for f in modelXbrl.factsByQname[rxd.Gv]
if f.context is not None)):
modelXbrl.error("EFM.6.23.21",
_("The Context %(context)s has dimension %(dimension)s member %(member)s but is missing any payment."),
modelObject=context, context=context.id, dimension=rxd.GovernmentAxis, member=context.dimMemberQname(rxd.GovernmentAxis))
if rxd.P in qnameFacts and not any(f.context is not None and not f.context.hasSegment
for f in modelXbrl.factsByQname.get(qnameFacts[rxd.P].xValue,())):
modelXbrl.error("EFM.6.23.23",
_("The Context %(context)s has payment type %(paymentType)s but is missing a corresponding fact in the required context."),
modelObject=context, context=context.id, paymentType=qnameFacts[rxd.P].xValue)
if not context.hasDimension(rxd.PmtAxis) and rxd.A in qnameFacts and not qnameFacts[rxd.A].isNil:
modelXbrl.error("EFM.6.23.40",
_("There is a non-nil rxd:A in context %(context)s but missing a dimension rxd:PmtAxis."),
modelObject=(context, qnameFacts[rxd.A]), context=context.id)
self.modelXbrl.profileActivity("... SD by context for 19-25, 28-29, 35, 37-39, 40-44", minTimeToShow=1.0)
for f in modelXbrl.factsByQname[rxd.D]:
if not f.isNil and f.xValid >= VALID and f.xValue + datetime.timedelta(1) != f.context.endDatetime: # date needs to be midnite to compare to datetime
modelXbrl.error("EFM.6.23.32",
_("The rxd:D %(value)s in context %(context)s does not match the context end date %(endDate)s."),
modelObject=f, value=f.xValue, context=f.context.id, endDate=XmlUtil.dateunionValue(f.context.endDatetime, subtractOneDay=True))
self.modelXbrl.profileActivity("... SD checks 32 (last SD check)", minTimeToShow=1.0)
# dereference object references no longer needed
del rxdDoc, cntxEqualFacts
# dereference compatibly with 2.7 (as these may be used in nested contexts above)
hasHypRelSet = hypDimRelSet = dimDefRelSet = domMemRelSet = dimDomRelSet = None
memDim.clear()
else: # non-SD documentType
pass # no non-SD tests yet
elif disclosureSystem.GFM:
for deiItem in (
disclosureSystem.deiCurrentFiscalYearEndDateElement,
disclosureSystem.deiDocumentFiscalYearFocusElement,
disclosureSystem.deiFilerNameElement):
if deiItem not in deiItems or deiItems[deiItem] == "":
modelXbrl.error("GFM.3.02.01",
_("dei:%(elementName)s is required in the default context"),
modelXbrl=modelXbrl, elementName=deiItem)
if documentType not in ("SD", "SD/A"):
self.modelXbrl.profileActivity("... filer required facts checks", minTimeToShow=1.0)
#6.5.27 footnote elements, etc
footnoteLinkNbr = 0
for footnoteLinkElt in xbrlInstDoc.iterdescendants(tag="{http://www.xbrl.org/2003/linkbase}footnoteLink"):
if isinstance(footnoteLinkElt,ModelObject):
footnoteLinkNbr += 1
linkrole = footnoteLinkElt.get("{http://www.w3.org/1999/xlink}role")
if linkrole != XbrlConst.defaultLinkRole:
modelXbrl.error(("EFM.6.05.28.linkrole", "GFM.1.02.20"),
_("FootnoteLink %(footnoteLinkNumber)s has disallowed role %(linkrole)s"),
modelObject=footnoteLinkElt, footnoteLinkNumber=footnoteLinkNbr, linkrole=linkrole)
# find modelLink of this footnoteLink
# modelLink = modelXbrl.baseSetModelLink(footnoteLinkElt)
relationshipSet = modelXbrl.relationshipSet("XBRL-footnotes", linkrole)
#if (modelLink is None) or (not relationshipSet):
# continue # had no child elements to parse
locNbr = 0
arcNbr = 0
for child in footnoteLinkElt:
if isinstance(child,ModelObject):
xlinkType = child.get("{http://www.w3.org/1999/xlink}type")
if child.namespaceURI != XbrlConst.link or \
xlinkType not in ("locator", "resource", "arc") or \
child.localName not in ("loc", "footnote", "footnoteArc"):
modelXbrl.error(("EFM.6.05.27", "GFM.1.02.19"),
_("FootnoteLink %(footnoteLinkNumber)s has disallowed child element %(elementName)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr, elementName=child.prefixedName)
elif xlinkType == "locator":
locNbr += 1
locrole = child.get("{http://www.w3.org/1999/xlink}role")
if locrole is not None and (disclosureSystem.GFM or \
not disclosureSystem.uriAuthorityValid(locrole)):
modelXbrl.error(("EFM.6.05.29", "GFM.1.02.21"),
_("FootnoteLink %(footnoteLinkNumber)s loc %(locNumber)s has disallowed role %(role)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr,
xlinkLabel=child.xlinkLabel,
locNumber=locNbr, role=locrole)
href = child.get("{http://www.w3.org/1999/xlink}href")
if not href.startswith("#"):
modelXbrl.error(("EFM.6.05.32", "GFM.1.02.23"),
_("FootnoteLink %(footnoteLinkNumber)s loc %(locNumber)s has disallowed href %(locHref)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr, locNumber=locNbr, locHref=href,
locLabel=child.get("{http://www.w3.org/1999/xlink}label"))
else:
label = child.get("{http://www.w3.org/1999/xlink}label")
elif xlinkType == "arc":
arcNbr += 1
arcrole = child.get("{http://www.w3.org/1999/xlink}arcrole")
if (self.validateEFM and not disclosureSystem.uriAuthorityValid(arcrole)) or \
(disclosureSystem.GFM and arcrole != XbrlConst.factFootnote and arcrole != XbrlConst.factExplanatoryFact):
modelXbrl.error(("EFM.6.05.30", "GFM.1.02.22"),
_("FootnoteLink %(footnoteLinkNumber)s arc %(arcNumber)s has disallowed arcrole %(arcrole)s"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr, arcNumber=arcNbr,
arcToLabel=child.get("{http://www.w3.org/1999/xlink}to"),
arcrole=arcrole)
elif xlinkType == "resource": # footnote
footnoterole = child.get("{http://www.w3.org/1999/xlink}role")
if footnoterole == "":
modelXbrl.error(("EFM.6.05.28.missingRole", "GFM.1.2.20"),
_("Footnote %(xlinkLabel)s is missing a role"),
modelObject=child, xlinkLabel=child.get("{http://www.w3.org/1999/xlink}label"))
elif (self.validateEFM and not disclosureSystem.uriAuthorityValid(footnoterole)) or \
(disclosureSystem.GFM and footnoterole != XbrlConst.footnote):
modelXbrl.error(("EFM.6.05.28", "GFM.1.2.20"),
_("Footnote %(xlinkLabel)s has disallowed role %(role)s"),
modelObject=child, xlinkLabel=child.get("{http://www.w3.org/1999/xlink}label"),
role=footnoterole)
if self.validateEFM:
ValidateFilingText.validateFootnote(modelXbrl, child)
# find modelResource for this element
foundFact = False
if XmlUtil.text(child) != "":
if relationshipSet:
for relationship in relationshipSet.toModelObject(child):
if isinstance(relationship.fromModelObject, ModelFact):
foundFact = True
break
if not foundFact:
modelXbrl.error(("EFM.6.05.33", "GFM.1.02.24"),
_("FootnoteLink %(footnoteLinkNumber)s footnote %(footnoteLabel)s has no linked fact"),
modelObject=child, footnoteLinkNumber=footnoteLinkNbr,
footnoteLabel=child.get("{http://www.w3.org/1999/xlink}label"),
text=XmlUtil.text(child)[:100])
self.modelXbrl.profileActivity("... filer rfootnotes checks", minTimeToShow=1.0)
# entry point schema checks
elif modelXbrl.modelDocument.type == ModelDocument.Type.SCHEMA:
if self.validateSBRNL:
# entry must have a P-link
if not any(hrefElt.localName == "linkbaseRef" and hrefElt.get("{http://www.w3.org/1999/xlink}role") == "http://www.xbrl.org/2003/role/presentationLinkbaseRef"
for hrefElt, hrefDoc, hrefId in modelXbrl.modelDocument.hrefObjects):
modelXbrl.error("SBR.NL.2.2.10.01",
'Entrypoint schema must have a presentation linkbase', modelObject=modelXbrl.modelDocument)
# all-labels and references checks
defaultLangStandardLabels = {}
for concept in modelXbrl.qnameConcepts.values():
conceptHasDefaultLangStandardLabel = False
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
role = modelLabel.role
text = modelLabel.text
lang = modelLabel.xmlLang
if role == XbrlConst.documentationLabel:
if concept.modelDocument.targetNamespace in disclosureSystem.standardTaxonomiesDict:
modelXbrl.error(("EFM.6.10.05", "GFM.1.05.05", "SBR.NL.2.1.0.08"),
_("Concept %(concept)s of a standard taxonomy cannot have a documentation label: %(text)s"),
modelObject=modelLabel, concept=concept.qname, text=text)
elif text and lang and disclosureSystem.defaultXmlLang and lang.startswith(disclosureSystem.defaultXmlLang):
if role == XbrlConst.standardLabel:
if text in defaultLangStandardLabels:
concept2, modelLabel2 = defaultLangStandardLabels[text]
modelXbrl.error(("EFM.6.10.04", "GFM.1.05.04"),
_("Same labels for concepts %(concept)s and %(concept2)s for %(lang)s standard role: %(text)s."),
modelObject=(concept, modelLabel, concept2, modelLabel2),
concept=concept.qname,
concept2=concept2.qname,
lang=disclosureSystem.defaultLanguage, text=text[:80])
else:
defaultLangStandardLabels[text] = (concept, modelLabel)
conceptHasDefaultLangStandardLabel = True
if len(text) > 511:
modelXbrl.error(("EFM.6.10.06", "GFM.1.05.06"),
_("Label for concept %(concept)s role %(role)s length %(length)s must be shorter than 511 characters: %(text)s"),
modelObject=modelLabel, concept=concept.qname, role=role, length=len(text), text=text[:80])
match = modelXbrl.modelManager.disclosureSystem.labelCheckPattern.search(text)
if match:
modelXbrl.error(("EFM.6.10.06", "GFM.1.05.07", "SBR.NL.2.3.8.07"),
'Label for concept %(concept)s role %(role)s has disallowed characters: "%(text)s"',
modelObject=modelLabel, concept=concept.qname, role=role, text=match.group())
if (text is not None and len(text) > 0 and
modelXbrl.modelManager.disclosureSystem.labelTrimPattern and
(modelXbrl.modelManager.disclosureSystem.labelTrimPattern.match(text[0]) or \
modelXbrl.modelManager.disclosureSystem.labelTrimPattern.match(text[-1]))):
modelXbrl.error(("EFM.6.10.08", "GFM.1.05.08"),
_("Label for concept %(concept)s role %(role)s lang %(lang)s is not trimmed: %(text)s"),
modelObject=modelLabel, concept=concept.qname, role=role, lang=lang, text=text)
for modelRefRel in referencesRelationshipSetWithProhibits.fromModelObject(concept):
modelReference = modelRefRel.toModelObject
text = XmlUtil.innerText(modelReference)
#6.18.1 no reference to company extension concepts
if concept.modelDocument.targetNamespace not in disclosureSystem.standardTaxonomiesDict:
modelXbrl.error(("EFM.6.18.01", "GFM.1.9.1"),
_("References for extension concept %(concept)s are not allowed: %(text)s"),
modelObject=modelReference, concept=concept.qname, text=text, xml=XmlUtil.xmlstring(modelReference, stripXmlns=True, contentsOnly=True))
elif (self.validateEFM or self.validateSBRNL) and not self.isStandardUri(modelRefRel.modelDocument.uri):
#6.18.2 no extension to add or remove references to standard concepts
modelXbrl.error(("EFM.6.18.02", "SBR.NL.2.1.0.08"),
_("References for standard taxonomy concept %(concept)s are not allowed in an extension linkbase: %(text)s"),
modelObject=modelReference, concept=concept.qname, text=text, xml=XmlUtil.xmlstring(modelReference, stripXmlns=True, contentsOnly=True))
if self.validateSBRNL and (concept.isItem or concept.isTuple):
if concept.modelDocument.targetNamespace not in disclosureSystem.standardTaxonomiesDict:
if not conceptHasDefaultLangStandardLabel:
modelXbrl.error("SBR.NL.2.2.2.26",
_("Concept %(concept)s missing standard label in local language."),
modelObject=concept, concept=concept.qname)
subsGroup = concept.get("substitutionGroup")
if ((not concept.isAbstract or subsGroup == "sbr:presentationItem") and
not (presentationRelationshipSet.toModelObject(concept) or
presentationRelationshipSet.fromModelObject(concept))):
modelXbrl.error("SBR.NL.2.2.2.04",
_("Concept %(concept)s not referred to by presentation relationship."),
modelObject=concept, concept=concept.qname)
elif ((concept.isDimensionItem or
(subsGroup and (subsGroup.endswith(":domainItem") or subsGroup.endswith(":domainMemberItem")))) and
not (presentationRelationshipSet.toModelObject(concept) or
presentationRelationshipSet.fromModelObject(concept))):
modelXbrl.error("SBR.NL.2.2.10.03",
_("DTS concept %(concept)s not referred to by presentation relationship."),
modelObject=concept, concept=concept.qname)
if (concept.substitutionGroupQname and
concept.substitutionGroupQname.namespaceURI not in disclosureSystem.baseTaxonomyNamespaces):
modelXbrl.error("SBR.NL.2.2.2.05",
_("Concept %(concept)s has a substitutionGroup of a non-standard concept."),
modelObject=concept, concept=concept.qname)
if concept.isTuple: # verify same presentation linkbase nesting
for missingQname in set(concept.type.elements) ^ pLinkedNonAbstractDescendantQnames(modelXbrl, concept):
modelXbrl.error("SBR.NL.2.3.4.01",
_("Tuple %(concept)s has mismatch between content and presentation children: %(missingQname)s."),
modelObject=concept, concept=concept.qname, missingQname=missingQname)
self.checkConceptLabels(modelXbrl, labelsRelationshipSet, disclosureSystem, concept)
self.checkConceptLabels(modelXbrl, genLabelsRelationshipSet, disclosureSystem, concept)
# role types checks
# 6.7.10 only one role type declaration in DTS
for roleURI, modelRoleTypes in modelXbrl.roleTypes.items():
if len(modelRoleTypes) > 1:
modelXbrl.error(("EFM.6.07.10", "GFM.1.03.10"),
_("RoleType %(roleType)s is defined in multiple taxonomies"),
modelObject=modelRoleTypes, roleType=roleURI, numberOfDeclarations=len(modelRoleTypes))
# 6.7.14 only one arcrole type declaration in DTS
for arcroleURI, modelRoleTypes in modelXbrl.arcroleTypes.items():
if len(modelRoleTypes) > 1:
modelXbrl.error(("EFM.6.07.14", "GFM.1.03.16"),
_("ArcroleType %(arcroleType)s is defined in multiple taxonomies"),
modelObject=modelRoleTypes, arcroleType=arcroleURI, numberOfDeclarations=len(modelRoleTypes) )
self.modelXbrl.profileActivity("... filer concepts checks", minTimeToShow=1.0)
del defaultLangStandardLabels #dereference
# checks on all documents: instance, schema, linkbase
ValidateFilingDTS.checkDTS(self, modelXbrl.modelDocument, [])
''' removed RH 2011-12-23, corresponding use of nameWordsTable in ValidateFilingDTS
if self.validateSBRNL:
del self.nameWordsTable
'''
self.modelXbrl.profileActivity("... filer DTS checks", minTimeToShow=1.0)
# checks for namespace clashes
if self.validateEFM:
# check number of us-roles taxonomies referenced
for conflictClass, modelDocuments in self.standardNamespaceConflicts.items():
if len(modelDocuments) > 1:
modelXbrl.error("EFM.6.22.03",
_("References for conflicting standard %(conflictClass)s taxonomies %(namespaceConflicts)s are not allowed in same DTS"),
modelObject=modelXbrl, conflictClass=conflictClass,
namespaceConflicts=sorted((d.targetNamespace for d in modelDocuments),
key=lambda ns: ns.rpartition('/')[2]))
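# accumulators shared by the arcrole passes below and the follow-up checks (preferred label usage, calc/presentation pairing, DRS link roles)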
conceptRelsUsedWithPreferredLabels = defaultdict(list)
usedCalcsPresented = defaultdict(set) # pairs of concepts objectIds used in calc
usedCalcFromTosELR = {}
localPreferredLabels = defaultdict(set)
drsELRs = set()
# do calculation, then presentation, then other arcroles
self.summationItemRelsSetAllELRs = modelXbrl.relationshipSet(XbrlConst.summationItem)
for arcroleFilter in (XbrlConst.summationItem, XbrlConst.parentChild, "*"):
for baseSetKey, baseSetModelLinks in modelXbrl.baseSets.items():
arcrole, ELR, linkqname, arcqname = baseSetKey
if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-"):
# assure summationItem, then parentChild, then others
if not (arcroleFilter == arcrole or
arcroleFilter == "*" and arcrole not in (XbrlConst.summationItem, XbrlConst.parentChild)):
continue
if self.validateEFMorGFM or (self.validateSBRNL and arcrole == XbrlConst.parentChild):
ineffectiveArcs = ModelRelationshipSet.ineffectiveArcs(baseSetModelLinks, arcrole)
#validate ineffective arcs
for modelRel in ineffectiveArcs:
if modelRel.fromModelObject is not None and modelRel.toModelObject is not None:
modelXbrl.error(("EFM.6.09.03", "GFM.1.04.03", "SBR.NL.2.3.4.06"),
_("Ineffective arc %(arc)s in \nlink role %(linkrole)s \narcrole %(arcrole)s \nfrom %(conceptFrom)s \nto %(conceptTo)s \n%(ineffectivity)s"),
modelObject=modelRel, arc=modelRel.qname, arcrole=modelRel.arcrole,
linkrole=modelRel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(modelRel.linkrole),
conceptFrom=modelRel.fromModelObject.qname, conceptTo=modelRel.toModelObject.qname,
ineffectivity=modelRel.ineffectivity)
if arcrole == XbrlConst.parentChild:
isStatementSheet = any(linkroleDefinitionStatementSheet.match(roleType.definition or '')
for roleType in self.modelXbrl.roleTypes.get(ELR,()))
conceptsPresented = set()
# 6.12.2 check for distinct order attributes
parentChildRels = modelXbrl.relationshipSet(arcrole, ELR)
for relFrom, siblingRels in parentChildRels.fromModelObjects().items():
targetConceptPreferredLabels = defaultdict(dict)
orderRels = {}
firstRel = True
relFromUsed = True
for rel in siblingRels:
if firstRel:
firstRel = False
if relFrom in conceptsUsed:
conceptsUsed[relFrom] = True # 6.12.3, has a pres relationship
relFromUsed = True
relTo = rel.toModelObject
preferredLabel = rel.preferredLabel
if relTo in conceptsUsed:
conceptsUsed[relTo] = True # 6.12.3, has a pres relationship
if preferredLabel and preferredLabel != "":
conceptRelsUsedWithPreferredLabels[relTo].append(rel)
if self.validateSBRNL and preferredLabel in ("periodStart","periodEnd"):
modelXbrl.error("SBR.NL.2.3.4.03",
_("Preferred label on presentation relationships not allowed"), modelObject=modelRel)
# 6.12.5 distinct preferred labels in base set
preferredLabels = targetConceptPreferredLabels[relTo]
if (preferredLabel in preferredLabels or
(self.validateSBRNL and not relFrom.isTuple and
(not preferredLabel or None in preferredLabels))):
if preferredLabel in preferredLabels:
rel2, relTo2 = preferredLabels[preferredLabel]
else:
rel2 = relTo2 = None
modelXbrl.error(("EFM.6.12.05", "GFM.1.06.05", "SBR.NL.2.3.4.06"),
_("Concept %(concept)s has duplicate preferred label %(preferredLabel)s in link role %(linkrole)s"),
modelObject=(rel, relTo, rel2, relTo2),
concept=relTo.qname, fromConcept=rel.fromModelObject.qname,
preferredLabel=preferredLabel, linkrole=rel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(rel.linkrole))
else:
preferredLabels[preferredLabel] = (rel, relTo)
if relFromUsed:
# 6.14.5
conceptsPresented.add(relFrom.objectIndex)
conceptsPresented.add(relTo.objectIndex)
order = rel.order
if order in orderRels:
modelXbrl.error(("EFM.6.12.02", "GFM.1.06.02", "SBR.NL.2.3.4.05"),
_("Duplicate presentation relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, orderRels[order]), conceptFrom=relFrom.qname, order=rel.arcElement.get("order"), linkrole=rel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(rel.linkrole),
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
if self.validateSBRNL and not relFrom.isTuple:
if relTo in localPreferredLabels:
if {None, preferredLabel} & localPreferredLabels[relTo]:
self.modelXbrl.error("SBR.NL.2.3.4.06",
_("Non-distinguished preferredLabel presentation relations from concept %(conceptFrom)s in base set role %(linkrole)s"),
modelObject=rel, conceptFrom=relFrom.qname, linkrole=rel.linkrole, conceptTo=relTo.qname)
localPreferredLabels[relTo].add(preferredLabel)
targetConceptPreferredLabels.clear()
orderRels.clear()
localPreferredLabels.clear() # clear for next relationship
for conceptPresented in conceptsPresented:
if conceptPresented in usedCalcsPresented:
usedCalcPairingsOfConcept = usedCalcsPresented[conceptPresented]
if len(usedCalcPairingsOfConcept & conceptsPresented) > 0:
usedCalcPairingsOfConcept -= conceptsPresented
# 6.15.02, 6.15.03 semantics checks for totals and calc arcs (by tree walk)
if validateLoggingSemantic:
for rootConcept in parentChildRels.rootConcepts:
self.checkCalcsTreeWalk(parentChildRels, rootConcept, isStatementSheet, False, conceptsUsed, set())
elif arcrole == XbrlConst.summationItem:
if self.validateEFMorGFM:
# 6.14.3 check for relation concept periods
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
allElrRelSet = modelXbrl.relationshipSet(arcrole)
for relFrom, rels in fromRelationships.items():
orderRels = {}
for rel in rels:
relTo = rel.toModelObject
# 6.14.03 must have matched period types across relationshp
if isinstance(relTo, ModelConcept) and relFrom.periodType != relTo.periodType:
self.modelXbrl.error(("EFM.6.14.03", "GFM.1.07.03"),
"Calculation relationship period types mismatched in base set role %(linkrole)s from %(conceptFrom)s to %(conceptTo)s",
modelObject=rel, linkrole=rel.linkrole, conceptFrom=relFrom.qname, conceptTo=relTo.qname, linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
# 6.14.5 concepts used must have pres in same ext link
if relFrom in conceptsUsed and relTo in conceptsUsed:
fromObjId = relFrom.objectIndex
toObjId = relTo.objectIndex
if fromObjId < toObjId:
usedCalcsPresented[fromObjId].add(toObjId)
else:
usedCalcsPresented[toObjId].add(fromObjId)
order = rel.order
if order in orderRels and disclosureSystem.GFM:
self.modelXbrl.error(("EFM.N/A", "GFM.1.07.06"),
_("Duplicate calculations relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, orderRels[order]), linkrole=rel.linkrole, conceptFrom=relFrom.qname, order=order,
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
directedCycleRels = self.directedCycle(relFrom,relFrom,fromRelationships,{relFrom})
if directedCycleRels is not None:
self.modelXbrl.error(("EFM.6.14.04", "GFM.1.07.04"),
_("Calculation relationships have a directed cycle in base set role %(linkrole)s starting from %(concept)s"),
modelObject=[relFrom] + directedCycleRels, linkrole=ELR, concept=relFrom.qname, linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
orderRels.clear()
# if relFrom used by fact and multiple calc networks from relFrom, test 6.15.04
if rels and relFrom in conceptsUsed:
relFromAndTos = (relFrom.objectIndex,) + tuple(sorted((rel.toModelObject.objectIndex
for rel in rels if isinstance(rel.toModelObject, ModelConcept))))
if relFromAndTos in usedCalcFromTosELR:
otherRels = usedCalcFromTosELR[relFromAndTos]
otherELR = otherRels[0].linkrole
self.modelXbrl.log("WARNING-SEMANTIC", ("EFM.6.15.04", "GFM.2.06.04"),
_("Calculation relationships should have a same set of targets in %(linkrole)s and %(linkrole2)s starting from %(concept)s"),
modelObject=[relFrom] + rels + otherRels, linkrole=ELR, linkrole2=otherELR, concept=relFrom.qname)
else:
usedCalcFromTosELR[relFromAndTos] = rels
elif self.validateSBRNL:
# find a calc relationship to get the containing document name
for modelRel in self.modelXbrl.relationshipSet(arcrole, ELR).modelRelationships:
self.modelXbrl.error("SBR.NL.2.3.9.01",
_("Calculation linkbase linkrole %(linkrole)s"),
modelObject=modelRel, linkrole=ELR)
break
elif arcrole == XbrlConst.all or arcrole == XbrlConst.notAll:
drsELRs.add(ELR)
elif arcrole == XbrlConst.dimensionDomain or arcrole == XbrlConst.dimensionDefault and \
self.validateEFMorGFM:
# 6.16.3 check domain targets in extension linkbases are domain items
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
for relFrom, rels in fromRelationships.items():
for rel in rels:
relTo = rel.toModelObject
if not (isinstance(relTo, ModelConcept) and relTo.type is not None and relTo.type.isDomainItemType) and not self.isStandardUri(rel.modelDocument.uri):
self.modelXbrl.error(("EFM.6.16.03", "GFM.1.08.03"),
_("Definition relationship from %(conceptFrom)s to %(conceptTo)s in role %(linkrole)s requires domain item target"),
modelObject=(rel, relFrom, relTo), conceptFrom=relFrom.qname, conceptTo=(relTo.qname if relTo is not None else None), linkrole=rel.linkrole)
elif self.validateSBRNL:
if arcrole == XbrlConst.dimensionDefault:
for modelRel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
self.modelXbrl.error("SBR.NL.2.3.6.05",
_("Dimension-default in from %(conceptFrom)s to %(conceptTo)s in role %(linkrole)s is not allowed"),
modelObject=modelRel, conceptFrom=modelRel.fromModelObject.qname, conceptTo=modelRel.toModelObject.qname,
linkrole=modelRel.linkrole)
''' removed per RH 2013-01-11
if not (XbrlConst.isStandardArcrole(arcrole) or XbrlConst.isDefinitionOrXdtArcrole(arcrole)):
for modelRel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
relTo = modelRel.toModelObject
relFrom = modelRel.fromModelObject
if not ((isinstance(relFrom,ModelConcept) and isinstance(relTo,ModelConcept)) or
(relFrom.modelDocument.inDTS and
(relTo.qname == XbrlConst.qnGenLabel and modelRel.arcrole == XbrlConst.elementLabel) or
(relTo.qname == XbrlConst.qnGenReference and modelRel.arcrole == XbrlConst.elementReference) or
(relTo.qname == self.qnSbrLinkroleorder))):
self.modelXbrl.error("SBR.NL.2.3.2.07",
_("The source and target of an arc must be in the DTS from %(elementFrom)s to %(elementTo)s, in linkrole %(linkrole)s, arcrole %(arcrole)s"),
modelObject=modelRel, elementFrom=relFrom.qname, elementTo=relTo.qname,
linkrole=modelRel.linkrole, arcrole=arcrole)
'''
# definition tests (GFM only, for now)
if XbrlConst.isDefinitionOrXdtArcrole(arcrole) and disclosureSystem.GFM:
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
for relFrom, rels in fromRelationships.items():
orderRels = {}
for rel in rels:
relTo = rel.toModelObject
order = rel.order
if order in orderRels and disclosureSystem.GFM:
self.modelXbrl.error("GFM.1.08.10",
_("Duplicate definitions relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, relFrom, relTo), conceptFrom=relFrom.qname, order=order, linkrole=rel.linkrole,
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
if (arcrole not in (XbrlConst.dimensionDomain, XbrlConst.domainMember) and
rel.get("{http://xbrl.org/2005/xbrldt}usable") == "false"):
self.modelXrl.error("GFM.1.08.11",
_("Disallowed xbrldt:usable='false' attribute on %(arc)s relationship from concept %(conceptFrom)s in base set role %(linkrole)s to concept %(conceptTo)s"),
modelObject=(rel, relFrom, relTo), arc=rel.qname, conceptFrom=relFrom.qname, linkrole=rel.linkrole, conceptTo=rel.toModelObject.qname)
del localPreferredLabels # dereference
del usedCalcFromTosELR
del self.summationItemRelsSetAllELRs
self.modelXbrl.profileActivity("... filer relationships checks", minTimeToShow=1.0)
# checks on dimensions
ValidateFilingDimensions.checkDimensions(self, drsELRs)
self.modelXbrl.profileActivity("... filer dimensions checks", minTimeToShow=1.0)
for concept, hasPresentationRelationship in conceptsUsed.items():
if not hasPresentationRelationship:
self.modelXbrl.error(("EFM.6.12.03", "GFM.1.6.3"),
_("Concept used in instance %(concept)s does not participate in an effective presentation relationship"),
modelObject=[concept] + list(modelXbrl.factsByQname[concept.qname]), concept=concept.qname)
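# 6.14.05: calculation pairs whose concepts are both used by facts must also be presented together in some link role; report any pairs that never were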
for fromIndx, toIndxs in usedCalcsPresented.items():
for toIndx in toIndxs:
fromModelObject = self.modelXbrl.modelObject(fromIndx)
toModelObject = self.modelXbrl.modelObject(toIndx)
calcRels = modelXbrl.relationshipSet(XbrlConst.summationItem) \
.fromToModelObjects(fromModelObject, toModelObject, checkBothDirections=True)
fromFacts = self.modelXbrl.factsByQname[fromModelObject.qname]
toFacts = self.modelXbrl.factsByQname[toModelObject.qname]
fromFactContexts = set(f.context.contextNonDimAwareHash for f in fromFacts if f.context is not None)
contextId = backupId = None # for EFM message
for f in toFacts:
if f.context is not None:
if f.context.contextNonDimAwareHash in fromFactContexts:
contextId = f.context.id
break
backupId = f.context.id
if contextId is None:
contextId = backupId
self.modelXbrl.error(("EFM.6.14.05", "GFM.1.7.5"),
_("Used calculation relationship from %(conceptFrom)s to %(conceptTo)s does not participate in an effective presentation relationship"),
modelObject=calcRels + [fromModelObject, toModelObject],
linkroleDefinition=self.modelXbrl.roleTypeDefinition(calcRels[0].linkrole if calcRels else None),
conceptFrom=self.modelXbrl.modelObject(fromIndx).qname, conceptTo=self.modelXbrl.modelObject(toIndx).qname, contextId=contextId)
if disclosureSystem.defaultXmlLang:
for concept, preferredLabelRels in conceptRelsUsedWithPreferredLabels.items():
for preferredLabelRel in preferredLabelRels:
preferredLabel = preferredLabelRel.preferredLabel
hasDefaultLangPreferredLabel = False
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
if modelLabel.xmlLang.startswith(disclosureSystem.defaultXmlLang) and \
modelLabel.role == preferredLabel:
hasDefaultLangPreferredLabel = True
break
if not hasDefaultLangPreferredLabel:
self.modelXbrl.error("GFM.1.06.04", # 6.12.04 now reserved: ("EFM.6.12.04", "GFM.1.06.04"),
_("Concept %(concept)s missing %(lang)s preferred labels for role %(preferredLabel)s"),
modelObject=(preferredLabelRel, concept), concept=concept.qname, fromConcept=preferredLabelRel.fromModelObject.qname,
lang=disclosureSystem.defaultLanguage, preferredLabel=preferredLabel)
del conceptRelsUsedWithPreferredLabels
# 6.16.4, 1.16.5 Base sets of Domain Relationship Sets testing
self.modelXbrl.profileActivity("... filer preferred label checks", minTimeToShow=1.0)
''' try moving to plug-in
if self.validateSBRNL:
# check presentation link roles for generic linkbase order number
ordersRelationshipSet = modelXbrl.relationshipSet("http://www.nltaxonomie.nl/2011/arcrole/linkrole-order")
presLinkroleNumberURI = {}
presLinkrolesCount = 0
for countLinkroles in (True, False):
for roleURI, modelRoleTypes in modelXbrl.roleTypes.items():
for modelRoleType in modelRoleTypes:
if XbrlConst.qnLinkPresentationLink in modelRoleType.usedOns:
if countLinkroles:
presLinkrolesCount += 1
else:
if not ordersRelationshipSet:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole %(linkrole)s missing order number relationship set"),
modelObject=modelRoleType, linkrole=modelRoleType.roleURI)
else:
order = None
for orderNumRel in ordersRelationshipSet.fromModelObject(modelRoleType):
order = getattr(orderNumRel.toModelObject, "xValue", "(noPSVIvalue)")
if order in presLinkroleNumberURI:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole order number %(order)s of %(linkrole)s also used in %(otherLinkrole)s"),
modelObject=modelRoleType, order=order, linkrole=modelRoleType.roleURI, otherLinkrole=presLinkroleNumberURI[order])
else:
presLinkroleNumberURI[order] = modelRoleType.roleURI
if not order:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole %(linkrole)s missing order number"),
modelObject=modelRoleType, linkrole=modelRoleType.roleURI)
if countLinkroles and presLinkrolesCount < 2:
break # don't check order numbers if only one presentation linkrole
# check arc role definitions for labels
for arcroleURI, modelRoleTypes in modelXbrl.arcroleTypes.items():
for modelRoleType in modelRoleTypes:
if (not arcroleURI.startswith("http://xbrl.org/") and
modelRoleType.modelDocument.targetNamespace not in disclosureSystem.baseTaxonomyNamespaces and
(not modelRoleType.genLabel(lang="nl") or not modelRoleType.genLabel(lang="en"))):
modelXbrl.error("SBR.NL.2.2.4.02",
_("ArcroleType missing nl or en generic label: %(arcrole)s"),
modelObject=modelRoleType, arcrole=arcroleURI)
for domainElt in typedDomainElements:
if domainElt.modelDocument.targetNamespace not in disclosureSystem.baseTaxonomyNamespaces:
if not domainElt.genLabel(fallbackToQname=False,lang="nl"):
modelXbrl.error("SBR.NL.2.2.8.01",
_("Typed dimension domain element %(concept)s must have a generic label"),
modelObject=domainElt, concept=domainElt.qname)
if domainElt.type is not None and domainElt.type.localName == "complexType":
modelXbrl.error("SBR.NL.2.2.8.02",
_("Typed dimension domain element %(concept)s has disallowed complex content"),
modelObject=domainElt, concept=domainElt.qname)
self.modelXbrl.profileActivity("... SBR role types and type facits checks", minTimeToShow=1.0)
'''
if self.validateEFM:
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Finally"):
pluginXbrlMethod(self, conceptsUsed)
elif self.validateSBRNL:
for pluginXbrlMethod in pluginClassMethods("Validate.SBRNL.Finally"):
pluginXbrlMethod(self, conceptsUsed)
self.modelXbrl.profileActivity("... plug in '.Finally' checks", minTimeToShow=1.0)
self.modelXbrl.profileStat(_("validate{0}").format(modelXbrl.modelManager.disclosureSystem.validationType))
modelXbrl.modelManager.showStatus(_("ready"), 2000) | ValueError | dataset/ETHPy150Open Arelle/Arelle/arelle/ValidateFiling.py/ValidateFiling.validate |
def isStandardUri(self, uri):
try:
return self._isStandardUri[uri]
except __HOLE__:
isStd = (uri in self.disclosureSystem.standardTaxonomiesDict or
(not isHttpUrl(uri) and
# try 2011-12-23 RH: if works, remove the localHrefs
# any(u.endswith(e) for u in (uri.replace("\\","/"),) for e in disclosureSystem.standardLocalHrefs)
"/basis/sbr/" in uri.replace("\\","/")
))
self._isStandardUri[uri] = isStd
return isStd | KeyError | dataset/ETHPy150Open Arelle/Arelle/arelle/ValidateFiling.py/ValidateFiling.isStandardUri |
def _resolve_value(self, name):
""" Returns an appropriate value for the given name. """
name = str(name)
if name in self._metadata._meta.elements:
element = self._metadata._meta.elements[name]
# Look in instances for an explicit value
if element.editable:
value = getattr(self, name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
populate_from = element.populate_from
if callable(populate_from):
return populate_from(self, **self._populate_from_kwargs())
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from)
# If this is not an element, look for an attribute on metadata
try:
value = getattr(self._metadata, name)
except __HOLE__:
pass
else:
if callable(value):
if getattr(value, 'im_self', None):
return value(self)
else:
return value(self._metadata, self)
return value | AttributeError | dataset/ETHPy150Open willhardy/django-seo/rollyourown/seo/backends.py/MetadataBaseModel._resolve_value |
def get_model(self, options):
class ViewMetadataBase(MetadataBaseModel):
_view = models.CharField(_('view'), max_length=255, unique=not (options.use_sites or options.use_i18n), default="", blank=True)
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def _process_context(self, context):
""" Use the context when rendering any substitutions. """
if 'view_context' in context:
self.__context = context['view_context']
def _populate_from_kwargs(self):
return {'view_name': self._view}
def _resolve_value(self, name):
value = super(ViewMetadataBase, self)._resolve_value(name)
try:
return _resolve(value, context=self.__context)
except __HOLE__:
return value
def __unicode__(self):
return self._view
class Meta:
abstract = True
unique_together = self.get_unique_together(options)
return ViewMetadataBase | AttributeError | dataset/ETHPy150Open willhardy/django-seo/rollyourown/seo/backends.py/ViewBackend.get_model |
def get_model(self, options):
class ModelInstanceMetadataBase(MetadataBaseModel):
_path = models.CharField(_('path'), max_length=255, editable=False, unique=not (options.use_sites or options.use_i18n))
_content_type = models.ForeignKey(ContentType, editable=False)
_object_id = models.PositiveIntegerField(editable=False)
_content_object = generic.GenericForeignKey('_content_type', '_object_id')
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def __unicode__(self):
return self._path
class Meta:
unique_together = self.get_unique_together(options)
abstract = True
def _process_context(self, context):
context['content_type'] = self._content_type
context['model_instance'] = self
def _populate_from_kwargs(self):
return {'model_instance': self._content_object}
def save(self, *args, **kwargs):
try:
path_func = self._content_object.get_absolute_url
except __HOLE__:
pass
else:
self._path = path_func()
super(ModelInstanceMetadataBase, self).save(*args, **kwargs)
return ModelInstanceMetadataBase | AttributeError | dataset/ETHPy150Open willhardy/django-seo/rollyourown/seo/backends.py/ModelInstanceBackend.get_model |
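The save() override above only derives _path when the related object actually exposes get_absolute_url, using AttributeError as the "no URL" signal. A minimal sketch of that guard without Django; PathedRecord and Page are illustrative stand-ins:

class PathedRecord(object):
    def __init__(self, content_object):
        self._content_object = content_object
        self._path = None

    def save(self):
        try:
            path_func = self._content_object.get_absolute_url
        except AttributeError:
            pass                        # object has no URL; leave _path untouched
        else:
            self._path = path_func()
        # the real model would call super().save(*args, **kwargs) here

class Page(object):
    def get_absolute_url(self):
        return "/pages/1/"

r = PathedRecord(Page()); r.save()
assert r._path == "/pages/1/"
r2 = PathedRecord(object()); r2.save()
assert r2._path is None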
def get_model(self, options):
class ModelMetadataBase(MetadataBaseModel):
_content_type = models.ForeignKey(ContentType)
if options.use_sites:
_site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_("site"))
if options.use_i18n:
_language = models.CharField(_("language"), max_length=5, null=True, blank=True, db_index=True, choices=settings.LANGUAGES)
objects = self.get_manager(options)()
def __unicode__(self):
return unicode(self._content_type)
def _process_context(self, context):
""" Use the given model instance as context for rendering
any substitutions.
"""
if 'model_instance' in context:
self.__instance = context['model_instance']
def _populate_from_kwargs(self):
return {'content_type': self._content_type}
def _resolve_value(self, name):
value = super(ModelMetadataBase, self)._resolve_value(name)
try:
return _resolve(value, self.__instance._content_object)
except __HOLE__:
return value
class Meta:
abstract = True
unique_together = self.get_unique_together(options)
return ModelMetadataBase | AttributeError | dataset/ETHPy150Open willhardy/django-seo/rollyourown/seo/backends.py/ModelBackend.get_model |
@staticmethod
def validate(options):
""" Validates the application of this backend to a given metadata
"""
try:
if options.backends.index('modelinstance') > options.backends.index('model'):
raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
except __HOLE__:
raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend") | ValueError | dataset/ETHPy150Open willhardy/django-seo/rollyourown/seo/backends.py/ModelBackend.validate |
def update(self):
"""Check module status, update upstream and run compute.
This is the execution logic for the module. It handles all the
different possible states (cached, suspended, already failed), runs the
upstream modules and the compute() method, and reports everything to the logger.
"""
if self.had_error:
raise ModuleHadError(self)
elif self.was_suspended:
raise ModuleWasSuspended(self)
elif self.computed:
return
self.logging.begin_update(self)
if not self.setJobCache():
self.update_upstream()
if self.upToDate:
if not self.computed:
self.logging.update_cached(self)
self.computed = True
return
self.had_error = True # Unset later in this method
self.logging.begin_compute(self)
try:
if self.is_breakpoint:
raise ModuleBreakpoint(self)
self.set_iterated_ports()
self.set_streamed_ports()
if self.streamed_ports:
self.build_stream()
elif self.list_depth > 0:
self.compute_all()
elif (self.in_pipeline and
not self.is_while and
(ModuleControlParam.WHILE_COND_KEY in self.control_params or
ModuleControlParam.WHILE_MAX_KEY in self.control_params)):
self.is_while = True
self.compute_while()
else:
self.compute()
self.addJobCache()
self.computed = True
except ModuleSuspended, e:
self.had_error, self.was_suspended = False, True
raise
except ModuleError, me:
if hasattr(me.module, 'interpreter'):
if me.errorTrace is None:
me.errorTrace = traceback.format_exc()
raise
else:
msg = "A dynamic module raised an exception: '%s'" % me
raise ModuleError(self, msg, errorTrace=me.errorTrace)
except ModuleErrors:
raise
except __HOLE__, e:
raise ModuleError(self, 'Interrupted by user')
except ModuleBreakpoint:
raise
except Exception, e:
debug.unexpected_exception(e)
raise ModuleError(
self,
"Uncaught exception: %s" % debug.format_exception(e).rstrip(),
errorTrace=traceback.format_exc())
if self.annotate_output:
self.annotate_output_values()
self.upToDate = True
self.had_error = False
self.logging.end_update(self)
self.logging.signalSuccess(self) | KeyboardInterrupt | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/vistrails_module.py/Module.update |
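One detail of update() worth isolating: a Ctrl-C during compute() is caught as KeyboardInterrupt and re-raised as the pipeline's own error type, so the caller reports "Interrupted by user" like any other module failure. A minimal sketch with invented names (TaskError, run_task), not VisTrails' real classes:

class TaskError(Exception):
    def __init__(self, task, msg):
        super(TaskError, self).__init__("%s: %s" % (task, msg))

def run_task(name, compute):
    try:
        return compute()
    except KeyboardInterrupt:
        raise TaskError(name, "Interrupted by user")
    except Exception as e:
        raise TaskError(name, "Uncaught exception: %s" % e)

def boom():
    raise KeyboardInterrupt()

try:
    run_task("demo", boom)
except TaskError as err:
    assert "Interrupted by user" in str(err)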
def compute_while(self):
"""This method executes the module once for each input.
Similarly to controlflow's fold, it calls update() in a loop to handle
lists of inputs.
"""
name_condition = self.control_params.get(
ModuleControlParam.WHILE_COND_KEY, None)
max_iterations = int(self.control_params.get(
ModuleControlParam.WHILE_MAX_KEY, 20))
delay = float(self.control_params.get(
ModuleControlParam.WHILE_DELAY_KEY, 0.0))
name_state_input = self.control_params.get(
ModuleControlParam.WHILE_INPUT_KEY, None)
if not name_state_input:
name_state_input = None
else:
try:
name_state_input = list(ast.literal_eval(name_state_input))
except __HOLE__:
name_state_input = [name_state_input]
name_state_output = self.control_params.get(
ModuleControlParam.WHILE_OUTPUT_KEY, None)
if not name_state_output:
name_state_output = None
else:
try:
name_state_output = list(ast.literal_eval(name_state_output))
except ValueError:
name_state_output = [name_state_output]
from vistrails.core.modules.basic_modules import create_constant
if name_state_input or name_state_output:
if not name_state_input or not name_state_output:
raise ModuleError(self,
"Passing state between iterations requires "
"BOTH StateInputPorts and StateOutputPorts "
"to be set")
if len(name_state_input) != len(name_state_output):
raise ModuleError(self,
"StateInputPorts and StateOutputPorts need "
"to have the same number of ports "
"(got %d and %d)" %(len(name_state_input),
len(name_state_output)))
module = copy.copy(self)
module.had_error = False
module.is_while = True
state = None
loop = self.logging.begin_loop_execution(self, max_iterations)
for i in xrange(max_iterations):
if not self.upToDate:
module.upToDate = False
module.computed = False
# Set state on input ports
if i > 0 and name_state_input:
for value, input_port, output_port \
in izip(state, name_state_input, name_state_output):
if input_port in module.inputPorts:
del module.inputPorts[input_port]
new_connector = ModuleConnector(
create_constant(value), 'value',
module.output_specs.get(output_port, None))
module.set_input_port(input_port, new_connector)
loop.begin_iteration(module, i)
try:
module.update() # might raise ModuleError, ModuleSuspended,
# ModuleHadError, ModuleWasSuspended
except ModuleSuspended, e:
e.loop_iteration = i
raise
loop.end_iteration(module)
if name_condition is not None:
if name_condition not in module.outputPorts:
raise ModuleError(
module,
"Invalid output port: %s" % name_condition)
if not module.get_output(name_condition):
break
if delay and i+1 != max_iterations:
time.sleep(delay)
# Get state on output ports
if name_state_output:
state = [module.get_output(port) for port in name_state_output]
self.logging.update_progress(self, i * 1.0 / max_iterations)
loop.end_loop_execution()
for name_output in self.outputPorts:
self.set_output(name_output, module.get_output(name_output)) | ValueError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/vistrails_module.py/Module.compute_while |
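compute_while parses its state-port parameters twice with the same trick: the control parameter may be either a Python literal list ("['a', 'b']") or a bare port name ("a"); ast.literal_eval raises ValueError on the bare name, and the fallback wraps it in a single-item list. A self-contained sketch of that parsing step (parse_port_names is an invented helper name):

import ast

def parse_port_names(raw):
    if not raw:
        return None
    try:
        return list(ast.literal_eval(raw))
    except ValueError:
        return [raw]                       # bare name: treat it as a one-port list

assert parse_port_names("['state_in', 'count']") == ['state_in', 'count']
assert parse_port_names("state_in") == ['state_in']
assert parse_port_names("") is None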
def get_input_list(self, port_name):
"""Returns the value(s) coming in on the input port named
**port_name**. When a port can accept more than one input,
this method obtains all the values being passed in.
:param port_name: the name of the input port being queried
:type port_name: str
:returns: a list of all the values being passed in on the input port
:raises: ``ModuleError`` if there is no value on the port
"""
from vistrails.core.modules.basic_modules import List, Variant
if port_name not in self.inputPorts:
raise ModuleError(self, "Missing value from port %s" % port_name)
# Cannot resolve the circular reference here; this needs to be fixed later
from vistrails.core.modules.sub_module import InputPort
connectors = []
for connector in self.inputPorts[port_name]:
if isinstance(connector.obj, InputPort):
# add external connectors
connectors.extend(connector.obj.inputPorts['ExternalPipe'])
else:
connectors.append(connector)
ports = []
for connector in connectors:
value = connector()
src_depth = connector.depth()
if not self.input_specs:
# cannot do depth wrapping
ports.append(value)
continue
# Give List an additional depth
dest_descs = self.input_specs[port_name].descriptors()
dest_depth = self.input_specs[port_name].depth + self.list_depth
if len(dest_descs) == 1 and dest_descs[0].module == List:
dest_depth += 1
if connector.spec:
src_descs = connector.spec.descriptors()
if len(src_descs) == 1 and src_descs[0].module == List and \
len(dest_descs) == 1 and dest_descs[0].module == Variant:
# special case - Treat Variant as list
src_depth -= 1
if len(src_descs) == 1 and src_descs[0].module == Variant and \
len(dest_descs) == 1 and dest_descs[0].module == List:
# special case - Treat Variant as list
dest_depth -= 1
# wrap depths that are too shallow
while (src_depth - dest_depth) < 0:
value = [value]
src_depth += 1
# type check list of lists
root = value
for i in xrange(1, src_depth):
try:
# only check first item
root = root[0]
except __HOLE__:
raise ModuleError(self, "List on port %s has wrong"
" depth %s, expected %s." %
(port_name, i-1, src_depth))
if src_depth and root is not None:
self.typeChecking(self, [port_name], [[root]])
ports.append(value)
return ports | TypeError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/vistrails_module.py/Module.get_input_list |
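The depth check in get_input_list probes a nested list by repeatedly taking element [0]; once the value stops being indexable the lookup raises TypeError, which is how a too-shallow list is detected. A small sketch of that probe as a standalone helper (check_depth is an invented name):

def check_depth(value, expected_depth):
    root = value
    for i in range(expected_depth):
        try:
            root = root[0]
        except TypeError:
            raise ValueError("list has depth %d, expected %d" % (i, expected_depth))
    return root                            # first scalar, usable for type checking

assert check_depth([[1, 2], [3]], 2) == 1
try:
    check_depth([1, 2, 3], 2)              # only one level deep
except ValueError as err:
    assert "depth 1" in str(err)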
def set_streaming_output(self, port, generator, size=0):
"""This method is used to set a streaming output port.
:param port: the name of the output port to be set
:type port: str
:param generator: An iterator object supporting .next()
:param size: The number of values if known (default=0)
:type size: int
"""
from vistrails.core.modules.basic_modules import Generator
module = copy.copy(self)
if size:
milestones = [i*size//10 for i in xrange(1, 11)]
def _Generator():
i = 0
while 1:
try:
value = generator.next()
except __HOLE__:
module.set_output(port, None)
self.logging.update_progress(self, 1.0)
yield None
except Exception, e:
me = ModuleError(self, "Error generating value: %s"% str(e),
errorTrace=str(e))
raise me
if value is None:
module.set_output(port, None)
self.logging.update_progress(self, 1.0)
yield None
module.set_output(port, value)
if size:
if i in milestones:
self.logging.update_progress(self, float(i)/size)
else:
self.logging.update_progress(self, 0.5)
i += 1
yield True
_generator = _Generator()
self.set_output(port, Generator(size=size,
module=module,
generator=_generator,
port=port)) | StopIteration | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/vistrails_module.py/Module.set_streaming_output |
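The inner _Generator above wraps a user-supplied generator and uses StopIteration as the end-of-stream signal, emitting None so downstream consumers see an explicit terminator. A minimal sketch of that wrapping pattern, reduced to a plain generator with no module or logging machinery:

def wrap_stream(source):
    it = iter(source)
    while True:
        try:
            value = next(it)
        except StopIteration:
            yield None                     # end-of-stream marker, like set_output(port, None)
            return
        yield value

assert list(wrap_stream([1, 2, 3])) == [1, 2, 3, None]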
def __call__(self):
result = self.obj.get_output(self.port)
if isinstance(result, Module):
warnings.warn(
"A Module instance was used as data: "
"module=%s, port=%s, object=%r" % (type(self.obj).__name__,
self.port, result),
UserWarning)
from vistrails.core.modules.basic_modules import Generator
value = result
if isinstance(result, Generator):
return result
depth = self.depth(fix_list=False)
if depth > 0:
value = result
# flatten list
for i in xrange(1, depth):
try:
value = value[0]
except __HOLE__:
raise ModuleError(self.obj, "List on port %s has wrong"
" depth %s, expected %s." %
(self.port, i, depth))
if depth:
# Only type-check first value
value = value[0] if value is not None and len(value) else None
if self.spec is not None and self.typecheck is not None:
descs = self.spec.descriptors()
typecheck = self.typecheck
if len(descs) == 1:
if not typecheck[0]:
return result
mod = descs[0].module
if value is not None and hasattr(mod, 'validate') \
and not mod.validate(value):
raise ModuleError(self.obj, "Type passed on Variant port "
"%s does not match destination type "
"%s" % (self.port, descs[0].name))
else:
if len(typecheck) == 1:
if typecheck[0]:
typecheck = [True] * len(descs)
else:
return result
if not isinstance(value, tuple):
raise ModuleError(self.obj, "Type passed on Variant port "
"%s is not a tuple" % self.port)
elif len(value) != len(descs):
raise ModuleError(self.obj, "Object passed on Variant "
"port %s does not have the correct "
"length (%d, expected %d)" % (
self.port, len(result), len(descs)))
for i, desc in enumerate(descs):
if not typecheck[i]:
continue
mod = desc.module
if hasattr(mod, 'validate'):
if not mod.validate(value[i]):
raise ModuleError(
self.obj,
"Element %d of tuple passed on Variant "
"port %s does not match the destination "
"type %s" % (i, self.port, desc.name))
return result | TypeError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/modules/vistrails_module.py/ModuleConnector.__call__ |
def copy_to_unicode(element):
""" used to transform the lxml version of unicode to a
standard version of unicode that can be pickalable -
necessary for linting """
if isinstance(element, dict):
return {
key: copy_to_unicode(val)
for key, val in element.items()
}
elif isinstance(element, list):
return list(map(copy_to_unicode, element))
else:
try:
# A dirty way to convert to unicode in python 2 + 3.3+
return u''.join(element)
except __HOLE__:
return element | TypeError | dataset/ETHPy150Open CenterForOpenScience/scrapi/scrapi/util.py/copy_to_unicode |
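The conversion trick above relies on u''.join(x) succeeding when x is a string (or any iterable of strings) and raising TypeError for everything else, so the except clause simply passes non-text values through unchanged. A reduced sketch of just that branch (to_text is an invented name):

def to_text(value):
    try:
        return u''.join(value)
    except TypeError:
        return value

assert to_text(u'abc') == u'abc'
assert to_text(42) == 42                   # not iterable text: returned as-is
assert to_text(None) is None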
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except __HOLE__:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble) | AttributeError | dataset/ETHPy150Open testing-cabal/mock/mock/tests/testpatch.py/PatchTest.test_patch_wont_create_by_default |
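What the test above verifies, shown directly: by default mock.patch refuses to create attributes that do not already exist on the target and raises AttributeError, while create=True opts into creating (and cleaning up) the attribute. A sketch assuming the mock package is importable; the attribute name frooble is deliberately nonexistent:

from mock import patch, sentinel
import collections

try:
    with patch('collections.frooble', sentinel.Frooble):    # no such attribute
        pass
except AttributeError:
    pass
else:
    raise AssertionError("patching a nonexistent attribute should fail")

with patch('collections.frooble', sentinel.Frooble, create=True):
    assert collections.frooble is sentinel.Frooble
assert not hasattr(collections, 'frooble')                   # removed again on exit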