# Internationalization details are documented in
# <http://docutils.sf.net/docs/howto/i18n.html>.
"""
This package contains modules for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils.utils import normalize_language_tag
if sys.version_info < (2,5):
from docutils._compat import __import__
_languages = {}
def get_language(language_code, reporter=None):
"""Return module with language localizations.
`language_code` is a "BCP 47" language tag.
If there is no matching module, warn and fall back to English.
"""
# TODO: use a dummy module returning empty strings? configurable?
for tag in normalize_language_tag(language_code):
tag = tag.replace('-','_') # '-' not valid in module names
if tag in _languages:
return _languages[tag]
try:
module = __import__(tag, globals(), locals(), level=0)
except ImportError:
try:
module = __import__(tag, globals(), locals(), level=1)
except ImportError:
continue
_languages[tag] = module
return module
if reporter is not None:
reporter.warning(
'language "%s" not supported: ' % language_code +
'Docutils-generated text will be in English.')
module = __import__('en', globals(), locals(), level=1)
_languages[tag] = module # warn only one time!
return module
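# A minimal usage sketch ('de' and the unknown tag are example values):
#
#     from docutils.languages import get_language
#     labels = get_language('de').labels    # language-dependent label strings
#     en = get_language('no-such-tag')      # warns via `reporter` (if given)
#                                           # and falls back to English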
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/languages/__init__.py",
"copies": "4",
"size": "1580",
"license": "mit",
"hash": 4120245481796229600,
"line_mean": 31.9166666667,
"line_max": 74,
"alpha_frac": 0.6335443038,
"autogenerated": false,
"ratio": 4.0932642487046635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002182805861665304,
"num_lines": 48
} |
# Internationalization details are documented in
# <http://docutils.sf.net/docs/howto/i18n.html>.
"""
This package contains modules for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils.utils import normalize_language_tag
if sys.version_info < (2,5):
from docutils._compat import __import__
_languages = {}
def get_language(language_code):
for tag in normalize_language_tag(language_code):
tag = tag.replace('-','_') # '-' not valid in module names
if tag in _languages:
return _languages[tag]
try:
module = __import__(tag, globals(), locals(), level=0)
except ImportError:
try:
module = __import__(tag, globals(), locals(), level=1)
except ImportError:
continue
_languages[tag] = module
return module
return None
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/languages/__init__.py",
"copies": "4",
"size": "1039",
"license": "mit",
"hash": -6492075142304612000,
"line_mean": 27.0810810811,
"line_max": 70,
"alpha_frac": 0.6323387873,
"autogenerated": false,
"ratio": 4.042801556420233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6675140343720234,
"avg_score": null,
"num_lines": null
} |
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class BaseAdmonition(Directive):
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
node_class = None
"""Subclasses must set this to the appropriate admonition node class."""
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
admonition_node = self.node_class(text, **self.options)
self.add_name(admonition_node)
if self.node_class is nodes.admonition:
title_text = self.arguments[0]
textnodes, messages = self.state.inline_text(title_text,
self.lineno)
admonition_node += nodes.title(title_text, '', *textnodes)
admonition_node += messages
if 'classes' not in self.options:
admonition_node['classes'] += ['admonition-' +
nodes.make_id(title_text)]
self.state.nested_parse(self.content, self.content_offset,
admonition_node)
return [admonition_node]
class Admonition(BaseAdmonition):
required_arguments = 1
node_class = nodes.admonition
class Attention(BaseAdmonition):
node_class = nodes.attention
class Caution(BaseAdmonition):
node_class = nodes.caution
class Danger(BaseAdmonition):
node_class = nodes.danger
class Error(BaseAdmonition):
node_class = nodes.error
class Hint(BaseAdmonition):
node_class = nodes.hint
class Important(BaseAdmonition):
node_class = nodes.important
class Note(BaseAdmonition):
node_class = nodes.note
class Tip(BaseAdmonition):
node_class = nodes.tip
class Warning(BaseAdmonition):
node_class = nodes.warning
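# A sketch of manual registration (normally the parser wires these up from
# its directive tables; 'my-note' is a hypothetical directive name):
#
#     from docutils.parsers.rst import directives
#     directives.register_directive('my-note', Note)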
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/directives/admonitions.py",
"copies": "4",
"size": "2224",
"license": "mit",
"hash": 7150633405828872000,
"line_mean": 22.1666666667,
"line_max": 76,
"alpha_frac": 0.6384892086,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022644927536231884,
"num_lines": 96
} |
# $Id$
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text
filters. This module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
from docutils import utils
from docutils.utils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=False):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>self._stderr, (
u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, u'\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>self._stderr, '\nStateMachine.run: bof transition'
context, result = state.bof(context)
results.extend(result)
while True:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>self._stderr, (
u'\nStateMachine.run: line (source=%r, '
u'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>self._stderr, (
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
StringList instance to account for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except TypeError:
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except IndexError: # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, err:
block = err.args[0]
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>self._stderr, u'%s: %s' % (type, value)
print >>self._stderr, 'input line %s' % (self.abs_line_number())
print >>self._stderr, (u'module %s, line %s, function %s' %
(module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=False):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
- `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true.
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except (KeyError, ValueError):
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=False, strip_indent=True):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=False, strip_indent=True):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip `indent` characters of indentation if true
(default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=False,
strip_indent=True, strip_top=True):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip `indent` characters of indentation if true
(default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
The methods `known_indent()` and `firstknown_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=False):
"""
Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
def xitems(self):
"""Return iterator yielding (source, offset, value) tuples."""
for (value, (source, offset)) in zip(self.data, self.items):
yield (source, offset, value)
def pprint(self):
"""Print the list in `grep` format (`source:offset:value` lines)"""
for line in self.xitems():
print "%s:%d:%s" % line
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=False, strip_indent=True,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
- a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
# Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
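# Example (a sketch) of the return values:
#
#     sl = StringList(['  a', '   b', 'c'])
#     block, indent, blank_finish = sl.get_indented()
#     # block == ['a', ' b'], indent == 2, blank_finish == False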
def get_2D_block(self, top, left, bottom, right, strip_indent=True):
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
# get slice from line, care for combining characters
ci = utils.column_indices(block.data[i])
try:
left = ci[left]
except IndexError:
left += len(block.data[i]) - len(ci)
try:
right = ci[right]
except IndexError:
right += len(block.data[i]) - len(ci)
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
return # new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=False,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
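# Example (a sketch): tabs expand to the next multiple of `tab_width`, and
# form feeds become spaces when `convert_whitespace` is true:
#
#     string2lines('1\t2', tab_width=4)                 # -> ['1   2']
#     string2lines('a\x0cb', convert_whitespace=True)   # -> ['a b']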
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/statemachine.py",
"copies": "2",
"size": "57522",
"license": "mit",
"hash": -7635963413147025000,
"line_mean": 36.400520156,
"line_max": 80,
"alpha_frac": 0.5860714162,
"autogenerated": false,
"ratio": 4.393004429509699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006058621092029923,
"num_lines": 1538
} |
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`. See `The Docutils Publisher`_.
.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
import pprint
from docutils import __version__, __version_details__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser
from docutils.transforms import Transformer
from docutils.utils.error_reporting import ErrorOutput, ErrorString
import docutils.readers.doctree
class Publisher:
"""
A facade encapsulating the high-level logic of a Docutils system.
"""
def __init__(self, reader=None, parser=None, writer=None,
source=None, source_class=io.FileInput,
destination=None, destination_class=io.FileOutput,
settings=None):
"""
Initial setup. If any of `reader`, `parser`, or `writer` are not
specified, the corresponding ``set_...`` method should be called with
a component name (`set_reader` sets the parser as well).
"""
self.document = None
"""The document tree (`docutils.nodes` objects)."""
self.reader = reader
"""A `docutils.readers.Reader` instance."""
self.parser = parser
"""A `docutils.parsers.Parser` instance."""
self.writer = writer
"""A `docutils.writers.Writer` instance."""
for component in 'reader', 'parser', 'writer':
assert not isinstance(getattr(self, component), str), (
'passed string "%s" as "%s" parameter; pass an instance, '
'or use the "%s_name" parameter instead (in '
'docutils.core.publish_* convenience functions).'
% (getattr(self, component), component, component))
self.source = source
"""The source of input data, a `docutils.io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
"""The destination for docutils output, a `docutils.io.Output`
instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
self.settings = settings
"""An object containing Docutils settings as instance attributes.
Set by `self.process_command_line()` or `self.get_settings()`."""
self._stderr = ErrorOutput()
def set_reader(self, reader_name, parser, parser_name):
"""Set `self.reader` by name."""
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser
def set_writer(self, writer_name):
"""Set `self.writer` by name."""
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class()
def set_components(self, reader_name, parser_name, writer_name):
if self.reader is None:
self.set_reader(reader_name, self.parser, parser_name)
if self.parser is None:
if self.reader.parser is None:
self.reader.set_parser(parser_name)
self.parser = self.reader.parser
if self.writer is None:
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
if config_section:
if not settings_spec:
settings_spec = SettingsSpec()
settings_spec.config_section = config_section
parts = config_section.split()
if len(parts) > 1 and parts[-1] == 'application':
settings_spec.config_section_dependencies = ['applications']
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(self.parser, self.reader, self.writer, settings_spec),
defaults=defaults, read_config_files=True,
usage=usage, description=description)
return option_parser
def get_settings(self, usage=None, description=None,
settings_spec=None, config_section=None, **defaults):
"""
Set and return default settings (overrides in `defaults` dict).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
def process_programmatic_settings(self, settings_spec,
settings_overrides,
config_section):
if self.settings is None:
defaults = (settings_overrides or {}).copy()
# Propagate exceptions by default when used programmatically:
defaults.setdefault('traceback', True)
self.get_settings(settings_spec=settings_spec,
config_section=config_section,
**defaults)
def process_command_line(self, argv=None, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
if argv is None:
argv = sys.argv[1:]
# converting to Unicode (Python 3 does this automatically):
if sys.version_info < (3,0):
# TODO: make this failsafe and reversible?
argv_encoding = (frontend.locale_encoding or 'ascii')
argv = [a.decode(argv_encoding) for a in argv]
self.settings = option_parser.parse_args(argv)
def set_io(self, source_path=None, destination_path=None):
if self.source is None:
self.set_source(source_path=source_path)
if self.destination is None:
self.set_destination(destination_path=destination_path)
def set_source(self, source=None, source_path=None):
if source_path is None:
source_path = self.settings._source
else:
self.settings._source = source_path
# Raise IOError instead of system exit with `traceback == True`
# TODO: change io.FileInput's default behaviour and remove this hack
try:
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
except TypeError:
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
def set_destination(self, destination=None, destination_path=None):
if destination_path is None:
destination_path = self.settings._destination
else:
self.settings._destination = destination_path
self.destination = self.destination_class(
destination=destination, destination_path=destination_path,
encoding=self.settings.output_encoding,
error_handler=self.settings.output_encoding_error_handler)
def apply_transforms(self):
self.document.transformer.populate_from_components(
(self.source, self.reader, self.reader.parser, self.writer,
self.destination))
self.document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit as error:
exit = 1
exit_status = error.code
except Exception as error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = True
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output
def debugging_dumps(self):
if not self.document:
return
if self.settings.dump_settings:
print('\n::: Runtime settings:', file=self._stderr)
print(pprint.pformat(self.settings.__dict__), file=self._stderr)
if self.settings.dump_internals:
print('\n::: Document internals:', file=self._stderr)
print(pprint.pformat(self.document.__dict__), file=self._stderr)
if self.settings.dump_transforms:
print('\n::: Transforms applied:', file=self._stderr)
print((' (priority, transform class, '
'pending node details, keyword args)'), file=self._stderr)
print(pprint.pformat(
[(priority, '%s.%s' % (xclass.__module__, xclass.__name__),
pending and pending.details, kwargs)
for priority, xclass, pending, kwargs
in self.document.transformer.applied]), file=self._stderr)
if self.settings.dump_pseudo_xml:
print('\n::: Pseudo-XML:', file=self._stderr)
print(self.document.pformat().encode(
'raw_unicode_escape'), file=self._stderr)
def report_Exception(self, error):
if isinstance(error, utils.SystemMessage):
self.report_SystemMessage(error)
elif isinstance(error, UnicodeEncodeError):
self.report_UnicodeError(error)
elif isinstance(error, io.InputError):
self._stderr.write('Unable to open source file for reading:\n'
' %s\n' % ErrorString(error))
elif isinstance(error, io.OutputError):
self._stderr.write(
'Unable to open destination file for writing:\n'
' %s\n' % ErrorString(error))
else:
print('%s' % ErrorString(error), file=self._stderr)
print(("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <[email protected]>.
Include "--traceback" output, Docutils version (%s [%s]),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, __version_details__,
sys.version.split()[0])), file=self._stderr)
def report_SystemMessage(self, error):
print(('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level])), file=self._stderr)
def report_UnicodeError(self, error):
data = error.object[error.start:error.end]
self._stderr.write(
'%s\n'
'\n'
'The specified output encoding (%s) cannot\n'
'handle all of the output.\n'
'Try setting "--output-encoding-error-handler" to\n'
'\n'
'* "xmlcharrefreplace" (for HTML & XML output);\n'
' the output will contain "%s" and should be usable.\n'
'* "backslashreplace" (for other output formats);\n'
' look for "%s" in the output.\n'
'* "replace"; look for "?" in the output.\n'
'\n'
'"--output-encoding-error-handler" is currently set to "%s".\n'
'\n'
'Exiting due to error. Use "--traceback" to diagnose.\n'
'If the advice above doesn\'t eliminate the error,\n'
'please report it to <[email protected]>.\n'
'Include "--traceback" output, Docutils version (%s),\n'
'Python version (%s), your OS type & version, and the\n'
'command line used.\n'
% (ErrorString(error),
self.settings.output_encoding,
data.encode('ascii', 'xmlcharrefreplace'),
data.encode('ascii', 'backslashreplace'),
self.settings.output_encoding_error_handler,
__version__, sys.version.split()[0]))
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sf.net/docs/user/config.html> for '
'the full reference.')
def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
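# Usage sketch (illustrative): the ``rst2*`` front-end scripts shipped with
# Docutils are little more than a call to `publish_cmdline`; a minimal HTML
# converter could look like this (the writer name and description are
# examples, not requirements)::
#
#     #!/usr/bin/env python
#     from docutils.core import publish_cmdline, default_description
#
#     description = ('Generates (X)HTML documents from standalone '
#                    'reStructuredText sources. ' + default_description)
#     publish_cmdline(writer_name='html', description=description)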
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with file-like I/O.
Return the encoded string output also.
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.FileInput, source=source, source_path=source_path,
destination_class=io.FileOutput,
destination=destination, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with string I/O. Return
the encoded string or Unicode string output.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
Similarly for Unicode string input (`source`)::
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
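# Usage sketch (illustrative): converting a string to HTML and getting a
# Unicode string back, as described in the docstring above::
#
#     from docutils.core import publish_string
#
#     html = publish_string(source='*Hello*, Docutils!',
#                           writer_name='html',
#                           settings_overrides={'input_encoding': 'unicode',
#                                               'output_encoding': 'unicode'})
#     assert '<em>Hello</em>' in html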
def publish_parts(source, source_path=None, source_class=io.StringInput,
destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts
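# Usage sketch (illustrative): `publish_parts` is the usual entry point for
# embedding rendered fragments in a larger page; for the HTML writer the
# returned dictionary contains keys such as 'html_body', 'fragment', and
# 'title'::
#
#     from docutils.core import publish_parts
#
#     parts = publish_parts(source='Some *text*.', writer_name='html')
#     body = parts['html_body']    # Unicode string with the rendered body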
def publish_doctree(source, source_path=None,
source_class=io.StringInput,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with string I/O.
Return the document tree.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's one way::
publish_doctree(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
pub = Publisher(reader=reader, parser=parser, writer=None,
settings=settings,
source_class=source_class,
destination_class=io.NullOutput)
pub.set_components(reader_name, parser_name, 'null')
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(None, None)
output = pub.publish(enable_exit_status=enable_exit_status)
return pub.document
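# Usage sketch (illustrative): the returned tree can be inspected with the
# standard `docutils.nodes` traversal API::
#
#     from docutils import nodes
#     from docutils.core import publish_doctree
#
#     document = publish_doctree('Title\n=====\n\nSome text.')
#     titles = [node.astext() for node in document.traverse(nodes.title)]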
def publish_from_doctree(document, destination_path=None,
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` to render from an existing document
tree data structure, for programmatic use with string I/O. Return
the encoded string output.
Note that document.settings is overridden; if you want to use the settings
of the original `document`, pass settings=document.settings.
Also, new document.transformer and document.reporter objects are
generated.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_from_doctree(
..., settings_overrides={'output_encoding': 'unicode'})
Parameters: `document` is a `docutils.nodes.document` object, an existing
document tree.
Other parameters: see `publish_programmatically`.
"""
reader = docutils.readers.doctree.Reader(parser_name='null')
pub = Publisher(reader, None, writer,
source=io.DocTreeInput(document),
destination_class=io.StringOutput, settings=settings)
if not writer and writer_name:
pub.set_writer(writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_destination(None, destination_path)
return pub.publish(enable_exit_status=enable_exit_status)
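# Usage sketch (illustrative): `publish_doctree` and `publish_from_doctree`
# combine into a two-stage pipeline, e.g. to modify the tree in between::
#
#     from docutils.core import publish_doctree, publish_from_doctree
#
#     doctree = publish_doctree('A paragraph of *text*.')
#     # ... manipulate `doctree` here ...
#     html = publish_from_doctree(
#         doctree, writer_name='html',
#         settings_overrides={'output_encoding': 'unicode'})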
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description,
destination=None, destination_class=io.BinaryFileOutput
):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
This is just like publish_cmdline, except that it uses
io.BinaryFileOutput instead of io.FileOutput.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_programmatically(source_class, source, source_path,
destination_class, destination, destination_path,
reader, reader_name,
parser, parser_name,
writer, writer_name,
settings, settings_spec,
settings_overrides, config_section,
enable_exit_status):
"""
Set up & run a `Publisher` for custom programmatic use. Return the
encoded string output and the Publisher object.
Applications should not need to call this function directly. If it does
seem to be necessary to call this function directly, please write to the
Docutils-develop mailing list
<http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>.
Parameters:
* `source_class` **required**: The class for dynamically created source
objects. Typically `io.FileInput` or `io.StringInput`.
* `source`: Type depends on `source_class`:
- If `source_class` is `io.FileInput`: Either a file-like object
(must have 'read' and 'close' methods), or ``None``
(`source_path` is opened). If neither `source` nor
`source_path` are supplied, `sys.stdin` is used.
- If `source_class` is `io.StringInput` **required**: The input
string, either an encoded 8-bit string (set the
'input_encoding' setting to the correct encoding) or a Unicode
string (set the 'input_encoding' setting to 'unicode').
* `source_path`: Type depends on `source_class`:
- `io.FileInput`: Path to the input file, opened if no `source`
supplied.
- `io.StringInput`: Optional. Path to the file or object that produced
`source`. Only used for diagnostic output.
* `destination_class` **required**: The class for dynamically created
destination objects. Typically `io.FileOutput` or `io.StringOutput`.
* `destination`: Type depends on `destination_class`:
- `io.FileOutput`: Either a file-like object (must have 'write' and
'close' methods), or ``None`` (`destination_path` is opened). If
neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `io.StringOutput`: Not used; pass ``None``.
* `destination_path`: Type depends on `destination_class`:
- `io.FileOutput`: Path to the output file. Opened if no `destination`
supplied.
- `io.StringOutput`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
* `reader`: A `docutils.readers.Reader` object.
* `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
* `parser`: A `docutils.parsers.Parser` object.
* `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
* `writer`: A `docutils.writers.Writer` object.
* `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
* `settings`: A runtime settings (`docutils.frontend.Values`) object, for
dotted-attribute access to runtime settings. It's the end result of the
`SettingsSpec`, config file, and option processing. If `settings` is
passed, it's assumed to be complete and no further setting/config/option
processing is done.
* `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
extra application-specific settings definitions independently of
components. In other words, the application becomes a component, and
its settings data is processed along with that of the other components.
Used only if no `settings` specified.
* `settings_overrides`: A dictionary containing application-specific
settings defaults that override the defaults of other components.
Used only if no `settings` specified.
* `config_section`: A string, the name of the configuration file section
for this application. Overrides the ``config_section`` attribute
defined by `settings_spec`. Used only if no `settings` specified.
* `enable_exit_status`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
output = pub.publish(enable_exit_status=enable_exit_status)
return output, pub
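# Usage sketch (illustrative): the hand-wired equivalent of a
# `publish_programmatically` call with string I/O, mirroring the function
# body above::
#
#     from docutils import core, io
#
#     pub = core.Publisher(source_class=io.StringInput,
#                          destination_class=io.StringOutput)
#     pub.set_components('standalone', 'restructuredtext', 'html')
#     pub.process_programmatic_settings(
#         None, {'output_encoding': 'unicode'}, None)
#     pub.set_source('Some *text*.', None)
#     pub.set_destination(None, None)
#     html = pub.publish()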
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/core.py",
"copies": "2",
"size": "29431",
"license": "mit",
"hash": 1413975898762804700,
"line_mean": 43.3906485671,
"line_max": 91,
"alpha_frac": 0.623662125,
"autogenerated": false,
"ratio": 4.38679385899538,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007475139935880158,
"num_lines": 663
} |
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
(``object.attribute``). Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.
Also exports the following functions:
* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators: `validate_encoding`,
`validate_encoding_error_handler`,
`validate_encoding_and_error_handler`,
`validate_boolean`, `validate_ternary`, `validate_threshold`,
`validate_colon_separated_string_list`,
`validate_comma_separated_string_list`,
`validate_dependency_file`.
* `make_paths_absolute`.
* SettingSpec manipulation: `filter_settings_spec`.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import warnings
import ConfigParser as CP
import codecs
import optparse
from optparse import SUPPRESS_HELP
import docutils
import docutils.utils
import docutils.nodes
from docutils.utils.error_reporting import locale_encoding, ErrorOutput, ErrorString
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value)
def read_config_file(option, opt, value, parser):
"""
Read a configuration file during option processing. (Option callback.)
"""
try:
new_settings = parser.get_config_file_settings(value)
except ValueError, error:
parser.error(error)
parser.values.update(new_settings, parser)
def validate_encoding(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup(value)
except LookupError:
        raise LookupError('setting "%s": unknown encoding: "%s"'
                          % (setting, value)), None, sys.exc_info()[2]
return value
def validate_encoding_error_handler(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup_error(value)
except LookupError:
        raise LookupError(
            'unknown encoding error handler: "%s" (choices: '
            '"strict", "ignore", "replace", "backslashreplace", '
            '"xmlcharrefreplace", and possibly others; see documentation for '
            'the Python ``codecs`` module)' % value), \
            None, sys.exc_info()[2]
return value
def validate_encoding_and_error_handler(
setting, value, option_parser, config_parser=None, config_section=None):
"""
Side-effect: if an error handler is included in the value, it is inserted
into the appropriate place as if it was a separate setting/option.
"""
if ':' in value:
encoding, handler = value.split(':')
validate_encoding_error_handler(
setting + '_error_handler', handler, option_parser,
config_parser, config_section)
if config_parser:
config_parser.set(config_section, setting + '_error_handler',
handler)
else:
setattr(option_parser.values, setting + '_error_handler', handler)
else:
encoding = value
validate_encoding(setting, encoding, option_parser,
config_parser, config_section)
return encoding
def validate_boolean(setting, value, option_parser,
config_parser=None, config_section=None):
"""Check/normalize boolean settings:
True: '1', 'on', 'yes', 'true'
    False: '0', 'off', 'no', 'false', ''
"""
if isinstance(value, bool):
return value
try:
return option_parser.booleans[value.strip().lower()]
except KeyError:
        raise LookupError('unknown boolean value: "%s"' % value), \
            None, sys.exc_info()[2]
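# Usage sketch (illustrative): validators receive the setting name, the raw
# value, and the active option parser (whose `booleans` table is used here)::
#
#     from docutils.frontend import OptionParser, validate_boolean
#
#     parser = OptionParser()
#     assert validate_boolean('generator', 'yes', parser) is True
#     assert validate_boolean('generator', '', parser) is False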
def validate_ternary(setting, value, option_parser,
config_parser=None, config_section=None):
"""Check/normalize three-value settings:
True: '1', 'on', 'yes', 'true'
    False: '0', 'off', 'no', 'false', ''
any other value: returned as-is.
"""
if isinstance(value, bool) or value is None:
return value
try:
return option_parser.booleans[value.strip().lower()]
except KeyError:
return value
def validate_nonnegative_int(setting, value, option_parser,
config_parser=None, config_section=None):
value = int(value)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def validate_threshold(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return int(value)
except ValueError:
try:
return option_parser.thresholds[value.lower()]
except (KeyError, AttributeError):
            raise LookupError('unknown threshold: %r.' % value), \
                None, sys.exc_info()[2]
def validate_colon_separated_string_list(
setting, value, option_parser, config_parser=None, config_section=None):
if not isinstance(value, list):
value = value.split(':')
else:
last = value.pop()
value.extend(last.split(':'))
return value
def validate_comma_separated_list(setting, value, option_parser,
config_parser=None, config_section=None):
"""Check/normalize list arguments (split at "," and strip whitespace).
"""
    # `value` is already a ``list`` when given as a command-line option
    # with "action" "append"; otherwise it is a ``unicode`` or ``str``.
if not isinstance(value, list):
value = [value]
# this function is called for every option added to `value`
# -> split the last item and append the result:
last = value.pop()
items = [i.strip(u' \t\n') for i in last.split(u',') if i.strip(u' \t\n')]
value.extend(items)
return value
def validate_url_trailing_slash(
setting, value, option_parser, config_parser=None, config_section=None):
if not value:
return './'
elif value.endswith('/'):
return value
else:
return value + '/'
def validate_dependency_file(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return docutils.utils.DependencyList(value)
except IOError:
return docutils.utils.DependencyList(None)
def validate_strip_class(setting, value, option_parser,
config_parser=None, config_section=None):
# value is a comma separated string list:
value = validate_comma_separated_list(setting, value, option_parser,
config_parser, config_section)
# validate list elements:
for cls in value:
normalized = docutils.nodes.make_id(cls)
if cls != normalized:
raise ValueError('invalid class value %r (perhaps %r?)'
% (cls, normalized))
return value
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from
`OptionParser.relative_path_settings`.
"""
if base_path is None:
base_path = os.getcwdu() # type(base_path) == unicode
# to allow combining non-ASCII cwd with unicode values in `pathdict`
for key in keys:
if key in pathdict:
value = pathdict[key]
if isinstance(value, list):
value = [make_one_path_absolute(base_path, path)
for path in value]
elif value:
value = make_one_path_absolute(base_path, value)
pathdict[key] = value
def make_one_path_absolute(base_path, path):
return os.path.abspath(os.path.join(base_path, path))
def filter_settings_spec(settings_spec, *exclude, **replace):
"""Return a copy of `settings_spec` excluding/replacing some settings.
`settings_spec` is a tuple of configuration settings with a structure
described for docutils.SettingsSpec.settings_spec.
Optional positional arguments are names of to-be-excluded settings.
Keyword arguments are option specification replacements.
(See the html4strict writer for an example.)
"""
settings = list(settings_spec)
# every third item is a sequence of option tuples
for i in range(2, len(settings), 3):
newopts = []
for opt_spec in settings[i]:
# opt_spec is ("<help>", [<option strings>], {<keyword args>})
opt_name = [opt_string[2:].replace('-', '_')
for opt_string in opt_spec[1]
if opt_string.startswith('--')
][0]
if opt_name in exclude:
continue
if opt_name in replace.keys():
newopts.append(replace[opt_name])
else:
newopts.append(opt_spec)
settings[i] = tuple(newopts)
return tuple(settings)
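# Usage sketch (illustrative; the option names are borrowed from the
# html4css1 writer): dropping one inherited setting and replacing another::
#
#     from docutils.frontend import filter_settings_spec
#     from docutils.writers import html4css1
#
#     spec = filter_settings_spec(
#         html4css1.Writer.settings_spec,
#         'field_name_limit',                    # exclude this setting
#         option_limit=('Replacement help text.',
#                       ['--option-limit'],
#                       {'default': 0, 'metavar': '<level>'}))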
class Values(optparse.Values):
"""
Updates list attributes by extension rather than by replacement.
Works in conjunction with the `OptionParser.lists` instance attribute.
"""
def __init__(self, *args, **kwargs):
optparse.Values.__init__(self, *args, **kwargs)
if (not hasattr(self, 'record_dependencies')
or self.record_dependencies is None):
# Set up dependency list, in case it is needed.
self.record_dependencies = docutils.utils.DependencyList()
def update(self, other_dict, option_parser):
if isinstance(other_dict, Values):
other_dict = other_dict.__dict__
other_dict = other_dict.copy()
for setting in option_parser.lists.keys():
if (hasattr(self, setting) and setting in other_dict):
value = getattr(self, setting)
if value:
value += other_dict[setting]
del other_dict[setting]
self._update_loose(other_dict)
def copy(self):
"""Return a shallow copy of `self`."""
return self.__class__(defaults=self.__dict__)
class Option(optparse.Option):
ATTRS = optparse.Option.ATTRS + ['validator', 'overrides']
def process(self, opt, value, values, parser):
"""
Call the validator function on applicable settings and
evaluate the 'overrides' option.
Extends `optparse.Option.process`.
"""
result = optparse.Option.process(self, opt, value, values, parser)
setting = self.dest
if setting:
if self.validator:
value = getattr(values, setting)
try:
new_value = self.validator(setting, value, parser)
except Exception, error:
                    raise optparse.OptionValueError(
                        'Error in option "%s":\n %s'
                        % (opt, ErrorString(error))), \
                        None, sys.exc_info()[2]
setattr(values, setting, new_value)
if self.overrides:
setattr(values, self.overrides, None)
return result
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
"""
Parser for command-line and library use. The `settings_spec`
specification here and in other Docutils components are merged to build
the set of command-line options and runtime settings for this process.
Common settings (defined below) and component-specific settings must not
conflict. Short options are reserved for common settings, and components
are restrict to using long options.
"""
standard_config_files = [
'/etc/docutils.conf', # system-wide
'./docutils.conf', # project-specific
'~/.docutils'] # user-specific
"""Docutils configuration files, using ConfigParser syntax. Filenames
will be tilde-expanded later. Later files override earlier ones."""
threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
"""Possible inputs for for --report and --halt threshold values."""
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
booleans={'1': True, 'on': True, 'yes': True, 'true': True,
'0': False, 'off': False, 'no': False, 'false': False, '': False}
"""Lookup table for boolean configuration file settings."""
default_error_encoding = getattr(sys.stderr, 'encoding',
None) or locale_encoding or 'ascii'
default_error_encoding_error_handler = 'backslashreplace'
settings_spec = (
'General Docutils Options',
None,
(('Specify the document title as metadata.',
['--title'], {}),
('Include a "Generated by Docutils" credit and link.',
['--generator', '-g'], {'action': 'store_true',
'validator': validate_boolean}),
('Do not include a generator credit.',
['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
('Include the date at the end of the document (UTC).',
['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
'dest': 'datestamp'}),
('Include the time & date (UTC).',
['--time', '-t'], {'action': 'store_const',
'const': '%Y-%m-%d %H:%M UTC',
'dest': 'datestamp'}),
('Do not include a datestamp of any kind.',
['--no-datestamp'], {'action': 'store_const', 'const': None,
'dest': 'datestamp'}),
('Include a "View document source" link.',
['--source-link', '-s'], {'action': 'store_true',
'validator': validate_boolean}),
('Use <URL> for a source link; implies --source-link.',
['--source-url'], {'metavar': '<URL>'}),
('Do not include a "View document source" link.',
['--no-source-link'],
{'action': 'callback', 'callback': store_multiple,
'callback_args': ('source_link', 'source_url')}),
('Link from section headers to TOC entries. (default)',
['--toc-entry-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
'default': 'entry'}),
('Link from section headers to the top of the TOC.',
['--toc-top-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
('Disable backlinks to the table of contents.',
['--no-toc-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_false'}),
('Link from footnotes/citations to references. (default)',
['--footnote-backlinks'],
{'action': 'store_true', 'default': 1,
'validator': validate_boolean}),
('Disable backlinks from footnotes and citations.',
['--no-footnote-backlinks'],
{'dest': 'footnote_backlinks', 'action': 'store_false'}),
('Enable section numbering by Docutils. (default)',
['--section-numbering'],
{'action': 'store_true', 'dest': 'sectnum_xform',
'default': 1, 'validator': validate_boolean}),
('Disable section numbering by Docutils.',
['--no-section-numbering'],
{'action': 'store_false', 'dest': 'sectnum_xform'}),
('Remove comment elements from the document tree.',
['--strip-comments'],
{'action': 'store_true', 'validator': validate_boolean}),
('Leave comment elements in the document tree. (default)',
['--leave-comments'],
{'action': 'store_false', 'dest': 'strip_comments'}),
('Remove all elements with classes="<class>" from the document tree. '
'Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-elements-with-class'],
{'action': 'append', 'dest': 'strip_elements_with_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Remove all classes="<class>" attributes from elements in the '
'document tree. Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-class'],
{'action': 'append', 'dest': 'strip_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Report system messages at or higher than <level>: "info" or "1", '
'"warning"/"2" (default), "error"/"3", "severe"/"4", "none"/"5"',
['--report', '-r'], {'choices': threshold_choices, 'default': 2,
'dest': 'report_level', 'metavar': '<level>',
'validator': validate_threshold}),
('Report all system messages. (Same as "--report=1".)',
['--verbose', '-v'], {'action': 'store_const', 'const': 1,
'dest': 'report_level'}),
('Report no system messages. (Same as "--report=5".)',
['--quiet', '-q'], {'action': 'store_const', 'const': 5,
'dest': 'report_level'}),
('Halt execution at system messages at or above <level>. '
'Levels as in --report. Default: 4 (severe).',
['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
'default': 4, 'metavar': '<level>',
'validator': validate_threshold}),
('Halt at the slightest problem. Same as "--halt=info".',
['--strict'], {'action': 'store_const', 'const': 1,
'dest': 'halt_level'}),
('Enable a non-zero exit status for non-halting system messages at '
'or above <level>. Default: 5 (disabled).',
['--exit-status'], {'choices': threshold_choices,
'dest': 'exit_status_level',
'default': 5, 'metavar': '<level>',
'validator': validate_threshold}),
('Enable debug-level system messages and diagnostics.',
['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
('Disable debug output. (default)',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
('Enable Python tracebacks when Docutils is halted.',
['--traceback'], {'action': 'store_true', 'default': None,
'validator': validate_boolean}),
('Disable Python tracebacks. (default)',
['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
('Specify the encoding and optionally the '
'error handler of input text. Default: <locale-dependent>:strict.',
['--input-encoding', '-i'],
{'metavar': '<name[:handler]>',
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for undecodable characters. '
'Choices: "strict" (default), "ignore", and "replace".',
['--input-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify the text encoding and optionally the error handler for '
'output. Default: UTF-8:strict.',
['--output-encoding', '-o'],
{'metavar': '<name[:handler]>', 'default': 'utf-8',
'validator': validate_encoding_and_error_handler}),
('Specify error handler for unencodable output characters; '
'"strict" (default), "ignore", "replace", '
'"xmlcharrefreplace", "backslashreplace".',
['--output-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify text encoding and error handler for error output. '
'Default: %s:%s.'
% (default_error_encoding, default_error_encoding_error_handler),
['--error-encoding', '-e'],
{'metavar': '<name[:handler]>', 'default': default_error_encoding,
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for unencodable characters in '
'error output. Default: %s.'
% default_error_encoding_error_handler,
['--error-encoding-error-handler'],
{'default': default_error_encoding_error_handler,
'validator': validate_encoding_error_handler}),
('Specify the language (as BCP 47 language tag). Default: en.',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
'metavar': '<name>'}),
('Write output file dependencies to <file>.',
['--record-dependencies'],
{'metavar': '<file>', 'validator': validate_dependency_file,
'default': None}), # default set in Values class
('Read configuration settings from <file>, if it exists.',
['--config'], {'metavar': '<file>', 'type': 'string',
'action': 'callback', 'callback': read_config_file}),
("Show this program's version number and exit.",
['--version', '-V'], {'action': 'version'}),
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
# Typically not useful for non-programmatical use:
(SUPPRESS_HELP, ['--id-prefix'], {'default': ''}),
(SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}),
# Hidden options, for development use only:
(SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--expose-internal-attribute'],
{'action': 'append', 'dest': 'expose_internals',
'validator': validate_colon_separated_string_list}),
(SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}),
))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
settings_defaults = {'_disable_config': None,
'_source': None,
'_destination': None,
'_config_files': None}
"""Defaults for settings that don't have command-line option equivalents."""
relative_path_settings = ('warning_stream',)
config_section = 'general'
version_template = ('%%prog (Docutils %s [%s], Python %s, on %s)'
% (docutils.__version__, docutils.__version_details__,
sys.version.split()[0], sys.platform))
"""Default version message."""
def __init__(self, components=(), defaults=None, read_config_files=None,
*args, **kwargs):
"""
`components` is a list of Docutils components each containing a
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
self.lists = {}
"""Set of list-type settings."""
self.config_files = []
"""List of paths of applied configuration files."""
optparse.OptionParser.__init__(
self, option_class=Option, add_help_option=None,
formatter=optparse.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
self.components = (self,) + tuple(components)
self.populate_from_components(self.components)
self.set_defaults_from_dict(defaults or {})
if read_config_files and not self.defaults['_disable_config']:
try:
config_settings = self.get_standard_config_settings()
except ValueError, error:
self.error(error)
self.set_defaults_from_dict(config_settings.__dict__)
def populate_from_components(self, components):
"""
For each component, first populate from the `SettingsSpec.settings_spec`
structure, then from the `SettingsSpec.settings_defaults` dictionary.
After all components have been processed, check for and populate from
each component's `SettingsSpec.settings_default_overrides` dictionary.
"""
for component in components:
if component is None:
continue
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
for i in range(0, len(settings_spec), 3):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optparse.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
option = group.add_option(help=help_text, *option_strings,
**kwargs)
if kwargs.get('action') == 'append':
self.lists[option.dest] = 1
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides)
def get_standard_config_files(self):
"""Return list of config files, from environment or standard."""
try:
config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
except KeyError:
config_files = self.standard_config_files
        # If 'HOME' is not set, expanduser() requires the 'pwd' module which is
# not available under certain environments, for example, within
# mod_python. The publisher ends up in here, and we need to publish
# from within mod_python. Therefore we need to avoid expanding when we
# are in those environments.
expand = os.path.expanduser
if 'HOME' not in os.environ:
try:
import pwd
except ImportError:
expand = lambda x: x
return [expand(f) for f in config_files if f.strip()]
def get_standard_config_settings(self):
settings = Values()
for filename in self.get_standard_config_files():
settings.update(self.get_config_file_settings(filename), self)
return settings
def get_config_file_settings(self, config_file):
"""Returns a dictionary containing appropriate config file settings."""
parser = ConfigParser()
parser.read(config_file, self)
self.config_files.extend(parser._files)
base_path = os.path.dirname(config_file)
applied = {}
settings = Values()
for component in self.components:
if not component:
continue
for section in (tuple(component.config_section_dependencies or ())
+ (component.config_section,)):
if section in applied:
continue
applied[section] = 1
settings.update(parser.get_section(section), self)
make_paths_absolute(
settings.__dict__, self.relative_path_settings, base_path)
return settings.__dict__
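    # Usage sketch (illustrative): reading one config file programmatically,
    # assuming a local "docutils.conf" such as::
    #
    #     [general]
    #     report_level: 3
    #
    # could be done with::
    #
    #     from docutils.frontend import OptionParser
    #     from docutils.parsers.rst import Parser
    #
    #     option_parser = OptionParser(components=(Parser,))
    #     config = option_parser.get_config_file_settings('docutils.conf')
    #     # -> a dict like {'report_level': 3, ...}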
def check_values(self, values, args):
"""Store positional arguments as runtime settings."""
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings)
values._config_files = self.config_files
return values
def check_args(self, args):
source = destination = None
if args:
source = args.pop(0)
if source == '-': # means stdin
source = None
if args:
destination = args.pop(0)
if destination == '-': # means stdout
destination = None
if args:
self.error('Maximum 2 arguments allowed.')
if source and source == destination:
self.error('Do not specify the same file for both source and '
'destination. It will clobber the source file.')
return source, destination
def set_defaults_from_dict(self, defaults):
self.defaults.update(defaults)
def get_default_values(self):
"""Needed to get custom `Values` instances."""
defaults = Values(self.defaults)
defaults._config_files = self.config_files
return defaults
def get_option_by_dest(self, dest):
"""
Get an option by its dest.
If you're supplying a dest which is shared by several options,
it is undefined which option of those is returned.
A KeyError is raised if there is no option with the supplied
dest.
"""
for group in self.option_groups + [self]:
for option in group.option_list:
if option.dest == dest:
return option
raise KeyError('No option with dest == %r.' % dest)
class ConfigParser(CP.RawConfigParser):
old_settings = {
'pep_stylesheet': ('pep_html writer', 'stylesheet'),
'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
'pep_template': ('pep_html writer', 'template')}
"""{old setting: (new section, new setting)} mapping, used by
`handle_old_config`, to convert settings from the old [options] section."""
old_warning = """
The "[option]" section is deprecated. Support for old-format configuration
files may be removed in a future Docutils release. Please revise your
configuration files. See <http://docutils.sf.net/docs/user/config.html>,
section "Old-Format Configuration Files".
"""
not_utf8_error = """\
Unable to read configuration file "%s": content not encoded as UTF-8.
Skipping "%s" configuration file.
"""
def __init__(self, *args, **kwargs):
CP.RawConfigParser.__init__(self, *args, **kwargs)
self._files = []
"""List of paths of configuration files read."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def read(self, filenames, option_parser):
if type(filenames) in (str, unicode):
filenames = [filenames]
for filename in filenames:
try:
# Config files must be UTF-8-encoded:
fp = codecs.open(filename, 'r', 'utf-8')
except IOError:
continue
try:
if sys.version_info < (3,2):
CP.RawConfigParser.readfp(self, fp, filename)
else:
CP.RawConfigParser.read_file(self, fp, filename)
except UnicodeDecodeError:
self._stderr.write(self.not_utf8_error % (filename, filename))
fp.close()
continue
fp.close()
self._files.append(filename)
if self.has_section('options'):
self.handle_old_config(filename)
self.validate_settings(filename, option_parser)
def handle_old_config(self, filename):
warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
filename, 0)
options = self.get_section('options')
if not self.has_section('general'):
self.add_section('general')
for key, value in options.items():
if key in self.old_settings:
section, setting = self.old_settings[key]
if not self.has_section(section):
self.add_section(section)
else:
section = 'general'
setting = key
if not self.has_option(section, setting):
self.set(section, setting, value)
self.remove_section('options')
def validate_settings(self, filename, option_parser):
"""
Call the validator function and implement overrides on all applicable
settings.
"""
for section in self.sections():
for setting in self.options(section):
try:
option = option_parser.get_option_by_dest(setting)
except KeyError:
continue
if option.validator:
value = self.get(section, setting)
try:
new_value = option.validator(
setting, value, option_parser,
config_parser=self, config_section=section)
except Exception, error:
                        raise ValueError(
                            'Error in config file "%s", section "[%s]":\n'
                            ' %s\n'
                            ' %s = %s'
                            % (filename, section, ErrorString(error),
                               setting, value)), None, sys.exc_info()[2]
self.set(section, setting, new_value)
if option.overrides:
self.set(section, option.overrides, None)
def optionxform(self, optionstr):
"""
Transform '-' to '_' so the cmdline form of option names can be used.
"""
return optionstr.lower().replace('-', '_')
def get_section(self, section):
"""
Return a given section as a dictionary (empty if the section
doesn't exist).
"""
section_dict = {}
if self.has_section(section):
for option in self.options(section):
section_dict[option] = self.get(section, option)
return section_dict
class ConfigDeprecationWarning(DeprecationWarning):
"""Warning for deprecated configuration file features."""
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/frontend.py",
"copies": "2",
"size": "35388",
"license": "mit",
"hash": 9055995411965828000,
"line_mean": 42.1035322777,
"line_max": 84,
"alpha_frac": 0.572764779,
"autogenerated": false,
"ratio": 4.42239440139965,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5995159180399651,
"avg_score": null,
"num_lines": null
} |
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
from docutils.utils.code_analyzer import Lexer, LexerError, NumberLines
class BasePseudoSection(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
node_class = None
"""Node class to be used (must be set in subclasses)."""
def run(self):
if not (self.state_machine.match_titles
or isinstance(self.state_machine.node, nodes.sidebar)):
raise self.error('The "%s" directive may not be used within '
'topics or body elements.' % self.name)
self.assert_has_content()
title_text = self.arguments[0]
textnodes, messages = self.state.inline_text(title_text, self.lineno)
titles = [nodes.title(title_text, '', *textnodes)]
# Sidebar uses this code.
if 'subtitle' in self.options:
textnodes, more_messages = self.state.inline_text(
self.options['subtitle'], self.lineno)
titles.append(nodes.subtitle(self.options['subtitle'], '',
*textnodes))
messages.extend(more_messages)
text = '\n'.join(self.content)
node = self.node_class(text, *(titles + messages))
node['classes'] += self.options.get('class', [])
self.add_name(node)
if text:
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Topic(BasePseudoSection):
node_class = nodes.topic
class Sidebar(BasePseudoSection):
node_class = nodes.sidebar
option_spec = BasePseudoSection.option_spec.copy()
option_spec['subtitle'] = directives.unchanged_required
def run(self):
if isinstance(self.state_machine.node, nodes.sidebar):
raise self.error('The "%s" directive may not be used within a '
'sidebar element.' % self.name)
return BasePseudoSection.run(self)
class LineBlock(Directive):
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
block = nodes.line_block(classes=self.options.get('class', []))
self.add_name(block)
node_list = [block]
for line_text in self.content:
text_nodes, messages = self.state.inline_text(
line_text.strip(), self.lineno + self.content_offset)
line = nodes.line(line_text, '', *text_nodes)
if line_text.strip():
line.indent = len(line_text) - len(line_text.lstrip())
block += line
node_list.extend(messages)
self.content_offset += 1
self.state.nest_line_block_lines(block)
return node_list
class ParsedLiteral(Directive):
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
text_nodes, messages = self.state.inline_text(text, self.lineno)
node = nodes.literal_block(text, '', *text_nodes, **self.options)
node.line = self.content_offset + 1
self.add_name(node)
return [node] + messages
class CodeBlock(Directive):
"""Parse and mark up content of a code block.
Configuration setting: syntax_highlight
Highlight Code content with Pygments?
Possible values: ('long', 'short', 'none')
"""
optional_arguments = 1
option_spec = {'class': directives.class_option,
'name': directives.unchanged,
'number-lines': directives.unchanged # integer or None
}
has_content = True
def run(self):
self.assert_has_content()
if self.arguments:
language = self.arguments[0]
else:
language = ''
set_classes(self.options)
classes = ['code']
if language:
classes.append(language)
if 'classes' in self.options:
classes.extend(self.options['classes'])
# set up lexical analyzer
try:
tokens = Lexer(u'\n'.join(self.content), language,
self.state.document.settings.syntax_highlight)
except LexerError, error:
raise self.warning(error)
if 'number-lines' in self.options:
# optional argument `startline`, defaults to 1
try:
startline = int(self.options['number-lines'] or 1)
except ValueError:
raise self.error(':number-lines: with non-integer start value')
endline = startline + len(self.content)
# add linenumber filter:
tokens = NumberLines(tokens, startline, endline)
node = nodes.literal_block('\n'.join(self.content), classes=classes)
self.add_name(node)
# if called from "include", set the source
if 'source' in self.options:
node.attributes['source'] = self.options['source']
# analyze content and add nodes for every token
for classes, value in tokens:
# print (classes, value)
if classes:
node += nodes.inline(value, value, classes=classes)
else:
# insert as Text to decrease the verbosity of the output
node += nodes.Text(value, value)
return [node]
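# Usage sketch (illustrative): the kind of input this directive consumes;
# with ``syntax_highlight='none'`` it also works without Pygments installed::
#
#     from docutils.core import publish_string
#
#     rst = (u'.. code:: python\n'
#            u'   :number-lines:\n'
#            u'\n'
#            u'   print("hello")\n')
#     xml = publish_string(rst, writer_name='pseudoxml',
#                          settings_overrides={'syntax_highlight': 'none',
#                                              'output_encoding': 'unicode'})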
class MathBlock(Directive):
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'?
# 'nowrap': directives.flag,
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
# join lines, separate blocks
content = '\n'.join(self.content).split('\n\n')
_nodes = []
for block in content:
if not block:
continue
node = nodes.math_block(self.block_text, block, **self.options)
node.line = self.content_offset + 1
self.add_name(node)
_nodes.append(node)
return _nodes
class Rubric(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
def run(self):
set_classes(self.options)
rubric_text = self.arguments[0]
textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
self.add_name(rubric)
return [rubric] + messages
class BlockQuote(Directive):
has_content = True
classes = []
def run(self):
self.assert_has_content()
elements = self.state.block_quote(self.content, self.content_offset)
for element in elements:
if isinstance(element, nodes.block_quote):
element['classes'] += self.classes
return elements
class Epigraph(BlockQuote):
classes = ['epigraph']
class Highlights(BlockQuote):
classes = ['highlights']
class PullQuote(BlockQuote):
classes = ['pull-quote']
class Compound(Directive):
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
node = nodes.compound(text)
node['classes'] += self.options.get('class', [])
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Container(Directive):
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'name': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = nodes.container(text)
node['classes'].extend(classes)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/directives/body.py",
"copies": "2",
"size": "9201",
"license": "mit",
"hash": -8823078911328821000,
"line_mean": 30.8373702422,
"line_max": 79,
"alpha_frac": 0.5928703402,
"autogenerated": false,
"ratio": 4.144594594594595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5737464934794595,
"avg_score": null,
"num_lines": null
} |
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
import urllib
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst.roles import set_classes
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
class Image(Directive):
align_h_values = ('left', 'center', 'right')
align_v_values = ('top', 'middle', 'bottom')
align_values = align_v_values + align_h_values
def align(argument):
# This is not callable as self.align. We cannot make it a
# staticmethod because we're saving an unbound method in
# option_spec below.
return directives.choice(argument, Image.align_values)
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.percentage,
'align': align,
'name': directives.unchanged,
'target': directives.unchanged_required,
'class': directives.class_option}
def run(self):
if 'align' in self.options:
if isinstance(self.state, states.SubstitutionDef):
# Check for align_v_values.
if self.options['align'] not in self.align_v_values:
raise self.error(
'Error in "%s" directive: "%s" is not a valid value '
'for the "align" option within a substitution '
'definition. Valid values for "align" are: "%s".'
% (self.name, self.options['align'],
'", "'.join(self.align_v_values)))
elif self.options['align'] not in self.align_h_values:
raise self.error(
'Error in "%s" directive: "%s" is not a valid value for '
'the "align" option. Valid values for "align" are: "%s".'
% (self.name, self.options['align'],
'", "'.join(self.align_h_values)))
messages = []
reference = directives.uri(self.arguments[0])
self.options['uri'] = reference
reference_node = None
if 'target' in self.options:
            block = states.escape2null(
                self.options['target']).splitlines()
target_type, data = self.state.parse_target(
block, self.block_text, self.lineno)
if target_type == 'refuri':
reference_node = nodes.reference(refuri=data)
elif target_type == 'refname':
reference_node = nodes.reference(
refname=fully_normalize_name(data),
name=whitespace_normalize_name(data))
reference_node.indirect_reference_name = data
self.state.document.note_refname(reference_node)
else: # malformed target
messages.append(data) # data is a system message
del self.options['target']
set_classes(self.options)
image_node = nodes.image(self.block_text, **self.options)
self.add_name(image_node)
if reference_node:
reference_node += image_node
return messages + [reference_node]
else:
return messages + [image_node]
class Figure(Image):
def align(argument):
return directives.choice(argument, Figure.align_h_values)
def figwidth_value(argument):
if argument.lower() == 'image':
return 'image'
else:
return directives.length_or_percentage_or_unitless(argument, 'px')
option_spec = Image.option_spec.copy()
option_spec['figwidth'] = figwidth_value
option_spec['figclass'] = directives.class_option
option_spec['align'] = align
has_content = True
def run(self):
figwidth = self.options.pop('figwidth', None)
figclasses = self.options.pop('figclass', None)
align = self.options.pop('align', None)
(image_node,) = Image.run(self)
if isinstance(image_node, nodes.system_message):
return [image_node]
figure_node = nodes.figure('', image_node)
if figwidth == 'image':
if PIL and self.state.document.settings.file_insertion_enabled:
imagepath = urllib.url2pathname(image_node['uri'])
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (IOError, UnicodeEncodeError):
pass # TODO: warn?
else:
self.state.document.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
figure_node['width'] = img.size[0]
del img
elif figwidth is not None:
figure_node['width'] = figwidth
if figclasses:
figure_node['classes'] += figclasses
if align:
figure_node['align'] = align
if self.content:
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
first_node = node[0]
if isinstance(first_node, nodes.paragraph):
caption = nodes.caption(first_node.rawsource, '',
*first_node.children)
caption.source = first_node.source
caption.line = first_node.line
figure_node += caption
elif not (isinstance(first_node, nodes.comment)
and len(first_node) == 0):
error = self.state_machine.reporter.error(
'Figure caption must be a paragraph or empty comment.',
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [figure_node, error]
if len(node) > 1:
figure_node += nodes.legend('', *node[1:])
return [figure_node]
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/directives/images.py",
"copies": "2",
"size": "6829",
"license": "mit",
"hash": -3690031516343331000,
"line_mean": 40.6402439024,
"line_max": 78,
"alpha_frac": 0.5596719871,
"autogenerated": false,
"ratio": 4.420064724919094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5979736712019094,
"avg_score": null,
"num_lines": null
} |
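# Hedged sketch for the Image directive above; the file name and URL are
# made-up placeholders.
from docutils.core import publish_doctree
doctree = publish_doctree(".. image:: pic.png\n"
                          "   :alt: example\n"
                          "   :target: http://example.org/\n")
# Because ":target:" is given, run() wraps the image node in a reference
# node (reference_node += image_node), so the tree holds reference > image
# rather than a bare image element.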
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
import urllib.request, urllib.parse, urllib.error
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst.roles import set_classes
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
class Image(Directive):
align_h_values = ('left', 'center', 'right')
align_v_values = ('top', 'middle', 'bottom')
align_values = align_v_values + align_h_values
def align(argument):
# This is not callable as self.align. We cannot make it a
# staticmethod because we're saving an unbound method in
# option_spec below.
return directives.choice(argument, Image.align_values)
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.percentage,
'align': align,
'name': directives.unchanged,
'target': directives.unchanged_required,
'class': directives.class_option}
def run(self):
if 'align' in self.options:
if isinstance(self.state, states.SubstitutionDef):
# Check for align_v_values.
if self.options['align'] not in self.align_v_values:
raise self.error(
'Error in "%s" directive: "%s" is not a valid value '
'for the "align" option within a substitution '
'definition. Valid values for "align" are: "%s".'
% (self.name, self.options['align'],
'", "'.join(self.align_v_values)))
elif self.options['align'] not in self.align_h_values:
raise self.error(
'Error in "%s" directive: "%s" is not a valid value for '
'the "align" option. Valid values for "align" are: "%s".'
% (self.name, self.options['align'],
'", "'.join(self.align_h_values)))
messages = []
reference = directives.uri(self.arguments[0])
self.options['uri'] = reference
reference_node = None
if 'target' in self.options:
block = states.escape2null(
self.options['target']).splitlines()
block = [line for line in block]
target_type, data = self.state.parse_target(
block, self.block_text, self.lineno)
if target_type == 'refuri':
reference_node = nodes.reference(refuri=data)
elif target_type == 'refname':
reference_node = nodes.reference(
refname=fully_normalize_name(data),
name=whitespace_normalize_name(data))
reference_node.indirect_reference_name = data
self.state.document.note_refname(reference_node)
else: # malformed target
messages.append(data) # data is a system message
del self.options['target']
set_classes(self.options)
image_node = nodes.image(self.block_text, **self.options)
self.add_name(image_node)
if reference_node:
reference_node += image_node
return messages + [reference_node]
else:
return messages + [image_node]
class Figure(Image):
def align(argument):
return directives.choice(argument, Figure.align_h_values)
def figwidth_value(argument):
if argument.lower() == 'image':
return 'image'
else:
return directives.length_or_percentage_or_unitless(argument, 'px')
option_spec = Image.option_spec.copy()
option_spec['figwidth'] = figwidth_value
option_spec['figclass'] = directives.class_option
option_spec['align'] = align
has_content = True
def run(self):
figwidth = self.options.pop('figwidth', None)
figclasses = self.options.pop('figclass', None)
align = self.options.pop('align', None)
(image_node,) = Image.run(self)
if isinstance(image_node, nodes.system_message):
return [image_node]
figure_node = nodes.figure('', image_node)
if figwidth == 'image':
if PIL and self.state.document.settings.file_insertion_enabled:
imagepath = urllib.request.url2pathname(image_node['uri'])
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (IOError, UnicodeEncodeError):
pass # TODO: warn?
else:
self.state.document.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
figure_node['width'] = img.size[0]
del img
elif figwidth is not None:
figure_node['width'] = figwidth
if figclasses:
figure_node['classes'] += figclasses
if align:
figure_node['align'] = align
if self.content:
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
first_node = node[0]
if isinstance(first_node, nodes.paragraph):
caption = nodes.caption(first_node.rawsource, '',
*first_node.children)
caption.source = first_node.source
caption.line = first_node.line
figure_node += caption
elif not (isinstance(first_node, nodes.comment)
and len(first_node) == 0):
error = self.state_machine.reporter.error(
'Figure caption must be a paragraph or empty comment.',
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [figure_node, error]
if len(node) > 1:
figure_node += nodes.legend('', *node[1:])
return [figure_node]
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/directives/images.py",
"copies": "2",
"size": "6873",
"license": "mit",
"hash": 463692073697503940,
"line_mean": 40.9085365854,
"line_max": 78,
"alpha_frac": 0.5613269315,
"autogenerated": false,
"ratio": 4.414258188824663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007953849286601899,
"num_lines": 164
} |
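# Hedged sketch of the Figure directive above. "chart.png" is a
# placeholder; the ":figwidth: image" branch would additionally require
# PIL and a readable file, so a percentage width is used instead.
from docutils.core import publish_doctree
doctree = publish_doctree(".. figure:: chart.png\n"
                          "   :figwidth: 80%\n"
                          "   :align: center\n"
                          "\n"
                          "   A caption paragraph; any further content\n"
                          "   would become the figure's legend.\n")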
"""
Directives for typically HTML-specific constructs.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states
from docutils.transforms import components
class MetaBody(states.SpecializedBody):
class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
"""HTML-specific "meta" element."""
pass
def field_marker(self, match, context, next_state):
"""Meta element."""
node, blank_finish = self.parsemeta(match)
self.parent += node
return [], next_state, []
def parsemeta(self, match):
name = self.parse_field_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
node = self.meta()
pending = nodes.pending(components.Filter,
{'component': 'writer',
'format': 'html',
'nodes': [node]})
node['content'] = ' '.join(indented)
if not indented:
line = self.state_machine.line
msg = self.reporter.info(
'No content for meta tag "%s".' % name,
nodes.literal_block(line, line))
return msg, blank_finish
tokens = name.split()
try:
attname, val = utils.extract_name_value(tokens[0])[0]
node[attname.lower()] = val
except utils.NameValueError:
node['name'] = tokens[0]
for token in tokens[1:]:
try:
attname, val = utils.extract_name_value(token)[0]
node[attname.lower()] = val
except utils.NameValueError, detail:
line = self.state_machine.line
msg = self.reporter.error(
'Error parsing meta tag attribute "%s": %s.'
% (token, detail), nodes.literal_block(line, line))
return msg, blank_finish
self.document.note_pending(pending)
return pending, blank_finish
class Meta(Directive):
has_content = True
SMkwargs = {'state_classes': (MetaBody,)}
def run(self):
self.assert_has_content()
node = nodes.Element()
new_line_offset, blank_finish = self.state.nested_list_parse(
self.content, self.content_offset, node,
initial_state='MetaBody', blank_finish=True,
state_machine_kwargs=self.SMkwargs)
if (new_line_offset - self.content_offset) != len(self.content):
# incomplete parse of block?
error = self.state_machine.reporter.error(
'Invalid meta directive.',
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
node += error
return node.children
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/directives/html.py",
"copies": "2",
"size": "3056",
"license": "mit",
"hash": 8258224157546362000,
"line_mean": 34.5348837209,
"line_max": 73,
"alpha_frac": 0.5726439791,
"autogenerated": false,
"ratio": 4.256267409470752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016377333770062235,
"num_lines": 86
} |
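# Hedged sketch of the Meta directive above: each field becomes an
# HTML-only "meta" node, kept or dropped later by the Filter transform.
from docutils.core import publish_string
rst = (".. meta::\n"
       "   :keywords: docutils, reStructuredText\n"
       "   :description lang=en: a short page summary\n")
html = publish_string(rst, writer_name='html')
# HTML-family writers keep the tags in <head>; other writers discard the
# pending nodes, as described in docutils.transforms.components below.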
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):
"""
Include or exclude elements which depend on a specific Docutils component.
For use with `nodes.pending` elements. A "pending" element's dictionary
attribute ``details`` must contain the keys "component" and "format". The
value of ``details['component']`` must match the type name of the
component the elements depend on (e.g. "writer"). The value of
``details['format']`` is the name of a specific format or context of that
component (e.g. "html"). If the matching Docutils component supports that
format or context, the "pending" element is replaced by the contents of
``details['nodes']`` (a list of nodes); otherwise, the "pending" element
is removed.
For example, the reStructuredText "meta" directive creates a "pending"
element containing a "meta" element (in ``pending.details['nodes']``).
Only writers (``pending.details['component'] == 'writer'``) supporting the
"html" format (``pending.details['format'] == 'html'``) will include the
"meta" element; it will be deleted from the output of all other writers.
"""
default_priority = 780
def apply(self):
pending = self.startnode
component_type = pending.details['component'] # 'reader' or 'writer'
format = pending.details['format']
component = self.document.transformer.components[component_type]
if component.supports(format):
pending.replace_self(pending.details['nodes'])
else:
pending.parent.remove(pending)
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/transforms/components.py",
"copies": "4",
"size": "1943",
"license": "mit",
"hash": 4096444413060584000,
"line_mean": 36.3653846154,
"line_max": 78,
"alpha_frac": 0.6963458569,
"autogenerated": false,
"ratio": 4.24235807860262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.693870393550262,
"avg_score": null,
"num_lines": null
} |
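# Hedged construction sketch matching the docstring's contract: a pending
# node whose details name the component type, the format, and the payload.
from docutils import nodes
from docutils.transforms import components
payload = nodes.raw('', '<meta name="x" content="y">', format='html')
pending = nodes.pending(components.Filter,
                        {'component': 'writer',
                         'format': 'html',
                         'nodes': [payload]})
# After document.note_pending(pending) and transform application, the
# pending node is replaced by [payload] when writer.supports('html') is
# true, and silently removed otherwise.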
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
from docutils.utils.error_reporting import locale_encoding, ErrorString, ErrorOutput
class InputError(IOError): pass
class OutputError(IOError): pass
def check_encoding(stream, encoding):
"""Test, whether the encoding of `stream` matches `encoding`.
Returns
:None: if `encoding` or `stream.encoding` are not a valid encoding
            argument (e.g. ``None``) or `stream.encoding` is missing.
:True: if the encoding argument resolves to the same value as `encoding`,
:False: if the encodings differ.
"""
try:
return codecs.lookup(stream.encoding) == codecs.lookup(encoding)
except (LookupError, AttributeError, TypeError):
return None
class Input(TransformSpec):
"""
Abstract base class for input wrappers.
"""
component_type = 'input'
default_source_path = None
def __init__(self, source=None, source_path=None, encoding=None,
error_handler='strict'):
self.encoding = encoding
"""Text encoding for the input source."""
self.error_handler = error_handler
"""Text decoding error handler."""
self.source = source
"""The source of input data."""
self.source_path = source_path
"""A text reference to the source."""
if not source_path:
self.source_path = self.default_source_path
self.successful_encoding = None
"""The encoding that successfully decoded the source data."""
def __repr__(self):
return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
self.source_path)
def read(self):
raise NotImplementedError
def decode(self, data):
"""
Decode a string, `data`, heuristically.
Raise UnicodeError if unsuccessful.
The client application should call ``locale.setlocale`` at the
beginning of processing::
locale.setlocale(locale.LC_ALL, '')
"""
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, unicode), (
'input encoding is "unicode" '
'but input is not a unicode object')
if isinstance(data, unicode):
# Accept unicode even if self.encoding != 'unicode'.
return data
if self.encoding:
# We believe the user/application when the encoding is
# explicitly given.
encodings = [self.encoding]
else:
data_encoding = self.determine_encoding_from_data(data)
if data_encoding:
# If the data declares its encoding (explicitly or via a BOM),
# we believe it.
encodings = [data_encoding]
else:
# Apply heuristics only if no encoding is explicitly given and
# no BOM found. Start with UTF-8, because that only matches
# data that *IS* UTF-8:
encodings = ['utf-8', 'latin-1']
if locale_encoding:
encodings.insert(1, locale_encoding)
for enc in encodings:
try:
decoded = unicode(data, enc, self.error_handler)
self.successful_encoding = enc
# Return decoded, removing BOMs.
return decoded.replace(u'\ufeff', u'')
except (UnicodeError, LookupError), err:
error = err # in Python 3, the <exception instance> is
# local to the except clause
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: '
'%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
ErrorString(error)))
coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)"))
"""Encoding declaration pattern."""
byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5
(codecs.BOM_UTF16_BE, 'utf-16-be'),
(codecs.BOM_UTF16_LE, 'utf-16-le'),)
"""Sequence of (start_bytes, encoding) tuples for encoding detection.
The first bytes of input data are checked against the start_bytes strings.
A match indicates the given encoding."""
def determine_encoding_from_data(self, data):
"""
Try to determine the encoding of `data` by looking *in* `data`.
Check for a byte order mark (BOM) or an encoding declaration.
"""
# check for a byte order mark:
for start_bytes, encoding in self.byte_order_marks:
if data.startswith(start_bytes):
return encoding
# check for an encoding declaration pattern in first 2 lines of file:
for line in data.splitlines()[:2]:
match = self.coding_slug.search(line)
if match:
return match.group(1).decode('ascii')
return None
class Output(TransformSpec):
"""
Abstract base class for output wrappers.
"""
component_type = 'output'
default_destination_path = None
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict'):
self.encoding = encoding
"""Text encoding for the output destination."""
self.error_handler = error_handler or 'strict'
"""Text encoding error handler."""
self.destination = destination
"""The destination for output data."""
self.destination_path = destination_path
"""A text reference to the destination."""
if not destination_path:
self.destination_path = self.default_destination_path
def __repr__(self):
return ('%s: destination=%r, destination_path=%r'
% (self.__class__, self.destination, self.destination_path))
def write(self, data):
"""`data` is a Unicode string, to be encoded by `self.encode`."""
raise NotImplementedError
def encode(self, data):
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, unicode), (
'the encoding given is "unicode" but the output is not '
'a Unicode string')
return data
if not isinstance(data, unicode):
# Non-unicode (e.g. bytes) output.
return data
else:
return data.encode(self.encoding, self.error_handler)
class FileInput(Input):
"""
Input for single, simple file-like objects.
"""
def __init__(self, source=None, source_path=None,
encoding=None, error_handler='strict',
autoclose=True, handle_io_errors=None, mode='rU'):
"""
:Parameters:
- `source`: either a file-like object (which is read directly), or
`None` (which implies `sys.stdin` if no `source_path` given).
- `source_path`: a path to a file, which is opened and then read.
- `encoding`: the expected text encoding of the input file.
- `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after read (except when
`sys.stdin` is the source).
- `handle_io_errors`: ignored, deprecated, will be removed.
- `mode`: how the file is to be opened (see standard function
`open`). The default 'rU' provides universal newline support
for text files.
"""
Input.__init__(self, source, source_path, encoding, error_handler)
self.autoclose = autoclose
self._stderr = ErrorOutput()
if source is None:
if source_path:
# Specify encoding in Python 3
if sys.version_info >= (3,0):
kwargs = {'encoding': self.encoding,
'errors': self.error_handler}
else:
kwargs = {}
try:
self.source = open(source_path, mode, **kwargs)
except IOError, error:
raise InputError(error.errno, error.strerror, source_path)
else:
self.source = sys.stdin
elif (sys.version_info >= (3,0) and
check_encoding(self.source, self.encoding) is False):
# TODO: re-open, warn or raise error?
raise UnicodeError('Encoding clash: encoding given is "%s" '
'but source is opened with encoding "%s".' %
(self.encoding, self.source.encoding))
if not source_path:
try:
self.source_path = self.source.name
except AttributeError:
pass
def read(self):
"""
Read and decode a single file and return the data (Unicode string).
"""
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
if self.source is sys.stdin and sys.version_info >= (3,0):
# read as binary data to circumvent auto-decoding
data = self.source.buffer.read()
# normalize newlines
data = b('\n').join(data.splitlines()) + b('\n')
else:
data = self.source.read()
except (UnicodeError, LookupError), err: # (in Py3k read() decodes)
if not self.encoding and self.source_path:
# re-read in binary mode and decode with heuristics
b_source = open(self.source_path, 'rb')
data = b_source.read()
b_source.close()
# normalize newlines
data = b('\n').join(data.splitlines()) + b('\n')
else:
raise
finally:
if self.autoclose:
self.close()
return self.decode(data)
def readlines(self):
"""
Return lines of a single file as list of Unicode strings.
"""
return self.read().splitlines(True)
def close(self):
if self.source is not sys.stdin:
self.source.close()
class FileOutput(Output):
"""
Output for single, simple file-like objects.
"""
mode = 'w'
"""The mode argument for `open()`."""
# 'wb' for binary (e.g. OpenOffice) files (see also `BinaryFileOutput`).
# (Do not use binary mode ('wb') for text files, as this prevents the
# conversion of newlines to the system specific default.)
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict', autoclose=True,
handle_io_errors=None, mode=None):
"""
:Parameters:
- `destination`: either a file-like object (which is written
directly) or `None` (which implies `sys.stdout` if no
`destination_path` given).
- `destination_path`: a path to a file, which is opened and then
written.
- `encoding`: the text encoding of the output file.
- `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after write (except when
`sys.stdout` or `sys.stderr` is the destination).
- `handle_io_errors`: ignored, deprecated, will be removed.
- `mode`: how the file is to be opened (see standard function
`open`). The default is 'w', providing universal newline
support for text files.
"""
Output.__init__(self, destination, destination_path,
encoding, error_handler)
self.opened = True
self.autoclose = autoclose
if mode is not None:
self.mode = mode
self._stderr = ErrorOutput()
if destination is None:
if destination_path:
self.opened = False
else:
self.destination = sys.stdout
elif (# destination is file-type object -> check mode:
mode and hasattr(self.destination, 'mode')
and mode != self.destination.mode):
print >>self._stderr, ('Warning: Destination mode "%s" '
'differs from specified mode "%s"' %
(self.destination.mode, mode))
if not destination_path:
try:
self.destination_path = self.destination.name
except AttributeError:
pass
def open(self):
# Specify encoding in Python 3.
if sys.version_info >= (3,0) and 'b' not in self.mode:
kwargs = {'encoding': self.encoding,
'errors': self.error_handler}
else:
kwargs = {}
try:
self.destination = open(self.destination_path, self.mode, **kwargs)
except IOError, error:
raise OutputError(error.errno, error.strerror,
self.destination_path)
self.opened = True
def write(self, data):
"""Encode `data`, write it to a single file, and return it.
With Python 3 or binary output mode, `data` is returned unchanged,
except when specified encoding and output encoding differ.
"""
if not self.opened:
self.open()
if ('b' not in self.mode and sys.version_info < (3,0)
or check_encoding(self.destination, self.encoding) is False
):
if sys.version_info >= (3,0) and os.linesep != '\n':
data = data.replace('\n', os.linesep) # fix endings
data = self.encode(data)
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
self.destination.write(data)
except TypeError, e:
if sys.version_info >= (3,0) and isinstance(data, bytes):
try:
self.destination.buffer.write(data)
except AttributeError:
if check_encoding(self.destination,
self.encoding) is False:
raise ValueError('Encoding of %s (%s) differs \n'
' from specified encoding (%s)' %
(self.destination_path or 'destination',
self.destination.encoding, self.encoding))
else:
raise e
except (UnicodeError, LookupError), err:
raise UnicodeError(
'Unable to encode output data. output-encoding is: '
'%s.\n(%s)' % (self.encoding, ErrorString(err)))
finally:
if self.autoclose:
self.close()
return data
def close(self):
if self.destination not in (sys.stdout, sys.stderr):
self.destination.close()
self.opened = False
class BinaryFileOutput(FileOutput):
"""
A version of docutils.io.FileOutput which writes to a binary file.
"""
# Used by core.publish_cmdline_to_binary() which in turn is used by
# rst2odt (OpenOffice writer)
mode = 'wb'
class StringInput(Input):
"""
Direct string input.
"""
default_source_path = '<string>'
def read(self):
"""Decode and return the source string."""
return self.decode(self.source)
class StringOutput(Output):
"""
Direct string output.
"""
default_destination_path = '<string>'
def write(self, data):
"""Encode `data`, store it in `self.destination`, and return it."""
self.destination = self.encode(data)
return self.destination
class NullInput(Input):
"""
Degenerate input: read nothing.
"""
default_source_path = 'null input'
def read(self):
"""Return a null string."""
return u''
class NullOutput(Output):
"""
Degenerate output: write nothing.
"""
default_destination_path = 'null output'
def write(self, data):
"""Do nothing ([don't even] send data to the bit bucket)."""
pass
class DocTreeInput(Input):
"""
Adapter for document tree input.
The document tree must be passed in the ``source`` parameter.
"""
default_source_path = 'doctree input'
def read(self):
"""Return the document tree."""
return self.source
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/io.py",
"copies": "2",
"size": "17008",
"license": "mit",
"hash": 7462872917759843000,
"line_mean": 34.3596673597,
"line_max": 84,
"alpha_frac": 0.5540921919,
"autogenerated": false,
"ratio": 4.551244313620551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011671434540148568,
"num_lines": 481
} |
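# Hedged sketch of Input.decode() heuristics via StringInput: with no
# explicit encoding and no BOM or coding declaration, UTF-8 is tried
# before latin-1, so valid UTF-8 bytes decode as UTF-8.
from docutils.io import StringInput
src = StringInput(source=b'caf\xc3\xa9\n')  # UTF-8 bytes for "café"
text = src.read()
# src.successful_encoding is now 'utf-8'; a byte order mark, if present,
# would have been detected and stripped as well.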
"""
Miscellaneous transforms.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.transforms import Transform, TransformError
class CallBack(Transform):
"""
Inserts a callback into a document. The callback is called when the
transform is applied, which is determined by its priority.
For use with `nodes.pending` elements. Requires a ``details['callback']``
entry, a bound method or function which takes one parameter: the pending
node. Other data can be stored in the ``details`` attribute or in the
object hosting the callback method.
"""
default_priority = 990
def apply(self):
pending = self.startnode
pending.details['callback'](pending)
pending.parent.remove(pending)
class ClassAttribute(Transform):
"""
Move the "class" attribute specified in the "pending" node into the
immediately following non-comment element.
"""
default_priority = 210
def apply(self):
pending = self.startnode
parent = pending.parent
child = pending
while parent:
# Check for appropriate following siblings:
for index in range(parent.index(child) + 1, len(parent)):
element = parent[index]
if (isinstance(element, nodes.Invisible) or
isinstance(element, nodes.system_message)):
continue
element['classes'] += pending.details['class']
pending.parent.remove(pending)
return
else:
# At end of section or container; apply to sibling
child = parent
parent = parent.parent
error = self.document.reporter.error(
'No suitable element following "%s" directive'
% pending.details['directive'],
nodes.literal_block(pending.rawsource, pending.rawsource),
line=pending.line)
pending.replace_self(error)
class Transitions(Transform):
"""
Move transitions at the end of sections up the tree. Complain
on transitions after a title, at the beginning or end of the
document, and after another transition.
For example, transform this::
<section>
...
<transition>
<section>
...
into this::
<section>
...
<transition>
<section>
...
"""
default_priority = 830
def apply(self):
for node in self.document.traverse(nodes.transition):
self.visit_transition(node)
def visit_transition(self, node):
index = node.parent.index(node)
error = None
if (index == 0 or
isinstance(node.parent[0], nodes.title) and
(index == 1 or
isinstance(node.parent[1], nodes.subtitle) and
index == 2)):
assert (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.section))
error = self.document.reporter.error(
'Document or section may not begin with a transition.',
source=node.source, line=node.line)
elif isinstance(node.parent[index - 1], nodes.transition):
error = self.document.reporter.error(
'At least one body element must separate transitions; '
'adjacent transitions are not allowed.',
source=node.source, line=node.line)
if error:
# Insert before node and update index.
node.parent.insert(index, error)
index += 1
assert index < len(node.parent)
if index != len(node.parent) - 1:
# No need to move the node.
return
# Node behind which the transition is to be moved.
sibling = node
# While sibling is the last node of its parent.
while index == len(sibling.parent) - 1:
sibling = sibling.parent
# If sibling is the whole document (i.e. it has no parent).
if sibling.parent is None:
# Transition at the end of document. Do not move the
# transition up, and place an error behind.
error = self.document.reporter.error(
'Document may not end with a transition.',
line=node.line)
node.parent.insert(node.parent.index(node) + 1, error)
return
index = sibling.parent.index(sibling)
# Remove the original transition node.
node.parent.remove(node)
# Insert the transition after the sibling.
sibling.parent.insert(index + 1, node)
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/transforms/misc.py",
"copies": "4",
"size": "4840",
"license": "mit",
"hash": 1337563130773199000,
"line_mean": 32.6111111111,
"line_max": 78,
"alpha_frac": 0.5867768595,
"autogenerated": false,
"ratio": 4.69447138700291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010850694444444444,
"num_lines": 144
} |
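# Hedged sketch of the Transitions transform above: a transition at the
# very end of the input is kept in place and followed by an error node.
from docutils.core import publish_doctree
tree = publish_doctree("A paragraph.\n"
                       "\n"
                       "----------\n")
# The resulting tree ends with a system_message reading "Document may not
# end with a transition.", inserted by apply()/visit_transition() above.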
"""
PEP HTML Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import codecs
import docutils
from docutils import frontend, nodes, utils, writers
from docutils.writers import html4css1
class Writer(html4css1.Writer):
default_stylesheet = 'pep.css'
default_stylesheet_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_stylesheet))
default_template = 'template.txt'
default_template_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_template))
settings_spec = html4css1.Writer.settings_spec + (
'PEP/HTML-Specific Options',
'For the PEP/HTML writer, the default value for the --stylesheet-path '
'option is "%s", and the default value for --template is "%s". '
'See HTML-Specific Options above.'
% (default_stylesheet_path, default_template_path),
(('Python\'s home URL. Default is "http://www.python.org".',
['--python-home'],
{'default': 'http://www.python.org', 'metavar': '<URL>'}),
('Home URL prefix for PEPs. Default is "." (current directory).',
['--pep-home'],
{'default': '.', 'metavar': '<URL>'}),
# For testing.
(frontend.SUPPRESS_HELP,
['--no-random'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
settings_default_overrides = {'stylesheet_path': default_stylesheet_path,
'template': default_template_path,}
relative_path_settings = (html4css1.Writer.relative_path_settings
+ ('template',))
config_section = 'pep_html writer'
config_section_dependencies = ('writers', 'html4css1 writer')
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = HTMLTranslator
def interpolation_dict(self):
subs = html4css1.Writer.interpolation_dict(self)
settings = self.document.settings
pyhome = settings.python_home
subs['pyhome'] = pyhome
subs['pephome'] = settings.pep_home
if pyhome == '..':
subs['pepindex'] = '.'
else:
subs['pepindex'] = pyhome + '/dev/peps'
index = self.document.first_child_matching_class(nodes.field_list)
header = self.document[index]
self.pepnum = header[0][1].astext()
subs['pep'] = self.pepnum
if settings.no_random:
subs['banner'] = 0
else:
import random
subs['banner'] = random.randrange(64)
try:
subs['pepnum'] = '%04i' % int(self.pepnum)
except ValueError:
subs['pepnum'] = self.pepnum
self.title = header[1][1].astext()
subs['title'] = self.title
subs['body'] = ''.join(
self.body_pre_docinfo + self.docinfo + self.body)
return subs
def assemble_parts(self):
html4css1.Writer.assemble_parts(self)
self.parts['title'] = [self.title]
self.parts['pepnum'] = self.pepnum
class HTMLTranslator(html4css1.HTMLTranslator):
def depart_field_list(self, node):
html4css1.HTMLTranslator.depart_field_list(self, node)
if 'rfc2822' in node['classes']:
self.body.append('<hr />\n')
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/writers/pep_html/__init__.py",
"copies": "4",
"size": "3535",
"license": "mit",
"hash": -763275437023351600,
"line_mean": 32.6666666667,
"line_max": 79,
"alpha_frac": 0.5937765205,
"autogenerated": false,
"ratio": 3.8216216216216217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6415398142121622,
"avg_score": null,
"num_lines": null
} |
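# Hedged sketch, assuming a PEP-formatted source string `pep_text` (not
# defined here): pairing the "pep" reader with this writer, much as the
# rstpep2html front end does; "no_random" is the test option defined above.
from docutils.core import publish_string
html = publish_string(pep_text, reader_name='pep',
                      writer_name='pep_html',
                      settings_overrides={'no_random': True})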
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
supported = ('pep',)
"""Contexts this reader supports."""
settings_spec = (
'PEP Reader Option Defaults',
'The --pep-references and --rfc-references options (for the '
'reStructuredText parser) are on by default.',
())
config_section = 'pep reader'
config_section_dependencies = ('readers', 'standalone reader')
def get_transforms(self):
transforms = standalone.Reader.get_transforms(self)
# We have PEP-specific frontmatter handling.
transforms.remove(frontmatter.DocTitle)
transforms.remove(frontmatter.SectionSubTitle)
transforms.remove(frontmatter.DocInfo)
transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
return transforms
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
if parser is None:
parser = rst.Parser(rfc2822=True, inliner=self.inliner_class())
standalone.Reader.__init__(self, parser, '')
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/readers/pep.py",
"copies": "4",
"size": "1514",
"license": "mit",
"hash": -6548643697418254000,
"line_mean": 30.5416666667,
"line_max": 75,
"alpha_frac": 0.6743725231,
"autogenerated": false,
"ratio": 4.005291005291006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
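# Hedged sketch: listing the reader's transforms to show the swaps made
# by get_transforms() above.
from docutils.readers import pep
reader = pep.Reader()
names = [t.__name__ for t in reader.get_transforms()]
# 'DocTitle', 'SectionSubTitle' and 'DocInfo' are absent; 'Headers',
# 'Contents' and 'TargetNotes' from docutils.transforms.peps appear.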
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import frontend, readers
from docutils.transforms import frontmatter, references, misc
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Activate the promotion of lone subsection titles to '
'section subtitles (disabled by default).',
['--section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
'validator': frontend.validate_boolean}),
('Deactivate the promotion of lone subsection titles.',
['--no-section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_false'}),
))
config_section = 'standalone reader'
config_section_dependencies = ('readers',)
def get_transforms(self):
return readers.Reader.get_transforms(self) + [
references.Substitutions,
references.PropagateTargets,
frontmatter.DocTitle,
frontmatter.SectionSubTitle,
frontmatter.DocInfo,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,
references.DanglingReferences,
misc.Transitions,
]
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/readers/standalone.py",
"copies": "4",
"size": "2290",
"license": "mit",
"hash": -6883822941403228000,
"line_mean": 33.696969697,
"line_max": 78,
"alpha_frac": 0.6144104803,
"autogenerated": false,
"ratio": 4.49901768172888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7113428162028881,
"avg_score": null,
"num_lines": null
} |
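# Hedged sketch of the doctitle_xform setting defined above: disabling it
# keeps a lone top-level section title as a section instead of promoting
# it to the document title.
from docutils.core import publish_doctree
tree = publish_doctree("Title\n=====\n\nBody text.\n",
                       settings_overrides={'doctitle_xform': False})
# With the default (True), the same input yields <document title="Title">
# with the section wrapper removed.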
"""
This is ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.
Usage
=====
1. Create a parser::
parser = docutils.parsers.rst.Parser()
Several optional arguments may be passed to modify the parser's behavior.
Please see `Customizing the Parser`_ below for details.
2. Gather input (a multi-line string), by reading a file or the standard
input::
input = sys.stdin.read()
3. Create a new empty `docutils.nodes.document` tree::
document = docutils.utils.new_document(source, settings)
See `docutils.utils.new_document()` for parameter details.
4. Run the parser, populating the document tree::
parser.parse(input, document)
Parser Overview
===============
The reStructuredText parser is implemented as a state machine, examining its
input one line at a time. To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.
Customizing the Parser
----------------------
Anything that isn't already customizable is that way simply because that type
of customizability hasn't been implemented yet. Patches welcome!
When instantiating an object of the `Parser` class, two parameters may be
passed: ``rfc2822`` and ``inliner``. Pass ``rfc2822=True`` to enable an
initial RFC-2822 style header block, parsed as a "field_list" element (with
"class" attribute set to "rfc2822"). Currently this is the only body-level
element which is customizable without subclassing. (Tip: subclass `Parser`
and change its "state_classes" and "initial_state" attributes to refer to new
classes. Contact the author if you need more details.)
The ``inliner`` parameter takes an instance of `states.Inliner` or a subclass.
It handles inline markup recognition. A common extension is the addition of
further implicit hyperlinks, like "RFC 2822". This can be done by subclassing
`states.Inliner`, adding a new method for the implicit markup, and adding a
``(pattern, method)`` pair to the "implicit_dispatch" attribute of the
subclass. See `states.Inliner.implicit_inline()` for details. Explicit
inline markup can be customized in a `states.Inliner` subclass via the
``patterns.initial`` and ``dispatch`` attributes (and new methods as
appropriate).
"""
__docformat__ = 'reStructuredText'
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
from docutils import frontend, nodes, Component
from docutils.transforms import universal
class Parser(docutils.parsers.Parser):
"""The reStructuredText parser."""
supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
"""Aliases this parser supports."""
settings_spec = (
'reStructuredText Parser Options',
None,
(('Recognize and link to standalone PEP references (like "PEP 258").',
['--pep-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for PEP references '
'(default "http://www.python.org/dev/peps/").',
['--pep-base-url'],
{'metavar': '<URL>', 'default': 'http://www.python.org/dev/peps/',
'validator': frontend.validate_url_trailing_slash}),
('Template for PEP file part of URL. (default "pep-%04d")',
['--pep-file-url-template'],
{'metavar': '<URL>', 'default': 'pep-%04d'}),
('Recognize and link to standalone RFC references (like "RFC 822").',
['--rfc-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for RFC references (default "http://www.faqs.org/rfcs/").',
['--rfc-base-url'],
{'metavar': '<URL>', 'default': 'http://www.faqs.org/rfcs/',
'validator': frontend.validate_url_trailing_slash}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8,
'validator': frontend.validate_nonnegative_int}),
('Remove spaces before footnote references.',
['--trim-footnote-reference-space'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Leave spaces before footnote references.',
['--leave-footnote-reference-space'],
{'action': 'store_false', 'dest': 'trim_footnote_reference_space'}),
('Disable directives that insert the contents of external file '
'("include" & "raw"); replaced with a "warning" system message.',
['--no-file-insertion'],
{'action': 'store_false', 'default': 1,
'dest': 'file_insertion_enabled',
'validator': frontend.validate_boolean}),
('Enable directives that insert the contents of external file '
'("include" & "raw"). Enabled by default.',
['--file-insertion-enabled'],
{'action': 'store_true'}),
('Disable the "raw" directives; replaced with a "warning" '
'system message.',
['--no-raw'],
{'action': 'store_false', 'default': 1, 'dest': 'raw_enabled',
'validator': frontend.validate_boolean}),
('Enable the "raw" directive. Enabled by default.',
['--raw-enabled'],
{'action': 'store_true'}),
('Token name set for parsing code with Pygments: one of '
'"long", "short", or "none (no parsing)". Default is "long".',
['--syntax-highlight'],
{'choices': ['long', 'short', 'none'],
'default': 'long', 'metavar': '<format>'}),
('Change straight quotation marks to typographic form: '
'one of "yes", "no", "alt[ernative]" (default "no").',
['--smart-quotes'],
{'default': False, 'validator': frontend.validate_ternary}),
))
config_section = 'restructuredtext parser'
config_section_dependencies = ('parsers',)
def __init__(self, rfc2822=False, inliner=None):
if rfc2822:
self.initial_state = 'RFC2822Body'
else:
self.initial_state = 'Body'
self.state_classes = states.state_classes
self.inliner = inliner
def get_transforms(self):
return Component.get_transforms(self) + [
universal.SmartQuotes]
def parse(self, inputstring, document):
"""Parse `inputstring` and populate `document`, a document tree."""
self.setup_parse(inputstring, document)
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
debug=document.reporter.debug_flag)
inputlines = docutils.statemachine.string2lines(
inputstring, tab_width=document.settings.tab_width,
convert_whitespace=True)
self.statemachine.run(inputlines, document, inliner=self.inliner)
self.finish_parse()
class DirectiveError(Exception):
"""
Store a message and a system message level.
To be thrown from inside directive code.
Do not instantiate directly -- use `Directive.directive_error()`
instead!
"""
def __init__(self, level, message):
"""Set error `message` and `level`"""
Exception.__init__(self)
self.level = level
self.msg = message
class Directive(object):
"""
Base class for reStructuredText directives.
The following attributes may be set by subclasses. They are
interpreted by the directive parser (which runs the directive
class):
- `required_arguments`: The number of required arguments (default:
0).
- `optional_arguments`: The number of optional arguments (default:
0).
- `final_argument_whitespace`: A boolean, indicating if the final
argument may contain whitespace (default: False).
- `option_spec`: A dictionary, mapping known option names to
conversion functions such as `int` or `float` (default: {}, no
options). Several conversion functions are defined in the
directives/__init__.py module.
Option conversion functions take a single parameter, the option
argument (a string or ``None``), validate it and/or convert it
to the appropriate form. Conversion functions may raise
`ValueError` and `TypeError` exceptions.
- `has_content`: A boolean; True if content is allowed. Client
code must handle the case where content is required but not
supplied (an empty content list will be supplied).
Arguments are normally single whitespace-separated words. The
final argument may contain whitespace and/or newlines if
`final_argument_whitespace` is True.
If the form of the arguments is more complex, specify only one
argument (either required or optional) and set
`final_argument_whitespace` to True; the client code must do any
context-sensitive parsing.
When a directive implementation is being run, the directive class
is instantiated, and the `run()` method is executed. During
instantiation, the following instance variables are set:
- ``name`` is the directive type or name (string).
- ``arguments`` is the list of positional arguments (strings).
- ``options`` is a dictionary mapping option names (strings) to
values (type depends on option conversion functions; see
`option_spec` above).
- ``content`` is a list of strings, the directive content line by line.
- ``lineno`` is the absolute line number of the first line
of the directive.
- ``src`` is the name (or path) of the rst source of the directive.
- ``srcline`` is the line number of the first line of the directive
in its source. It may differ from ``lineno``, if the main source
includes other sources with the ``.. include::`` directive.
- ``content_offset`` is the line offset of the first line of the content from
the beginning of the current input. Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state which called
the directive function.
Directive functions return a list of nodes which will be inserted
into the document tree at the point where the directive was
encountered. This can be an empty list if there is nothing to
insert.
For ordinary directives, the list must contain body elements or
structural elements. Some directives are intended specifically
for substitution definitions, and must return a list of `Text`
nodes and/or inline elements (suitable for inline insertion, in
place of the substitution reference). Such directives must verify
substitution definition context, typically using code like this::
if not isinstance(state, states.SubstitutionDef):
error = state_machine.reporter.error(
'Invalid context: the "%s" directive can only be used '
'within a substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
"""
# There is a "Creating reStructuredText Directives" how-to at
# <http://docutils.sf.net/docs/howto/rst-directives.html>. If you
# update this docstring, please update the how-to as well.
required_arguments = 0
"""Number of required directive arguments."""
optional_arguments = 0
"""Number of optional arguments after the required arguments."""
final_argument_whitespace = False
"""May the final argument contain whitespace?"""
option_spec = None
"""Mapping of option names to validator functions."""
has_content = False
"""May the directive have content?"""
def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
self.name = name
self.arguments = arguments
self.options = options
self.content = content
self.lineno = lineno
self.content_offset = content_offset
self.block_text = block_text
self.state = state
self.state_machine = state_machine
def run(self):
        raise NotImplementedError('Must override run() in subclass.')
# Directive errors:
def directive_error(self, level, message):
"""
Return a DirectiveError suitable for being thrown as an exception.
Call "raise self.directive_error(level, message)" from within
a directive implementation to return one single system message
at level `level`, which automatically gets the directive block
and the line number added.
Preferably use the `debug`, `info`, `warning`, `error`, or `severe`
wrapper methods, e.g. ``self.error(message)`` to generate an
ERROR-level directive error.
"""
return DirectiveError(level, message)
def debug(self, message):
return self.directive_error(0, message)
def info(self, message):
return self.directive_error(1, message)
def warning(self, message):
return self.directive_error(2, message)
def error(self, message):
return self.directive_error(3, message)
def severe(self, message):
return self.directive_error(4, message)
# Convenience methods:
def assert_has_content(self):
"""
Throw an ERROR-level DirectiveError if the directive doesn't
have contents.
"""
if not self.content:
raise self.error('Content block expected for the "%s" directive; '
'none found.' % self.name)
def add_name(self, node):
"""Append self.options['name'] to node['names'] if it exists.
Also normalize the name string and register it as explicit target.
"""
if 'name' in self.options:
name = nodes.fully_normalize_name(self.options.pop('name'))
if 'name' in node:
del(node['name'])
node['names'].append(name)
self.state.document.note_explicit_target(node, node)
def convert_directive_function(directive_fn):
"""
Define & return a directive class generated from `directive_fn`.
`directive_fn` uses the old-style, functional interface.
"""
class FunctionalDirective(Directive):
option_spec = getattr(directive_fn, 'options', None)
has_content = getattr(directive_fn, 'content', False)
_argument_spec = getattr(directive_fn, 'arguments', (0, 0, False))
required_arguments, optional_arguments, final_argument_whitespace \
= _argument_spec
def run(self):
return directive_fn(
self.name, self.arguments, self.options, self.content,
self.lineno, self.content_offset, self.block_text,
self.state, self.state_machine)
# Return new-style directive.
return FunctionalDirective
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/__init__.py",
"copies": "4",
"size": "15251",
"license": "mit",
"hash": 6119284180955981000,
"line_mean": 37.1275,
"line_max": 82,
"alpha_frac": 0.650186873,
"autogenerated": false,
"ratio": 4.354940034266134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002878810140784869,
"num_lines": 400
} |
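# Hedged, runnable version of the four usage steps in the docstring above;
# '<string>' is an arbitrary label for the source path.
import docutils.frontend
import docutils.parsers.rst
import docutils.utils
parser = docutils.parsers.rst.Parser()
settings = docutils.frontend.OptionParser(
    components=(docutils.parsers.rst.Parser,)).get_default_values()
document = docutils.utils.new_document('<string>', settings)
parser.parse('Hello, *world*!\n', document)
# document.pformat() now shows a paragraph containing an emphasis node.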
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
try:
import roman
except ImportError:
import docutils.utils.roman as roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
from docutils.utils import escape2null, unescape, column_width
from docutils.utils import punctuation_chars, urischemes
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=True,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=False,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=True):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=False):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
# enable the reporter to determine source and source-line
if not hasattr(self.reporter, 'get_source_and_line'):
self.reporter.get_source_and_line = self.state_machine.get_source_and_line
# print "adding get_source_and_line to reporter", self.state_machine.input_offset
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line))
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=False,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
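    # Sketch of typical use (hypothetical container node; the list, table,
    # and directive code below calls it this way):
    #
    #     container = nodes.block_quote()
    #     new_offset = self.nested_parse(indented_lines,
    #                                    input_offset=abs_offset,
    #                                    node=container)
    #
    # The parsed children are appended to `container`, and `new_offset` is
    # the absolute line offset just past the parsed block.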
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=False,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in list(extra_settings.items()):
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
        back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = True
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
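    # A worked example of the bookkeeping above (sketch): with
    # memo.title_styles == ['=', '-'] and memo.section_level == 2, a new
    # '~' underline appends '~' and opens a level-3 subsection, while an
    # '=' underline yields level 1 <= 2 (a sibling or supersection), so
    # the parser backs up past the title and raises EOFError until the
    # level-1 section's state machine resumes control.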
def title_inconsistent(self, sourcetext, lineno):
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
line=lineno)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=True)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.source, p.line = self.state_machine.get_source_and_line(lineno)
return [p] + messages, literalnext
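    # Worked cases for the '::' handling above (sketch):
    #   'Both::'    -> paragraph text 'Both:',  literal block expected next
    #   'Spaced ::' -> paragraph text 'Spaced', literal block expected next
    #   '::'        -> no paragraph at all,     literal block expected next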
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
# the actual problem is one line below the current line
lineno = self.state_machine.abs_line_number()+1
return self.reporter.warning('%s ends without a blank line; '
'unexpected unindent.' % node_name,
line=lineno)
def build_regexp(definition, compile=True):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
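# For example (a sketch, not used by the module), the definition
#     ('marker', '', '', [r'\*\*', r'``'])
# produces the pattern '(?P<marker>\*\*|``)', so
#     build_regexp(('marker', '', '', [r'\*\*', r'``'])).match('**')
# succeeds and stores '**' in the named group 'marker'.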
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
# Inline object recognition
# -------------------------
# lookahead and look-behind expressions for inline markup rules
start_string_prefix = ('(^|(?<=\\s|[%s%s]))' %
(punctuation_chars.openers,
punctuation_chars.delimiters))
end_string_suffix = ('($|(?=\\s|[\x00%s%s%s]))' %
(punctuation_chars.closing_delimiters,
punctuation_chars.delimiters,
punctuation_chars.closers))
# print start_string_prefix.encode('utf8')
# TODO: support non-ASCII whitespace in the following 4 patterns?
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_unescaped_whitespace_escape_before = r'(?<!(?<!\x00)[ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix, re.UNICODE),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix, re.UNICODE),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_unescaped_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_link=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+(\x00_)?) # anything but angle brackets & nulls
# except escaped trailing low line
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE | re.UNICODE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$',
re.VERBOSE | re.UNICODE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE | re.UNICODE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE | re.UNICODE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE | re.UNICODE))
def quoted_start(self, match):
"""Test if inline markup start-string is 'quoted'.
'Quoted' in this context means the start-string is enclosed in a pair
of matching opening/closing delimiters (not necessarily quotes)
or at the end of the match.
"""
string = match.string
start = match.start()
if start == 0: # start-string at beginning of text
return False
prestart = string[start - 1]
try:
poststart = string[match.end()]
except IndexError: # start-string at end of text
return True # not "quoted" but no markup start-string either
return punctuation_chars.match_chars(prestart, poststart)
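    # Sketch: in the text '(*)', the '*' start-string is preceded by '('
    # and followed by ')', a matching delimiter pair, so quoted_start()
    # is true and no emphasis is opened; in '(*emphasis*)' the character
    # after the start-string is 'e', which matches nothing, so inline
    # markup proceeds normally.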
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=False):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_link.search(escaped)
if match: # embedded <URI> or <alias_>
text = unescape(escaped[:match.start(0)])
aliastext = unescape(match.group(2), restore_backslashes=True)
if aliastext.endswith('_') and not (aliastext.endswith(r'\_')
or self.patterns.uri.match(aliastext)):
aliastype = 'name'
alias = normalize_name(aliastext[:-1])
target = nodes.target(match.group(1), refname=alias)
target.indirect_reference_name = aliastext[:-1]
else:
aliastype = 'uri'
alias = ''.join(aliastext.split())
alias = self.adjust_uri(alias)
if alias.endswith(r'\_'):
alias = alias[:-2] + '_'
target = nodes.target(match.group(1), refuri=alias)
target.referenced = 1
if not aliastext:
raise ApplicationError('problem with embedded link: %r'
% aliastext)
if not text:
text = alias
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target and (aliastype == 'name'):
reference['refname'] = alias
self.document.note_refname(reference)
# self.document.note_indirect_target(target) # required?
elif target and (aliastype == 'uri'):
reference['refuri'] = alias
else:
reference['anonymous'] = 1
else:
if target:
target['names'].append(refname)
if aliastype == 'name':
reference['refname'] = alias
self.document.note_indirect_target(target)
self.document.note_refname(reference)
else:
reference['refuri'] = alias
self.document.note_explicit_target(target, self.parent)
# target.note_referenced_by(name=refname)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
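    # Sketch: adjust_uri('someone@example.org') returns
    # 'mailto:someone@example.org'; anything that does not look like a
    # bare email address passes through unchanged.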
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=True)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
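    # Sketch of the label forms dispatched above:
    #   '[1]_'       -> manually numbered footnote_reference
    #   '[#note]_'   -> auto-numbered footnote_reference (name 'note')
    #   '[*]_'       -> auto-symbol footnote_reference
    #   '[CIT2002]_' -> citation_reference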
def reference(self, match, lineno, anonymous=False):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
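    # Sketch: implicit_inline('See http://example.org now', lineno)
    # returns roughly
    #     [Text('See '),
    #      reference('http://example.org', refuri='http://example.org'),
    #      Text(' now')]
    # because the URI pattern matches in the middle and the method
    # recurses on the text before and after the match.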
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
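# Sketch of the enumerator converters defined above:
#   _loweralpha_to_int('c') == 3
#   _upperalpha_to_int('B') == 2
#   _lowerroman_to_int('iv') == 4   (via roman.fromRoman('IV'))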
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = list(enum.formatinfo.keys())
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$', re.UNICODE)
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': '[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile('(---?(?!-)|\u2014) *(?=[^ \\n])',
re.UNICODE)
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
        Return a tuple of: (block quote content lines, attribution lines,
        attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
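    # Sketch: given the indented block
    #     A quoted passage.
    #     <blank line>
    #     -- An Author
    # the '-- An Author' line follows a blank line after non-blank
    # content, so it is split off as the attribution and the passage
    # remains block quote content.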
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.source, node.line = self.state_machine.get_source_and_line(lineno)
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal))
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
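    # For instance (sketch): a match on '(ii)' returns
    # ('parens', 'lowerroman', 'ii', 2), a match on '3.' returns
    # ('period', 'arabic', '3', 3), and '(#)' returns
    # ('parens', '#', '#', 1).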
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
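    # Sketch: make_enumerator(2, 'loweralpha', 'period') returns
    # ('b. ', '#. '), and make_enumerator(27, 'loweralpha', 'period')
    # returns None because the single-letter alphabet is exhausted.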
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
src, srcline = self.state_machine.get_source_and_line()
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.source = src
field_node.line = srcline
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError as error:
# This shouldn't happen; pattern won't match.
msg = self.reporter.error('Invalid option list marker: %s' %
error)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=', 1)
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring))
return optlist
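    # Sketch of the three forms handled above:
    #   '-o file'       -> option '-o', argument 'file', delimiter ' '
    #   '--output=file' -> option '--output', argument 'file', delimiter '='
    #   '-ofile'        -> option '-o', argument 'file', delimiter ''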
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
line=lineno+1)
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
text = '\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
msg = self.reporter.warning(
'Blank line required after table.',
line=self.state_machine.abs_line_number()+1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError as err:
nodelist = self.malformed_table(block, ' '.join(err.args),
offset=err.offset) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=True)
except statemachine.UnexpectedIndentationError as err:
block, src, srcline = err.args
messages.append(self.reporter.error('Unexpected indentation.',
source=src, line=srcline))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail='', offset=0):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
startline = self.state_machine.abs_line_number() - len(block) + 1
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
line=startline+offset)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner),
re.VERBOSE | re.UNICODE),)
def footnote(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.source = src
footnote.line = srcline
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.source = src
citation.line = srcline
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=True, strip_indent=False)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while True:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.')
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
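    # Sketch: a target body of 'http://example.org' yields
    # ('refuri', 'http://example.org'), while a body of 'other_' is a
    # valid reference and yields ('refname', 'other').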
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=False)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while True:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.')
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.source = src
substitution_node.line = srcline
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError as detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text), line=lineno)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError as error:
msg_node = self.reporter.system_message(error.level, error.msg,
line=lineno)
msg_node += nodes.literal_block(block_text, block_text)
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
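    # Hedged sketch of a minimal directive class as dispatched above
    # (``MyTopic`` is a hypothetical example, not part of Docutils):
    #
    #     from docutils import nodes
    #     from docutils.parsers.rst import Directive
    #
    #     class MyTopic(Directive):
    #         has_content = True
    #         def run(self):
    #             node = nodes.topic()
    #             self.state.nested_parse(self.content,
    #                                     self.content_offset, node)
    #             return [node]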
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i, line in enumerate(indented):
if not line.strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
else:
options = {}
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
content = arg_block + indented[i:]
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i, line in enumerate(arg_block):
if re.match(Body.patterns['field_marker'], line):
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
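    # For example (illustrative): with one required and one optional
    # argument, the block "foo bar baz" splits as ['foo', 'bar baz'] when
    # ``final_argument_whitespace`` is set; otherwise a MarkupError is
    # raised because three arguments were supplied.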
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=True)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError as detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError) as detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError as detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
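    # Hedged illustration of an ``option_spec`` as consumed above (the
    # option names are hypothetical; the conversion functions are real,
    # from docutils.parsers.rst.directives):
    #
    #     option_spec = {'name': directives.unchanged,
    #                    'width': directives.nonnegative_int}
    #
    # Field-list input like [':name: foo', ':width: 4'] then yields
    # ``(1, {'name': 'foo', 'width': 4})``.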
def unknown_directive(self, type_name):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=False)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), line=lineno)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
              \#%s # auto-numbered, labeled footnote
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE | re.UNICODE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE | re.UNICODE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError as error:
lineno = self.state_machine.abs_line_number()
message = ' '.join(error.args)
errors.append(self.reporter.warning(message, line=lineno))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
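    # For example (illustrative): the input "- one\n- two\n* three" yields
    # a two-item bullet_list; "* three" uses a different bullet character,
    # so invalid_input() ends this nested parse and a new list begins.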
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError:
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
# NOTE: self.paragraph returns [ node, system_message(s) ], literalnext
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
line=lineno)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning('Title underline too short.',
nodes.literal_block(blocktext, blocktext), line=lineno)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
# We need get_source_and_line() here to report correctly
src, srcline = self.state_machine.get_source_and_line()
# TODO: why is abs_line_number() == srcline+1
# if the error is in a table (try with test_tables.py)?
# print "get_source_and_line", srcline
# print "abs_line_number", self.state_machine.abs_line_number()
msg = self.reporter.severe('Unexpected section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=True)
except statemachine.UnexpectedIndentationError as err:
block, src, srcline = err.args
msg = self.reporter.error('Unexpected indentation.',
source=src, line=srcline)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=False,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
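    # A quoted (unindented) literal block, for reference (illustrative
    # reStructuredText source)::
    #
    #     paragraph::
    #
    #     > quoted literal line one
    #     > quoted literal line two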
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
itemnode = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
(itemnode.source,
itemnode.line) = self.state_machine.get_source_and_line(lineno)
termlist, messages = self.term(termline, lineno)
itemnode += termlist
definition = nodes.definition('', *messages)
itemnode += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.',
line=lineno+1)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return itemnode, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
(term_node.source,
term_node.line) = self.state_machine.get_source_and_line(lineno)
term_node.rawsource = unescape(lines[0])
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
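    # For example (illustrative): the term line
    # "term : classifier one : classifier two" produces one term node
    # ("term") followed by two classifier nodes.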
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
itemnode, blank_finish = self.definition_list_item(context)
self.parent += itemnode
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = False
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
src, srcline = self.state_machine.get_source_and_line()
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.source = src
transition.line = srcline - 1
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext),
line=lineno)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source),
line=lineno)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source),
line=lineno)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source),
line=lineno)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext),
line=lineno)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.",
line=lineno)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=False):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
src, srcline = self.state_machine.get_source_and_line(
self.initial_lineno)
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.source = src
literal_block.line = srcline
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
# src not available, because statemachine.input_lines is empty
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote), re.UNICODE)
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/states.py",
"copies": "2",
"size": "130226",
"license": "mit",
"hash": 8212583848851441000,
"line_mean": 41.3361508453,
"line_max": 93,
"alpha_frac": 0.5469952237,
"autogenerated": false,
"ratio": 4.403990530943524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003999505202225416,
"num_lines": 3076
} |
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=True,
initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
    - `doctitle`: Enable or disable the promotion of a lone top-level section
      title to document title (and of a subsequent section title to document
      subtitle); promotion is enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts
def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=True, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
      string is desired, use the default value of "unicode".
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment
def internals(input_string, source_path=None, destination_path=None,
input_encoding='unicode', settings_overrides=None):
"""
Return the document tree and publisher, for exploring Docutils internals.
Parameters: see `html_parts()`.
"""
if settings_overrides:
overrides = settings_overrides.copy()
else:
overrides = {}
overrides['input_encoding'] = input_encoding
output, pub = core.publish_programmatically(
source_class=io.StringInput, source=input_string,
source_path=source_path,
destination_class=io.NullOutput, destination=None,
destination_path=destination_path,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='null',
settings=None, settings_spec=None, settings_overrides=overrides,
config_section=None, enable_exit_status=None)
return pub.writer.document, pub
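if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: render a small
    # reStructuredText snippet with the helpers defined above.
    sample = "Title\n=====\n\nA *test* paragraph."
    print(html_body(sample))                   # contents of <body>
    print(sorted(html_parts(sample).keys()))   # available part names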
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/examples.py",
"copies": "4",
"size": "3913",
"license": "mit",
"hash": -5686533538776739000,
"line_mean": 39.3402061856,
"line_max": 77,
"alpha_frac": 0.6841298237,
"autogenerated": false,
"ratio": 4.189507494646681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6873637318346681,
"avg_score": null,
"num_lines": null
} |
"""
This module defines table parser classes, which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
from docutils.utils import strip_combining_chars
class TableMarkupError(DataError):
"""
Raise if there is any problem with table markup.
The keyword argument `offset` denotes the offset of the problem
from the table's start line.
"""
def __init__(self, *args, **kwargs):
self.offset = kwargs.pop('offset', 0)
DataError.__init__(self, *args)
class TableParser:
"""
Abstract superclass for the common parts of the syntax-specific parsers.
"""
head_body_separator_pat = None
"""Matches the row separator between head rows and body rows."""
double_width_pad_char = '\x00'
"""Padding character for East Asian double-width text."""
def parse(self, block):
"""
Analyze the text `block` and return a table data structure.
Given a plaintext-graphic table in `block` (list of lines of text; no
whitespace padding), parse the table, construct and return the data
necessary to construct a CALS table or equivalent.
Raise `TableMarkupError` if there is any problem with the markup.
"""
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure
def find_head_body_sep(self):
"""Look for a head/body row separator line; store the line index."""
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators '
'(table lines %s and %s); only one allowed.'
% (self.head_body_sep+1, i+1), offset=i)
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.',
offset=i)
class GridTableParser(TableParser):
"""
Parse a grid table using `parse()`.
Here's an example of a grid table::
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
Intersections use '+', row separators use '-' (except for one optional
head/body row separator, which uses '='), and column separators use '|'.
Passing the above table to the `parse()` method will result in the
following data structure::
([24, 12, 10, 10],
[[(0, 0, 1, ['Header row, column 1']),
(0, 0, 1, ['Header 2']),
(0, 0, 1, ['Header 3']),
(0, 0, 1, ['Header 4'])]],
[[(0, 0, 3, ['body row 1, column 1']),
(0, 0, 3, ['column 2']),
(0, 0, 3, ['column 3']),
(0, 0, 3, ['column 4'])],
[(0, 0, 5, ['body row 2']),
(0, 2, 5, ['Cells may span columns.']),
None,
None],
[(0, 0, 7, ['body row 3']),
(1, 0, 7, ['Cells may', 'span rows.', '']),
(1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
None],
[(0, 0, 9, ['body row 4']), None, None, None]])
The first item is a list containing column widths (colspecs). The second
item is a list of head rows, and the third is a list of body rows. Each
row contains a list of cells. Each cell is either None (for a cell unused
because of another cell's span), or a tuple. A cell tuple contains four
items: the number of extra rows used by the cell in a vertical span
(morerows); the number of extra columns used by the cell in a horizontal
span (morecols); the line offset of the first line of the cell contents;
and the cell contents, a list of lines of text.
"""
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = block[:] # make a copy; it may be modified
self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
self.done = [-1] * len(block[0])
self.cells = []
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
Start with a queue of upper-left corners, containing the upper-left
corner of the table itself. Trace out one rectangular cell, remember
it, and add its upper-right and lower-left corners to the queue of
potential upper-left corners of further cells. Process the queue in
top-to-bottom order, keeping track of how much of each text column has
been seen.
We'll end up knowing all the row and column boundaries, cell positions
and their dimensions.
"""
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
"""For keeping track of how much of each text column has been seen."""
before = top - 1
after = bottom - 1
for col in range(left, right):
assert self.done[col] == before
self.done[col] = after
def check_parse_complete(self):
"""Each text column should have been completely seen."""
last = self.bottom - 1
for col in range(self.right):
if self.done[col] != last:
return False
return True
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
result = self.scan_right(top, left)
return result
def scan_right(self, top, left):
"""
Look for the top-right corner of the cell, and make note of all column
boundaries ('+').
"""
colseps = {}
line = self.block[top]
for i in range(left + 1, self.right + 1):
if line[i] == '+':
colseps[i] = [top]
result = self.scan_down(top, left, i)
if result:
bottom, rowseps, newcolseps = result
update_dict_of_lists(colseps, newcolseps)
return bottom, i, rowseps, colseps
elif line[i] != '-':
return None
return None
def scan_down(self, top, left, right):
"""
Look for the bottom-right corner of the cell, making note of all row
boundaries.
"""
rowseps = {}
for i in range(top + 1, self.bottom + 1):
if self.block[i][right] == '+':
rowseps[i] = [right]
result = self.scan_left(top, left, i, right)
if result:
newrowseps, colseps = result
update_dict_of_lists(rowseps, newrowseps)
return i, rowseps, colseps
elif self.block[i][right] != '|':
return None
return None
def scan_left(self, top, left, bottom, right):
"""
Noting column boundaries, look for the bottom-left corner of the cell.
It must line up with the starting point.
"""
colseps = {}
line = self.block[bottom]
for i in range(right - 1, left, -1):
if line[i] == '+':
colseps[i] = [bottom]
elif line[i] != '-':
return None
if line[left] != '+':
return None
result = self.scan_up(top, left, bottom, right)
if result is not None:
rowseps = result
return rowseps, colseps
return None
def scan_up(self, top, left, bottom, right):
"""
Noting row boundaries, see if we can return to the starting point.
"""
rowseps = {}
for i in range(bottom - 1, top, -1):
if self.block[i][left] == '+':
rowseps[i] = [left]
elif self.block[i][left] != '|':
return None
return rowseps
def structure_from_cells(self):
"""
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = self.rowseps.keys() # list of row boundaries
rowseps.sort()
rowindex = {}
for i in range(len(rowseps)):
rowindex[rowseps[i]] = i # row boundary -> row number mapping
colseps = self.colseps.keys() # list of column boundaries
colseps.sort()
colindex = {}
for i in range(len(colseps)):
colindex[colseps[i]] = i # column boundary -> col number map
colspecs = [(colseps[i] - colseps[i - 1] - 1)
for i in range(1, len(colseps))] # list of column widths
# prepare an empty table with the correct number of rows & columns
onerow = [None for i in range(len(colseps) - 1)]
rows = [onerow[:] for i in range(len(rowseps) - 1)]
# keep track of # of cells remaining; should reduce to zero
remaining = (len(rowseps) - 1) * (len(colseps) - 1)
for top, left, bottom, right, block in self.cells:
rownum = rowindex[top]
colnum = colindex[left]
assert rows[rownum][colnum] is None, (
'Cell (row %s, column %s) already used.'
% (rownum + 1, colnum + 1))
morerows = rowindex[bottom] - rownum - 1
morecols = colindex[right] - colnum - 1
remaining -= (morerows + 1) * (morecols + 1)
# write the cell into the table
rows[rownum][colnum] = (morerows, morecols, top + 1, block)
assert remaining == 0, 'Unused cells remaining.'
if self.head_body_sep: # separate head rows from body rows
numheadrows = rowindex[self.head_body_sep]
headrows = rows[:numheadrows]
bodyrows = rows[numheadrows:]
else:
headrows = []
bodyrows = rows
return (colspecs, headrows, bodyrows)
class SimpleTableParser(TableParser):
"""
Parse a simple table using `parse()`.
Here's an example of a simple table::
===== =====
col 1 col 2
===== =====
1 Second column of row 1.
2 Second column of row 2.
Second line of paragraph.
3 - Second column of row 3.
- Second item in bullet
list (row 3, column 2).
4 is a span
------------
5
===== =====
Top and bottom borders use '=', column span underlines use '-', column
separation is indicated with spaces.
Passing the above table to the `parse()` method will result in the
following data structure, whose interpretation is the same as for
`GridTableParser`::
([5, 25],
[[(0, 0, 1, ['col 1']),
(0, 0, 1, ['col 2'])]],
[[(0, 0, 3, ['1']),
(0, 0, 3, ['Second column of row 1.'])],
[(0, 0, 4, ['2']),
(0, 0, 4, ['Second column of row 2.',
'Second line of paragraph.'])],
[(0, 0, 6, ['3']),
(0, 0, 6, ['- Second column of row 3.',
'',
'- Second item in bullet',
' list (row 3, column 2).'])],
[(0, 1, 10, ['4 is a span'])],
[(0, 0, 12, ['5']),
(0, 0, 12, [''])]])
"""
head_body_separator_pat = re.compile('=[ =]*$')
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = block[:] # make a copy; it will be modified
self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
self.head_body_sep = None
self.columns = []
self.border_end = None
self.table = []
self.done = [-1] * len(block[0])
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
First determine the column boundaries from the top border, then
process rows. Each row may consist of multiple lines; accumulate
lines until a row is complete. Call `self.parse_row` to finish the
job.
"""
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1
def parse_columns(self, line, offset):
"""
Given a column span underline, return a list of (begin, end) pairs.
"""
cols = []
end = 0
while True:
begin = line.find('-', end)
end = line.find(' ', begin)
if begin < 0:
break
if end < 0:
end = len(line)
cols.append((begin, end))
if self.columns:
if cols[-1][1] != self.border_end:
raise TableMarkupError('Column span incomplete in table '
'line %s.' % (offset+1),
offset=offset)
# Allow for an unbounded rightmost column:
cols[-1] = (cols[-1][0], self.columns[-1][1])
return cols
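    # For example (illustrative): parse_columns('-----  ------', 0)
    # returns [(0, 5), (7, 13)].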
def init_row(self, colspec, offset):
i = 0
cells = []
for start, end in colspec:
morecols = 0
try:
assert start == self.columns[i][0]
while end != self.columns[i][1]:
i += 1
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem '
'in table line %s.' % (offset+2),
offset=offset+1)
cells.append([0, morecols, offset, []])
i += 1
return cells
def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
The row is parsed according to the current column spec (either
`spanline` if provided or `self.columns`). For each column, extract
text from each line, and check for text in column margins. Finally,
adjust for insignificant whitespace.
"""
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
row[i][3] = cellblock
self.table.append(row)
def check_columns(self, lines, first_line, columns):
"""
Check for text in column margins and text overflow in the last column.
Raise TableMarkupError if anything but whitespace is in column margins.
Adjust the end value for the last column if there is text overflow.
"""
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
lastcol = len(columns) - 2
# combining characters do not contribute to the column width
lines = [strip_combining_chars(line) for line in lines]
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
new_end = start + len(text)
columns[i] = (start, new_end)
main_start, main_end = self.columns[-1]
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin '
'in table line %s.' % (first_line+offset+1),
offset=first_line+offset)
offset += 1
columns.pop()
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
if self.head_body_sep:
for i in range(len(self.table)):
if self.table[i][0][2] > self.head_body_sep:
first_body_row = i
break
return (colspecs, self.table[:first_body_row],
self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
"""
Extend the list values of `master` with those from `newdata`.
Both parameters must be dictionaries containing list values.
"""
for key, values in newdata.items():
master.setdefault(key, []).extend(values)
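if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module.  It assumes
    # docutils.statemachine.StringList, which provides the disconnect() and
    # get_2D_block() methods that parse() relies on.
    from docutils.statemachine import StringList
    table = ['+-----+-----+',
             '| a   | b   |',
             '+=====+=====+',
             '| 1   | 2   |',
             '+-----+-----+']
    colspecs, headrows, bodyrows = GridTableParser().parse(StringList(table))
    print colspecs                      # [5, 5]
    print len(headrows), len(bodyrows)  # 1 1
    d = {0: [0]}
    update_dict_of_lists(d, {0: [6], 2: [0]})
    print d                             # {0: [0, 6], 2: [0]}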
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/tableparser.py",
"copies": "2",
"size": "20958",
"license": "mit",
"hash": -6393871154027236000,
"line_mean": 37.5257352941,
"line_max": 79,
"alpha_frac": 0.5163660655,
"autogenerated": false,
"ratio": 4.1575084308668915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001648933618243081,
"num_lines": 544
} |
"""
This package contains directive implementation modules.
"""
__docformat__ = 'reStructuredText'
import re
import codecs
import sys
from docutils import nodes
from docutils.parsers.rst.languages import en as _fallback_language_module
if sys.version_info < (2,5):
from docutils._compat import __import__
_directive_registry = {
'attention': ('admonitions', 'Attention'),
'caution': ('admonitions', 'Caution'),
'code': ('body', 'CodeBlock'),
'danger': ('admonitions', 'Danger'),
'error': ('admonitions', 'Error'),
'important': ('admonitions', 'Important'),
'note': ('admonitions', 'Note'),
'tip': ('admonitions', 'Tip'),
'hint': ('admonitions', 'Hint'),
'warning': ('admonitions', 'Warning'),
'admonition': ('admonitions', 'Admonition'),
'sidebar': ('body', 'Sidebar'),
'topic': ('body', 'Topic'),
'line-block': ('body', 'LineBlock'),
'parsed-literal': ('body', 'ParsedLiteral'),
'math': ('body', 'MathBlock'),
'rubric': ('body', 'Rubric'),
'epigraph': ('body', 'Epigraph'),
'highlights': ('body', 'Highlights'),
'pull-quote': ('body', 'PullQuote'),
'compound': ('body', 'Compound'),
'container': ('body', 'Container'),
#'questions': ('body', 'question_list'),
'table': ('tables', 'RSTTable'),
'csv-table': ('tables', 'CSVTable'),
'list-table': ('tables', 'ListTable'),
'image': ('images', 'Image'),
'figure': ('images', 'Figure'),
'contents': ('parts', 'Contents'),
'sectnum': ('parts', 'Sectnum'),
'header': ('parts', 'Header'),
'footer': ('parts', 'Footer'),
#'footnotes': ('parts', 'footnotes'),
#'citations': ('parts', 'citations'),
'target-notes': ('references', 'TargetNotes'),
'meta': ('html', 'Meta'),
#'imagemap': ('html', 'imagemap'),
'raw': ('misc', 'Raw'),
'include': ('misc', 'Include'),
'replace': ('misc', 'Replace'),
'unicode': ('misc', 'Unicode'),
'class': ('misc', 'Class'),
'role': ('misc', 'Role'),
'default-role': ('misc', 'DefaultRole'),
'title': ('misc', 'Title'),
'date': ('misc', 'Date'),
'restructuredtext-test-directive': ('misc', 'TestDirective'),}
"""Mapping of directive name to (module name, class name). The
directive name is canonical & must be lowercase. Language-dependent
names are defined in the ``language`` subpackage."""
_directives = {}
"""Cache of imported directives."""
def directive(directive_name, language_module, document):
"""
Locate and return a directive function from its language-dependent name.
If not found in the current language, check English. Return None if the
named directive cannot be found.
"""
normname = directive_name.lower()
messages = []
msg_text = []
if normname in _directives:
return _directives[normname], messages
canonicalname = None
try:
canonicalname = language_module.directives[normname]
except AttributeError, error:
msg_text.append('Problem retrieving directive entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No directive entry for "%s" in module "%s".'
% (directive_name, language_module.__name__))
if not canonicalname:
try:
canonicalname = _fallback_language_module.directives[normname]
msg_text.append('Using English fallback for directive "%s".'
% directive_name)
except KeyError:
msg_text.append('Trying "%s" as canonical directive name.'
% directive_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
if msg_text:
message = document.reporter.info(
'\n'.join(msg_text), line=document.current_line)
messages.append(message)
try:
modulename, classname = _directive_registry[canonicalname]
except KeyError:
# Error handling done by caller.
return None, messages
try:
module = __import__(modulename, globals(), locals(), level=1)
except ImportError, detail:
messages.append(document.reporter.error(
'Error importing directive module "%s" (directive "%s"):\n%s'
% (modulename, directive_name, detail),
line=document.current_line))
return None, messages
try:
directive = getattr(module, classname)
_directives[normname] = directive
except AttributeError:
messages.append(document.reporter.error(
'No directive class "%s" in module "%s" (directive "%s").'
% (classname, modulename, directive_name),
line=document.current_line))
return None, messages
return directive, messages
def register_directive(name, directive):
"""
Register a nonstandard application-defined directive function.
Language lookups are not needed for such functions.
"""
_directives[name] = directive
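# A minimal illustrative sketch of `register_directive` (editor's addition,
# not part of the original module).  The `Shout` directive below is
# hypothetical and exists only for demonstration:
def _example_register_directive():
    from docutils.parsers.rst import Directive
    class Shout(Directive):
        """Render the single argument as an upper-cased paragraph."""
        required_arguments = 1
        final_argument_whitespace = True
        def run(self):
            return [nodes.paragraph(text=self.arguments[0].upper())]
    register_directive('shout', Shout)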
def flag(argument):
"""
Check for a valid flag option (no argument) and return ``None``.
(Directive option conversion function.)
Raise ``ValueError`` if an argument is found.
"""
if argument and argument.strip():
raise ValueError('no argument is allowed; "%s" supplied' % argument)
else:
return None
def unchanged_required(argument):
"""
Return the argument text, unchanged.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
return argument # unchanged!
def unchanged(argument):
"""
Return the argument text, unchanged.
(Directive option conversion function.)
No argument implies empty string ("").
"""
if argument is None:
return u''
else:
return argument # unchanged!
def path(argument):
"""
Return the path argument unwrapped (with newlines removed).
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
path = ''.join([s.strip() for s in argument.splitlines()])
return path
def uri(argument):
"""
Return the URI argument with whitespace removed.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
uri = ''.join(argument.split())
return uri
def nonnegative_int(argument):
"""
Check for a nonnegative integer argument; raise ``ValueError`` if not.
(Directive option conversion function.)
"""
value = int(argument)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def percentage(argument):
"""
Check for an integer percentage value with optional percent sign.
"""
try:
argument = argument.rstrip(' %')
except AttributeError:
pass
return nonnegative_int(argument)
length_units = ['em', 'ex', 'px', 'in', 'cm', 'mm', 'pt', 'pc']
def get_measure(argument, units):
"""
Check for a positive argument of one of the units and return a
normalized string of the form "<value><unit>" (without space in
between).
To be called from directive option conversion functions.
"""
match = re.match(r'^([0-9.]+) *(%s)$' % '|'.join(units), argument)
try:
float(match.group(1))
except (AttributeError, ValueError):
raise ValueError(
'not a positive measure of one of the following units:\n%s'
% ' '.join(['"%s"' % i for i in units]))
return match.group(1) + match.group(2)
def length_or_unitless(argument):
return get_measure(argument, length_units + [''])
def length_or_percentage_or_unitless(argument, default=''):
"""
Return normalized string of a length or percentage unit.
Add <default> if there is no unit. Raise ValueError if the argument is not
a positive measure of one of the valid CSS units (or without unit).
>>> length_or_percentage_or_unitless('3 pt')
'3pt'
>>> length_or_percentage_or_unitless('3%', 'em')
'3%'
>>> length_or_percentage_or_unitless('3')
'3'
>>> length_or_percentage_or_unitless('3', 'px')
'3px'
"""
try:
return get_measure(argument, length_units + ['%'])
except ValueError:
try:
return get_measure(argument, ['']) + default
except ValueError:
# raise ValueError with list of valid units:
return get_measure(argument, length_units + ['%'])
def class_option(argument):
"""
Convert the argument into a list of ID-compatible strings and return it.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
names = argument.split()
class_names = []
for name in names:
class_name = nodes.make_id(name)
if not class_name:
raise ValueError('cannot make "%s" into a class name' % name)
class_names.append(class_name)
return class_names
unicode_pattern = re.compile(
r'(?:0x|x|\\x|U\+?|\\u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
def unicode_code(code):
r"""
Convert a Unicode character code to a Unicode character.
(Directive option conversion function.)
Codes may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style
numeric character entities (e.g. ``☮``). Other text remains as-is.
Raise ValueError for illegal Unicode code values.
"""
try:
if code.isdigit(): # decimal number
return unichr(int(code))
else:
match = unicode_pattern.match(code)
if match: # hex number
value = match.group(1) or match.group(2)
return unichr(int(value, 16))
else: # other text
return code
except OverflowError, detail:
raise ValueError('code too large (%s)' % detail)
def single_char_or_unicode(argument):
"""
A single character is returned as-is. Unicode characters codes are
converted as in `unicode_code`. (Directive option conversion function.)
"""
char = unicode_code(argument)
if len(char) > 1:
raise ValueError('%r invalid; must be a single character or '
'a Unicode code' % char)
return char
def single_char_or_whitespace_or_unicode(argument):
"""
As with `single_char_or_unicode`, but "tab" and "space" are also supported.
(Directive option conversion function.)
"""
if argument == 'tab':
char = '\t'
elif argument == 'space':
char = ' '
else:
char = single_char_or_unicode(argument)
return char
def positive_int(argument):
"""
Converts the argument into an integer. Raises ValueError for negative,
zero, or non-integer values. (Directive option conversion function.)
"""
value = int(argument)
if value < 1:
raise ValueError('negative or zero value; must be positive')
return value
def positive_int_list(argument):
"""
Converts a space- or comma-separated list of values into a Python list
of integers.
(Directive option conversion function.)
Raises ValueError for non-positive-integer values.
"""
if ',' in argument:
entries = argument.split(',')
else:
entries = argument.split()
return [positive_int(entry) for entry in entries]
def encoding(argument):
"""
    Verifies the encoding argument by lookup.
(Directive option conversion function.)
Raises ValueError for unknown encodings.
"""
try:
codecs.lookup(argument)
except LookupError:
raise ValueError('unknown encoding: "%s"' % argument)
return argument
def choice(argument, values):
"""
Directive option utility function, supplied to enable options whose
argument must be a member of a finite set of possible values (must be
lower case). A custom conversion function must be written to use it. For
example::
from docutils.parsers.rst import directives
def yesno(argument):
return directives.choice(argument, ('yes', 'no'))
Raise ``ValueError`` if no argument is found or if the argument's value is
not valid (not an entry in the supplied list).
"""
try:
value = argument.lower().strip()
except AttributeError:
raise ValueError('must supply an argument; choose from %s'
% format_values(values))
if value in values:
return value
else:
raise ValueError('"%s" unknown; choose from %s'
% (argument, format_values(values)))
def format_values(values):
return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
values[-1])
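# A few illustrative checks of the option conversion functions above
# (editor's addition, not part of the original module):
def _example_option_conversions():
    assert flag(None) is None
    assert nonnegative_int('3') == 3
    assert percentage('50 %') == 50
    assert length_or_percentage_or_unitless('3', 'px') == '3px'
    assert class_option('First Second') == ['first', 'second']
    assert unicode_code('U+262E') == u'\u262e'
    def yesno(argument):                 # pattern from the `choice` docstring
        return choice(argument, ('yes', 'no'))
    assert yesno(' Yes ') == 'yes'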
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/directives/__init__.py",
"copies": "2",
"size": "13561",
"license": "mit",
"hash": -6169642691767550000,
"line_mean": 32.5668316832,
"line_max": 79,
"alpha_frac": 0.6060762481,
"autogenerated": false,
"ratio": 4.181621954979957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5787698203079957,
"avg_score": null,
"num_lines": null
} |
"""
This package contains Docutils parser modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import Component
if sys.version_info < (2,5):
from docutils._compat import __import__
class Parser(Component):
component_type = 'parser'
config_section = 'parsers'
def parse(self, inputstring, document):
"""Override to parse `inputstring` into document tree `document`."""
raise NotImplementedError('subclass must override this method')
def setup_parse(self, inputstring, document):
"""Initial parse setup. Call at start of `self.parse()`."""
self.inputstring = inputstring
self.document = document
document.reporter.attach_observer(document.note_parse_message)
def finish_parse(self):
"""Finalize parse details. Call at end of `self.parse()`."""
self.document.reporter.detach_observer(
self.document.note_parse_message)
_parser_aliases = {
'restructuredtext': 'rst',
'rest': 'rst',
'restx': 'rst',
'rtxt': 'rst',}
def get_parser_class(parser_name):
"""Return the Parser class from the `parser_name` module."""
parser_name = parser_name.lower()
if parser_name in _parser_aliases:
parser_name = _parser_aliases[parser_name]
try:
module = __import__(parser_name, globals(), locals(), level=0)
except ImportError:
module = __import__(parser_name, globals(), locals(), level=1)
return module.Parser
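# Illustrative usage of the alias lookup above (editor's addition, not part
# of the original module):
def _example_get_parser_class():
    parser_class = get_parser_class('restructuredtext')   # alias for 'rst'
    parser = parser_class()
    assert 'rst' in parser.supported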
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/__init__.py",
"copies": "4",
"size": "1611",
"license": "mit",
"hash": -1721679786475125500,
"line_mean": 29.3962264151,
"line_max": 76,
"alpha_frac": 0.6530105525,
"autogenerated": false,
"ratio": 3.9388753056234718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6591885858123472,
"avg_score": null,
"num_lines": null
} |
"""
This package contains Docutils Writer modules.
"""
__docformat__ = 'reStructuredText'
import os.path
import sys
import docutils
from docutils import languages, Component
from docutils.transforms import universal
if sys.version_info < (2,5):
from docutils._compat import __import__
class Writer(Component):
"""
Abstract base class for docutils Writers.
Each writer module or package must export a subclass also called 'Writer'.
Each writer must support all standard node types listed in
`docutils.nodes.node_class_names`.
The `write()` method is the main entry point.
"""
component_type = 'writer'
config_section = 'writers'
def get_transforms(self):
return Component.get_transforms(self) + [
universal.Messages,
universal.FilterMessages,
universal.StripClassesAndElements,]
document = None
"""The document to write (Docutils doctree); set by `write`."""
output = None
"""Final translated form of `document` (Unicode string for text, binary
string for other forms); set by `translate`."""
language = None
"""Language module for the document; set by `write`."""
destination = None
"""`docutils.io` Output object; where to write the document.
Set by `write`."""
def __init__(self):
# Used by HTML and LaTeX writer for output fragments:
self.parts = {}
"""Mapping of document part names to fragments of `self.output`.
Values are Unicode strings; encoding is up to the client. The 'whole'
key should contain the entire document output.
"""
def write(self, document, destination):
"""
Process a document into its final form.
Translate `document` (a Docutils document tree) into the Writer's
native format, and write it out to its `destination` (a
`docutils.io.Output` subclass object).
Normally not overridden or extended in subclasses.
"""
self.document = document
self.language = languages.get_language(
document.settings.language_code,
document.reporter)
self.destination = destination
self.translate()
output = self.destination.write(self.output)
return output
def translate(self):
"""
Do final translation of `self.document` into `self.output`. Called
from `write`. Override in subclasses.
Usually done with a `docutils.nodes.NodeVisitor` subclass, in
combination with a call to `docutils.nodes.Node.walk()` or
`docutils.nodes.Node.walkabout()`. The ``NodeVisitor`` subclass must
support all standard elements (listed in
`docutils.nodes.node_class_names`) and possibly non-standard elements
used by the current Reader as well.
"""
raise NotImplementedError('subclass must override this method')
def assemble_parts(self):
"""Assemble the `self.parts` dictionary. Extend in subclasses."""
self.parts['whole'] = self.output
self.parts['encoding'] = self.document.settings.output_encoding
self.parts['version'] = docutils.__version__
class UnfilteredWriter(Writer):
"""
A writer that passes the document tree on unchanged (e.g. a
serializer.)
Documents written by UnfilteredWriters are typically reused at a
later date using a subclass of `readers.ReReader`.
"""
def get_transforms(self):
# Do not add any transforms. When the document is reused
# later, the then-used writer will add the appropriate
# transforms.
return Component.get_transforms(self)
_writer_aliases = {
'html': 'html4css1',
'latex': 'latex2e',
'pprint': 'pseudoxml',
'pformat': 'pseudoxml',
'pdf': 'rlpdf',
'xml': 'docutils_xml',
's5': 's5_html'}
def get_writer_class(writer_name):
"""Return the Writer class from the `writer_name` module."""
writer_name = writer_name.lower()
if writer_name in _writer_aliases:
writer_name = _writer_aliases[writer_name]
try:
module = __import__(writer_name, globals(), locals(), level=0)
except ImportError:
module = __import__(writer_name, globals(), locals(), level=1)
return module.Writer
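# Illustrative usage (editor's addition, not part of the original module):
# clients normally reach a Writer through `docutils.core`, which calls
# `Writer.write()` and exposes the `parts` mapping built by
# `assemble_parts` above.
def _example_writer_parts():
    from docutils.core import publish_parts
    parts = publish_parts('*Hello*', writer_name='html')  # alias: html4css1
    assert 'whole' in parts and 'encoding' in parts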
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/writers/__init__.py",
"copies": "4",
"size": "4438",
"license": "mit",
"hash": -4861539981892089000,
"line_mean": 30.7,
"line_max": 78,
"alpha_frac": 0.6480396575,
"autogenerated": false,
"ratio": 4.355250245338567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005991965751817968,
"num_lines": 140
} |
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
pep_url = 'pep-%04d'
pep_cvs_url = ('http://svn.python.org/view/*checkout*'
'/peps/trunk/pep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node, pep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
                for refpep in re.split(r',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert an empty table of contents topic and a transform placeholder into
the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
name = language.labels['contents']
title = nodes.title('', name)
topic = nodes.topic('', title, classes=['contents'])
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['names'].append(name)
self.document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents)
topic += pending
self.document.insert(1, topic)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):
"""
Special processing for PEP 0.
"""
    default_priority = 760
def apply(self):
visitor = PEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by PEP 0:
- Mask email addresses.
- Link PEP numbers in the second column of 4-column tables to the PEPs
themselves.
"""
pep_url = Headers.pep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.replace_self(mask_email(node))
def visit_field_list(self, node):
if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
self.pep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
ref = (self.document.settings.pep_base_url
+ self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('peps@python.org',
                        'python-list@python.org',
                        'python-dev@python.org')
def mask_email(ref, pepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        if ref['refuri'][7:] in non_masked_addresses:  # len('mailto:') == 7
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref
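# Illustrative behaviour of `mask_email` (editor's addition, not part of the
# original module; the address is a placeholder):
def _example_mask_email():
    ref = nodes.reference('', 'user@example.org',
                          refuri='mailto:user@example.org')
    masked = mask_email(ref)
    # "@" is rewritten as " at " inside a raw-HTML replacement node:
    assert masked.astext() == 'user at example.org'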
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/transforms/peps.py",
"copies": "4",
"size": "11023",
"license": "mit",
"hash": 996974929146058000,
"line_mean": 35.1409836066,
"line_max": 79,
"alpha_frac": 0.5314342738,
"autogenerated": false,
"ratio": 4.284104158569763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004375122597889208,
"num_lines": 305
} |
"""
Transforms for resolving references.
"""
__docformat__ = 'reStructuredText'
import sys
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class PropagateTargets(Transform):
"""
Propagate empty internal targets to the next element.
Given the following nodes::
<target ids="internal1" names="internal1">
<target anonymous="1" ids="id1">
<target ids="internal2" names="internal2">
<paragraph>
This is a test.
PropagateTargets propagates the ids and names of the internal
targets preceding the paragraph to the paragraph itself::
<target refid="internal1">
<target anonymous="1" refid="id1">
<target refid="internal2">
<paragraph ids="internal2 id1 internal1" names="internal2 internal1">
This is a test.
"""
default_priority = 260
def apply(self):
for target in self.document.traverse(nodes.target):
# Only block-level targets without reference (like ".. target:"):
if (isinstance(target.parent, nodes.TextElement) or
(target.hasattr('refid') or target.hasattr('refuri') or
target.hasattr('refname'))):
continue
assert len(target) == 0, 'error: block-level target has children'
next_node = target.next_node(ascend=True)
# Do not move names and ids into Invisibles (we'd lose the
# attributes) or different Targetables (e.g. footnotes).
if (next_node is not None and
((not isinstance(next_node, nodes.Invisible) and
not isinstance(next_node, nodes.Targetable)) or
isinstance(next_node, nodes.target))):
next_node['ids'].extend(target['ids'])
next_node['names'].extend(target['names'])
# Set defaults for next_node.expect_referenced_by_name/id.
if not hasattr(next_node, 'expect_referenced_by_name'):
next_node.expect_referenced_by_name = {}
if not hasattr(next_node, 'expect_referenced_by_id'):
next_node.expect_referenced_by_id = {}
for id in target['ids']:
# Update IDs to node mapping.
self.document.ids[id] = next_node
# If next_node is referenced by id ``id``, this
# target shall be marked as referenced.
next_node.expect_referenced_by_id[id] = target
for name in target['names']:
next_node.expect_referenced_by_name[name] = target
# If there are any expect_referenced_by_... attributes
# in target set, copy them to next_node.
next_node.expect_referenced_by_name.update(
getattr(target, 'expect_referenced_by_name', {}))
next_node.expect_referenced_by_id.update(
getattr(target, 'expect_referenced_by_id', {}))
# Set refid to point to the first former ID of target
# which is now an ID of next_node.
target['refid'] = target['ids'][0]
# Clear ids and names; they have been moved to
# next_node.
target['ids'] = []
target['names'] = []
self.document.note_refid(target)
class AnonymousHyperlinks(Transform):
"""
Link anonymous references to targets. Given::
<paragraph>
<reference anonymous="1">
internal
<reference anonymous="1">
external
<target anonymous="1" ids="id1">
<target anonymous="1" ids="id2" refuri="http://external">
Corresponding references are linked via "refid" or resolved via "refuri"::
<paragraph>
<reference anonymous="1" refid="id1">
                internal
<reference anonymous="1" refuri="http://external">
external
<target anonymous="1" ids="id1">
<target anonymous="1" ids="id2" refuri="http://external">
"""
default_priority = 440
def apply(self):
anonymous_refs = []
anonymous_targets = []
for node in self.document.traverse(nodes.reference):
if node.get('anonymous'):
anonymous_refs.append(node)
for node in self.document.traverse(nodes.target):
if node.get('anonymous'):
anonymous_targets.append(node)
if len(anonymous_refs) \
!= len(anonymous_targets):
msg = self.document.reporter.error(
'Anonymous hyperlink mismatch: %s references but %s '
'targets.\nSee "backrefs" attribute for IDs.'
% (len(anonymous_refs), len(anonymous_targets)))
msgid = self.document.set_id(msg)
for ref in anonymous_refs:
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
return
for ref, target in zip(anonymous_refs, anonymous_targets):
target.referenced = 1
while True:
if target.hasattr('refuri'):
ref['refuri'] = target['refuri']
ref.resolved = 1
break
else:
if not target['ids']:
# Propagated target.
target = self.document.ids[target['refid']]
continue
ref['refid'] = target['ids'][0]
self.document.note_refid(ref)
break
class IndirectHyperlinks(Transform):
"""
a) Indirect external references::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refname="direct external">
The "refuri" attribute is migrated back to all indirect targets
from the final direct target (i.e. a target not referring to
another indirect target)::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refuri="http://indirect">
Once the attribute is migrated, the preexisting "refname" attribute
is dropped.
b) Indirect internal references::
<target id="id1" name="final target">
<paragraph>
<reference refname="indirect internal">
indirect internal
<target id="id2" name="indirect internal 2"
refname="final target">
<target id="id3" name="indirect internal"
refname="indirect internal 2">
Targets which indirectly refer to an internal target become one-hop
indirect (their "refid" attributes are directly set to the internal
target's "id"). References which indirectly refer to an internal
target become direct internal references::
<target id="id1" name="final target">
<paragraph>
<reference refid="id1">
indirect internal
<target id="id2" name="indirect internal 2" refid="id1">
<target id="id3" name="indirect internal" refid="id1">
"""
default_priority = 460
def apply(self):
for target in self.document.indirect_targets:
if not target.resolved:
self.resolve_indirect_target(target)
self.resolve_indirect_references(target)
def resolve_indirect_target(self, target):
refname = target.get('refname')
if refname is None:
reftarget_id = target['refid']
else:
reftarget_id = self.document.nameids.get(refname)
if not reftarget_id:
# Check the unknown_reference_resolvers
for resolver_function in \
self.document.transformer.unknown_reference_resolvers:
if resolver_function(target):
break
else:
self.nonexistent_indirect_target(target)
return
reftarget = self.document.ids[reftarget_id]
reftarget.note_referenced_by(id=reftarget_id)
if isinstance(reftarget, nodes.target) \
and not reftarget.resolved and reftarget.hasattr('refname'):
if hasattr(target, 'multiply_indirect'):
#and target.multiply_indirect):
#del target.multiply_indirect
self.circular_indirect_reference(target)
return
target.multiply_indirect = 1
self.resolve_indirect_target(reftarget) # multiply indirect
del target.multiply_indirect
if reftarget.hasattr('refuri'):
target['refuri'] = reftarget['refuri']
if 'refid' in target:
del target['refid']
elif reftarget.hasattr('refid'):
target['refid'] = reftarget['refid']
self.document.note_refid(target)
else:
if reftarget['ids']:
target['refid'] = reftarget_id
self.document.note_refid(target)
else:
self.nonexistent_indirect_target(target)
return
if refname is not None:
del target['refname']
target.resolved = 1
def nonexistent_indirect_target(self, target):
if target['refname'] in self.document.nameids:
self.indirect_target_error(target, 'which is a duplicate, and '
'cannot be used as a unique reference')
else:
self.indirect_target_error(target, 'which does not exist')
def circular_indirect_reference(self, target):
self.indirect_target_error(target, 'forming a circular reference')
def indirect_target_error(self, target, explanation):
naming = ''
reflist = []
if target['names']:
naming = '"%s" ' % target['names'][0]
for name in target['names']:
reflist.extend(self.document.refnames.get(name, []))
for id in target['ids']:
reflist.extend(self.document.refids.get(id, []))
if target['ids']:
naming += '(id="%s")' % target['ids'][0]
msg = self.document.reporter.error(
'Indirect hyperlink target %s refers to target "%s", %s.'
% (naming, target['refname'], explanation), base_node=target)
msgid = self.document.set_id(msg)
for ref in utils.uniq(reflist):
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
target.resolved = 1
def resolve_indirect_references(self, target):
if target.hasattr('refid'):
attname = 'refid'
call_method = self.document.note_refid
elif target.hasattr('refuri'):
attname = 'refuri'
call_method = None
else:
return
attval = target[attname]
for name in target['names']:
reflist = self.document.refnames.get(name, [])
if reflist:
target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref[attname] = attval
if call_method:
call_method(ref)
ref.resolved = 1
if isinstance(ref, nodes.target):
self.resolve_indirect_references(ref)
for id in target['ids']:
reflist = self.document.refids.get(id, [])
if reflist:
target.note_referenced_by(id=id)
for ref in reflist:
if ref.resolved:
continue
del ref['refid']
ref[attname] = attval
if call_method:
call_method(ref)
ref.resolved = 1
if isinstance(ref, nodes.target):
self.resolve_indirect_references(ref)
class ExternalTargets(Transform):
"""
Given::
<paragraph>
<reference refname="direct external">
direct external
<target id="id1" name="direct external" refuri="http://direct">
The "refname" attribute is replaced by the direct "refuri" attribute::
<paragraph>
<reference refuri="http://direct">
direct external
<target id="id1" name="direct external" refuri="http://direct">
"""
default_priority = 640
def apply(self):
for target in self.document.traverse(nodes.target):
if target.hasattr('refuri'):
refuri = target['refuri']
for name in target['names']:
reflist = self.document.refnames.get(name, [])
if reflist:
target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref['refuri'] = refuri
ref.resolved = 1
class InternalTargets(Transform):
default_priority = 660
def apply(self):
for target in self.document.traverse(nodes.target):
if not target.hasattr('refuri') and not target.hasattr('refid'):
self.resolve_reference_ids(target)
def resolve_reference_ids(self, target):
"""
Given::
<paragraph>
<reference refname="direct internal">
direct internal
<target id="id1" name="direct internal">
The "refname" attribute is replaced by "refid" linking to the target's
"id"::
<paragraph>
<reference refid="id1">
direct internal
<target id="id1" name="direct internal">
"""
for name in target['names']:
refid = self.document.nameids.get(name)
reflist = self.document.refnames.get(name, [])
if reflist:
target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
if refid:
del ref['refname']
ref['refid'] = refid
ref.resolved = 1
class Footnotes(Transform):
"""
Assign numbers to autonumbered footnotes, and resolve links to footnotes,
citations, and their references.
Given the following ``document`` as input::
<document>
<paragraph>
                A labeled autonumbered footnote reference:
<footnote_reference auto="1" id="id1" refname="footnote">
<paragraph>
                An unlabeled autonumbered footnote reference:
<footnote_reference auto="1" id="id2">
<footnote auto="1" id="id3">
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote">
<paragraph>
Labeled autonumbered footnote.
Auto-numbered footnotes have attribute ``auto="1"`` and no label.
Auto-numbered footnote_references have no reference text (they're
empty elements). When resolving the numbering, a ``label`` element
is added to the beginning of the ``footnote``, and reference text
to the ``footnote_reference``.
The transformed result will be::
<document>
<paragraph>
                A labeled autonumbered footnote reference:
<footnote_reference auto="1" id="id1" refid="footnote">
2
<paragraph>
                An unlabeled autonumbered footnote reference:
<footnote_reference auto="1" id="id2" refid="id3">
1
<footnote auto="1" id="id3" backrefs="id2">
<label>
1
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote" backrefs="id1">
<label>
2
<paragraph>
Labeled autonumbered footnote.
Note that the footnotes are not in the same order as the references.
The labels and reference text are added to the auto-numbered ``footnote``
and ``footnote_reference`` elements. Footnote elements are backlinked to
their references via "refids" attributes. References are assigned "id"
and "refid" attributes.
After adding labels and reference text, the "auto" attributes can be
ignored.
"""
default_priority = 620
autofootnote_labels = None
"""Keep track of unlabeled autonumbered footnotes."""
symbols = [
# Entries 1-4 and 6 below are from section 12.51 of
# The Chicago Manual of Style, 14th edition.
'*', # asterisk/star
'\u2020', # dagger †
'\u2021', # double dagger ‡
'\u00A7', # section mark §
'\u00B6', # paragraph mark (pilcrow) ¶
# (parallels ['||'] in CMoS)
'#', # number sign
# The entries below were chosen arbitrarily.
'\u2660', # spade suit ♠
'\u2665', # heart suit ♥
'\u2666', # diamond suit ♦
'\u2663', # club suit ♣
]
def apply(self):
self.autofootnote_labels = []
startnum = self.document.autofootnote_start
self.document.autofootnote_start = self.number_footnotes(startnum)
self.number_footnote_references(startnum)
self.symbolize_footnotes()
self.resolve_footnotes_and_citations()
def number_footnotes(self, startnum):
"""
Assign numbers to autonumbered footnotes.
For labeled autonumbered footnotes, copy the number over to
corresponding footnote references.
"""
for footnote in self.document.autofootnotes:
while True:
label = str(startnum)
startnum += 1
if label not in self.document.nameids:
break
footnote.insert(0, nodes.label('', label))
for name in footnote['names']:
for ref in self.document.footnote_refs.get(name, []):
ref += nodes.Text(label)
ref.delattr('refname')
assert len(footnote['ids']) == len(ref['ids']) == 1
ref['refid'] = footnote['ids'][0]
footnote.add_backref(ref['ids'][0])
self.document.note_refid(ref)
ref.resolved = 1
if not footnote['names'] and not footnote['dupnames']:
footnote['names'].append(label)
self.document.note_explicit_target(footnote, footnote)
self.autofootnote_labels.append(label)
return startnum
def number_footnote_references(self, startnum):
"""Assign numbers to autonumbered footnote references."""
i = 0
for ref in self.document.autofootnote_refs:
if ref.resolved or ref.hasattr('refid'):
continue
try:
label = self.autofootnote_labels[i]
except IndexError:
msg = self.document.reporter.error(
'Too many autonumbered footnote references: only %s '
'corresponding footnotes available.'
% len(self.autofootnote_labels), base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.autofootnote_refs[i:]:
if ref.resolved or ref.hasattr('refname'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
break
ref += nodes.Text(label)
id = self.document.nameids[label]
footnote = self.document.ids[id]
ref['refid'] = id
self.document.note_refid(ref)
assert len(ref['ids']) == 1
footnote.add_backref(ref['ids'][0])
ref.resolved = 1
i += 1
def symbolize_footnotes(self):
"""Add symbols indexes to "[*]"-style footnotes and references."""
labels = []
for footnote in self.document.symbol_footnotes:
reps, index = divmod(self.document.symbol_footnote_start,
len(self.symbols))
labeltext = self.symbols[index] * (reps + 1)
labels.append(labeltext)
footnote.insert(0, nodes.label('', labeltext))
self.document.symbol_footnote_start += 1
self.document.set_id(footnote)
i = 0
for ref in self.document.symbol_footnote_refs:
try:
ref += nodes.Text(labels[i])
except IndexError:
msg = self.document.reporter.error(
'Too many symbol footnote references: only %s '
'corresponding footnotes available.' % len(labels),
base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.symbol_footnote_refs[i:]:
if ref.resolved or ref.hasattr('refid'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
break
footnote = self.document.symbol_footnotes[i]
assert len(footnote['ids']) == 1
ref['refid'] = footnote['ids'][0]
self.document.note_refid(ref)
footnote.add_backref(ref['ids'][0])
i += 1
def resolve_footnotes_and_citations(self):
"""
Link manually-labeled footnotes and citations to/from their
references.
"""
for footnote in self.document.footnotes:
for label in footnote['names']:
if label in self.document.footnote_refs:
reflist = self.document.footnote_refs[label]
self.resolve_references(footnote, reflist)
for citation in self.document.citations:
for label in citation['names']:
if label in self.document.citation_refs:
reflist = self.document.citation_refs[label]
self.resolve_references(citation, reflist)
def resolve_references(self, note, reflist):
assert len(note['ids']) == 1
id = note['ids'][0]
for ref in reflist:
if ref.resolved:
continue
ref.delattr('refname')
ref['refid'] = id
assert len(ref['ids']) == 1
note.add_backref(ref['ids'][0])
ref.resolved = 1
note.resolved = 1
class CircularSubstitutionDefinitionError(Exception): pass
class Substitutions(Transform):
"""
Given the following ``document`` as input::
<document>
<paragraph>
The
<substitution_reference refname="biohazard">
biohazard
symbol is deservedly scary-looking.
<substitution_definition name="biohazard">
<image alt="biohazard" uri="biohazard.png">
The ``substitution_reference`` will simply be replaced by the
contents of the corresponding ``substitution_definition``.
The transformed result will be::
<document>
<paragraph>
The
<image alt="biohazard" uri="biohazard.png">
symbol is deservedly scary-looking.
<substitution_definition name="biohazard">
<image alt="biohazard" uri="biohazard.png">
"""
default_priority = 220
"""The Substitutions transform has to be applied very early, before
    `docutils.transforms.frontmatter.DocTitle` and others."""
def apply(self):
defs = self.document.substitution_defs
normed = self.document.substitution_names
subreflist = self.document.traverse(nodes.substitution_reference)
nested = {}
for ref in subreflist:
refname = ref['refname']
key = None
if refname in defs:
key = refname
else:
normed_name = refname.lower()
if normed_name in normed:
key = normed[normed_name]
if key is None:
msg = self.document.reporter.error(
'Undefined substitution referenced: "%s".'
% refname, base_node=ref)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
else:
subdef = defs[key]
parent = ref.parent
index = parent.index(ref)
if ('ltrim' in subdef.attributes
or 'trim' in subdef.attributes):
if index > 0 and isinstance(parent[index - 1],
nodes.Text):
parent.replace(parent[index - 1],
parent[index - 1].rstrip())
if ('rtrim' in subdef.attributes
or 'trim' in subdef.attributes):
if (len(parent) > index + 1
and isinstance(parent[index + 1], nodes.Text)):
parent.replace(parent[index + 1],
parent[index + 1].lstrip())
subdef_copy = subdef.deepcopy()
try:
# Take care of nested substitution references:
for nested_ref in subdef_copy.traverse(
nodes.substitution_reference):
nested_name = normed[nested_ref['refname'].lower()]
if nested_name in nested.setdefault(nested_name, []):
raise CircularSubstitutionDefinitionError
else:
nested[nested_name].append(key)
subreflist.append(nested_ref)
except CircularSubstitutionDefinitionError:
parent = ref.parent
if isinstance(parent, nodes.substitution_definition):
msg = self.document.reporter.error(
'Circular substitution definition detected:',
nodes.literal_block(parent.rawsource,
parent.rawsource),
line=parent.line, base_node=parent)
parent.replace_self(msg)
else:
msg = self.document.reporter.error(
'Circular substitution definition referenced: "%s".'
% refname, base_node=ref)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.replace_self(prb)
else:
ref.replace_self(subdef_copy.children)
                # register refname of the replacement node(s)
# (needed for resolution of references)
for node in subdef_copy.children:
if isinstance(node, nodes.Referential):
# HACK: verify refname attribute exists.
                        # Test with docs/dev/todo.txt; see |donate|.
if 'refname' in node:
self.document.note_refname(node)
class TargetNotes(Transform):
"""
Creates a footnote for each external target in the text, and corresponding
footnote references after each reference.
"""
default_priority = 540
"""The TargetNotes transform has to be applied after `IndirectHyperlinks`
but before `Footnotes`."""
def __init__(self, document, startnode):
Transform.__init__(self, document, startnode=startnode)
self.classes = startnode.details.get('class', [])
def apply(self):
notes = {}
nodelist = []
for target in self.document.traverse(nodes.target):
# Only external targets.
if not target.hasattr('refuri'):
continue
names = target['names']
refs = []
for name in names:
refs.extend(self.document.refnames.get(name, []))
if not refs:
continue
footnote = self.make_target_footnote(target['refuri'], refs,
notes)
if target['refuri'] not in notes:
notes[target['refuri']] = footnote
nodelist.append(footnote)
# Take care of anonymous references.
for ref in self.document.traverse(nodes.reference):
if not ref.get('anonymous'):
continue
if ref.hasattr('refuri'):
footnote = self.make_target_footnote(ref['refuri'], [ref],
notes)
if ref['refuri'] not in notes:
notes[ref['refuri']] = footnote
nodelist.append(footnote)
self.startnode.replace_self(nodelist)
def make_target_footnote(self, refuri, refs, notes):
if refuri in notes: # duplicate?
footnote = notes[refuri]
assert len(footnote['names']) == 1
footnote_name = footnote['names'][0]
else: # original
footnote = nodes.footnote()
footnote_id = self.document.set_id(footnote)
# Use uppercase letters and a colon; they can't be
# produced inside names by the parser.
footnote_name = 'TARGET_NOTE: ' + footnote_id
footnote['auto'] = 1
footnote['names'] = [footnote_name]
footnote_paragraph = nodes.paragraph()
footnote_paragraph += nodes.reference('', refuri, refuri=refuri)
footnote += footnote_paragraph
self.document.note_autofootnote(footnote)
self.document.note_explicit_target(footnote, footnote)
for ref in refs:
if isinstance(ref, nodes.target):
continue
refnode = nodes.footnote_reference(refname=footnote_name, auto=1)
refnode['classes'] += self.classes
self.document.note_autofootnote_ref(refnode)
self.document.note_footnote_ref(refnode)
index = ref.parent.index(ref) + 1
reflist = [refnode]
if not utils.get_trim_footnote_ref_space(self.document.settings):
if self.classes:
reflist.insert(0, nodes.inline(text=' ', Classes=self.classes))
else:
reflist.insert(0, nodes.Text(' '))
ref.parent.insert(index, reflist)
return footnote
class DanglingReferences(Transform):
"""
Check for dangling references (incl. footnote & citation) and for
unreferenced targets.
"""
default_priority = 850
def apply(self):
visitor = DanglingReferencesVisitor(
self.document,
self.document.transformer.unknown_reference_resolvers)
self.document.walk(visitor)
# *After* resolving all references, check for unreferenced
# targets:
for target in self.document.traverse(nodes.target):
if not target.referenced:
if target.get('anonymous'):
# If we have unreferenced anonymous targets, there
# is already an error message about anonymous
# hyperlink mismatch; no need to generate another
# message.
continue
if target['names']:
naming = target['names'][0]
elif target['ids']:
naming = target['ids'][0]
else:
# Hack: Propagated targets always have their refid
# attribute set.
naming = target['refid']
self.document.reporter.info(
'Hyperlink target "%s" is not referenced.'
% naming, base_node=target)
class DanglingReferencesVisitor(nodes.SparseNodeVisitor):
def __init__(self, document, unknown_reference_resolvers):
nodes.SparseNodeVisitor.__init__(self, document)
self.document = document
self.unknown_reference_resolvers = unknown_reference_resolvers
def unknown_visit(self, node):
pass
def visit_reference(self, node):
if node.resolved or not node.hasattr('refname'):
return
refname = node['refname']
id = self.document.nameids.get(refname)
if id is None:
for resolver_function in self.unknown_reference_resolvers:
if resolver_function(node):
break
else:
if refname in self.document.nameids:
msg = self.document.reporter.error(
'Duplicate target name, cannot be used as a unique '
'reference: "%s".' % (node['refname']), base_node=node)
else:
msg = self.document.reporter.error(
'Unknown target name: "%s".' % (node['refname']),
base_node=node)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
node.rawsource, node.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
node.replace_self(prb)
else:
del node['refname']
node['refid'] = id
self.document.ids[id].note_referenced_by(id=id)
node.resolved = 1
visit_footnote_reference = visit_citation_reference = visit_reference
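# An illustrative end-to-end check of the reference resolution performed by
# the transforms above (editor's addition, not part of the original module):
def _example_resolve_external_target():
    from docutils.core import publish_doctree
    doctree = publish_doctree('See the docs_.\n'
                              '\n'
                              '.. _docs: http://docutils.sourceforge.net/\n')
    ref = doctree.traverse(nodes.reference)[0]
    # ExternalTargets has replaced the "refname" with the target's "refuri":
    assert ref['refuri'] == 'http://docutils.sourceforge.net/'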
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/transforms/references.py",
"copies": "2",
"size": "36105",
"license": "mit",
"hash": -6373327159839180000,
"line_mean": 38.8950276243,
"line_max": 83,
"alpha_frac": 0.5282370863,
"autogenerated": false,
"ratio": 4.649710238248551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00046170107209203846,
"num_lines": 905
} |
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
import types
import unicodedata
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):
"""Abstract base class of nodes in a document tree."""
parent = None
"""Back-reference to the Node immediately containing this Node."""
document = None
"""The `document` node at the root of the tree containing this Node."""
source = None
"""Path or description of the input source which generated this Node."""
line = None
"""The line number (1-based) of the beginning of this Node in `source`."""
def __nonzero__(self):
"""
Node instances are always true, even if they're empty. A node is more
than a simple container. Its boolean "truth" does not depend on
having one or more subnodes in the doctree.
Use `len()` to check node length. Use `None` to represent a boolean
false value.
"""
return True
if sys.version_info < (3,):
# on 2.x, str(node) will be a byte string with Unicode
# characters > 255 escaped; on 3.x this is no longer necessary
def __str__(self):
return unicode(self).encode('raw_unicode_escape')
def asdom(self, dom=None):
"""Return a DOM **fragment** representation of this Node."""
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
return self._dom_node(domroot)
def pformat(self, indent=' ', level=0):
"""
Return an indented pseudo-XML representation, for test purposes.
Override in subclasses.
"""
raise NotImplementedError
def copy(self):
"""Return a copy of self."""
raise NotImplementedError
def deepcopy(self):
"""Return a deep copy of self (also copying children)."""
raise NotImplementedError
def setup_child(self, child):
child.parent = self
if self.document:
child.document = self.document
if child.source is None:
child.source = self.document.current_source
if child.line is None:
child.line = self.document.current_line
def walk(self, visitor):
"""
Traverse a tree of `Node` objects, calling the
`dispatch_visit()` method of `visitor` when entering each
node. (The `walkabout()` method is similar, except it also
calls the `dispatch_departure()` method before exiting each
node.)
This tree traversal supports limited in-place tree
modifications. Replacing one node with one or more nodes is
OK, as is removing an element. However, if the node removed
or replaced occurs after the current node, the old node will
still be traversed, and any new nodes will not.
Within ``visit`` methods (and ``depart`` methods for
`walkabout()`), `TreePruningException` subclasses may be raised
(`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).
Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` implementation for each `Node` subclass encountered.
Return true if we should stop the traversal.
"""
stop = False
visitor.document.reporter.debug(
'docutils.nodes.Node.walk calling dispatch_visit for %s'
% self.__class__.__name__)
try:
try:
visitor.dispatch_visit(self)
except (SkipChildren, SkipNode):
return stop
except SkipDeparture: # not applicable; ignore
pass
children = self.children
try:
for child in children[:]:
if child.walk(visitor):
stop = True
break
except SkipSiblings:
pass
except StopTraversal:
stop = True
return stop
def walkabout(self, visitor):
"""
Perform a tree traversal similarly to `Node.walk()` (which
see), except also call the `dispatch_departure()` method
before exiting each node.
Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` and ``depart`` implementation for each `Node`
subclass encountered.
Return true if we should stop the traversal.
"""
call_depart = True
stop = False
visitor.document.reporter.debug(
'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
% self.__class__.__name__)
try:
try:
visitor.dispatch_visit(self)
except SkipNode:
return stop
except SkipDeparture:
call_depart = False
children = self.children
try:
for child in children[:]:
if child.walkabout(visitor):
stop = True
break
except SkipSiblings:
pass
except SkipChildren:
pass
except StopTraversal:
stop = True
if call_depart:
visitor.document.reporter.debug(
'docutils.nodes.Node.walkabout calling dispatch_departure '
'for %s' % self.__class__.__name__)
visitor.dispatch_departure(self)
return stop
def _fast_traverse(self, cls):
"""Specialized traverse() that only supports instance checks."""
result = []
if isinstance(self, cls):
result.append(self)
for child in self.children:
result.extend(child._fast_traverse(cls))
return result
def _all_traverse(self):
"""Specialized traverse() that doesn't check for a condition."""
result = []
result.append(self)
for child in self.children:
result.extend(child._all_traverse())
return result
def traverse(self, condition=None, include_self=True, descend=True,
siblings=False, ascend=False):
"""
Return an iterable containing
* self (if include_self is true)
* all descendants in tree traversal order (if descend is true)
* all siblings (if siblings is true) and their descendants (if
also descend is true)
* the siblings of the parent (if ascend is true) and their
descendants (if also descend is true), and so on
If `condition` is not None, the iterable contains only nodes
for which ``condition(node)`` is true. If `condition` is a
node class ``cls``, it is equivalent to a function consisting
of ``return isinstance(node, cls)``.
If ascend is true, assume siblings to be true as well.
For example, given the following tree::
<paragraph>
<emphasis> <--- emphasis.traverse() and
<strong> <--- strong.traverse() are called.
Foo
Bar
<reference name="Baz" refid="baz">
Baz
Then list(emphasis.traverse()) equals ::
[<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]
and list(strong.traverse(ascend=True)) equals ::
[<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
"""
if ascend:
siblings=True
# Check for special argument combinations that allow using an
# optimized version of traverse()
if include_self and descend and not siblings:
if condition is None:
return self._all_traverse()
elif isinstance(condition, (types.ClassType, type)):
return self._fast_traverse(condition)
# Check if `condition` is a class (check for TypeType for Python
# implementations that use only new-style classes, like PyPy).
if isinstance(condition, (types.ClassType, type)):
node_class = condition
def condition(node, node_class=node_class):
return isinstance(node, node_class)
r = []
if include_self and (condition is None or condition(self)):
r.append(self)
if descend and len(self.children):
for child in self:
r.extend(child.traverse(include_self=True, descend=True,
siblings=False, ascend=False,
condition=condition))
if siblings or ascend:
node = self
while node.parent:
index = node.parent.index(node)
for sibling in node.parent[index+1:]:
r.extend(sibling.traverse(include_self=True,
descend=descend,
siblings=False, ascend=False,
condition=condition))
if not ascend:
break
else:
node = node.parent
return r
def next_node(self, condition=None, include_self=False, descend=True,
siblings=False, ascend=False):
"""
Return the first node in the iterable returned by traverse(),
or None if the iterable is empty.
        Parameter list is the same as for `traverse()`. Note that
        `include_self` defaults to False, though.
"""
iterable = self.traverse(condition=condition,
include_self=include_self, descend=descend,
siblings=siblings, ascend=ascend)
try:
return iterable[0]
except IndexError:
return None
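    # Sketch of `traverse()`/`next_node()` (assumes `doctree` is a parsed
    # `document`; `paragraph` and `Text` are defined later in this module)::
    #
    #     first_para = doctree.next_node(paragraph)
    #     all_text = [n.astext() for n in doctree.traverse(Text)]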
if sys.version_info < (3,):
class reprunicode(unicode):
"""
A unicode sub-class that removes the initial u from unicode's repr.
"""
def __repr__(self):
return unicode.__repr__(self)[1:]
else:
reprunicode = unicode
def ensure_str(s):
"""
    Failsafe conversion of `unicode` to `str`.
"""
if sys.version_info < (3,) and isinstance(s, unicode):
return s.encode('ascii', 'backslashreplace')
return s
class Text(Node, reprunicode):
"""
Instances are terminal nodes (leaves) containing text only; no child
nodes or attributes. Initialize by passing a string to the constructor.
Access the text itself with the `astext` method.
"""
tagname = '#text'
children = ()
"""Text nodes have no children, and cannot have children."""
if sys.version_info > (3,):
def __new__(cls, data, rawsource=None):
"""Prevent the rawsource argument from propagating to str."""
if isinstance(data, bytes):
raise TypeError('expecting str data, not bytes')
return reprunicode.__new__(cls, data)
else:
def __new__(cls, data, rawsource=None):
"""Prevent the rawsource argument from propagating to str."""
return reprunicode.__new__(cls, data)
def __init__(self, data, rawsource=''):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
def shortrepr(self, maxlen=18):
data = self
if len(data) > maxlen:
data = data[:maxlen-4] + ' ...'
return '<%s: %r>' % (self.tagname, reprunicode(data))
def __repr__(self):
return self.shortrepr(maxlen=68)
def _dom_node(self, domroot):
return domroot.createTextNode(unicode(self))
def astext(self):
return reprunicode(self)
# Note about __unicode__: The implementation of __unicode__ here,
# and the one raising NotImplemented in the superclass Node had
# to be removed when changing Text to a subclass of unicode instead
# of UserString, since there is no way to delegate the __unicode__
# call to the superclass unicode:
# unicode itself does not have __unicode__ method to delegate to
# and calling unicode(self) or unicode.__new__ directly creates
# an infinite loop
def copy(self):
return self.__class__(reprunicode(self), rawsource=self.rawsource)
def deepcopy(self):
return self.copy()
def pformat(self, indent=' ', level=0):
result = []
indent = indent * level
for line in self.splitlines():
result.append(indent + line + '\n')
return ''.join(result)
# rstrip and lstrip are used by substitution definitions where
# they are expected to return a Text instance, this was formerly
# taken care of by UserString. Note that then and now the
# rawsource member is lost.
def rstrip(self, chars=None):
return self.__class__(reprunicode.rstrip(self, chars))
def lstrip(self, chars=None):
return self.__class__(reprunicode.lstrip(self, chars))
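# A small sketch of `Text` in use::
#
#     t = Text('Hello world')
#     t.astext()       # -> u'Hello world'
#     t.shortrepr()    # -> "<#text: 'Hello world'>"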
class Element(Node):
"""
`Element` is the superclass to all specific elements.
Elements contain attributes and child nodes. Elements emulate
dictionaries for attributes, indexing by attribute name (a string). To
set the attribute 'att' to 'value', do::
element['att'] = 'value'
There are two special attributes: 'ids' and 'names'. Both are
lists of unique identifiers, and names serve as human interfaces
to IDs. Names are case- and whitespace-normalized (see the
fully_normalize_name() function), and IDs conform to the regular
expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
Elements also emulate lists for child nodes (element nodes and/or text
nodes), indexing by integer. To get the first child node, use::
element[0]
Elements may be constructed using the ``+=`` operator. To add one new
child node to element, do::
element += node
This is equivalent to ``element.append(node)``.
To add a list of multiple child nodes at once, use the same ``+=``
operator::
element += [node1, node2]
This is equivalent to ``element.extend([node1, node2])``.
"""
basic_attributes = ('ids', 'classes', 'names', 'dupnames')
"""List attributes which are defined for every Element-derived class
instance and can be safely transferred to a different node."""
local_attributes = ('backrefs',)
"""A list of class-specific attributes that should not be copied with the
standard attributes when replacing a node.
NOTE: Derived classes should override this value to prevent any of its
attributes being copied by adding to the value in its parent class."""
list_attributes = basic_attributes + local_attributes
"""List attributes, automatically initialized to empty lists for
all nodes."""
known_attributes = list_attributes + ('source',)
"""List attributes that are known to the Element base class."""
tagname = None
"""The element generic identifier. If None, it is set as an instance
attribute to the name of the class."""
child_text_separator = '\n\n'
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', *children, **attributes):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
self.children = []
"""List of child nodes (elements and/or `Text`)."""
self.extend(children) # maintain parent info
self.attributes = {}
"""Dictionary of attribute {name: value}."""
# Initialize list attributes.
for att in self.list_attributes:
self.attributes[att] = []
for att, value in attributes.items():
att = att.lower()
if att in self.list_attributes:
# mutable list; make a copy for this node
self.attributes[att] = value[:]
else:
self.attributes[att] = value
if self.tagname is None:
self.tagname = self.__class__.__name__
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
for attribute, value in self.attlist():
if isinstance(value, list):
value = ' '.join([serial_escape('%s' % (v,)) for v in value])
element.setAttribute(attribute, '%s' % value)
for child in self.children:
element.appendChild(child._dom_node(domroot))
return element
def __repr__(self):
data = ''
for c in self.children:
data += c.shortrepr()
if len(data) > 60:
data = data[:56] + ' ...'
break
if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
'; '.join([ensure_str(n) for n in self['names']]), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
def shortrepr(self):
if self['names']:
return '<%s "%s"...>' % (self.__class__.__name__,
'; '.join([ensure_str(n) for n in self['names']]))
else:
return '<%s...>' % self.tagname
def __unicode__(self):
if self.children:
return u'%s%s%s' % (self.starttag(),
''.join([unicode(c) for c in self.children]),
self.endtag())
else:
return self.emptytag()
if sys.version_info > (3,):
# 2to3 doesn't convert __unicode__ to __str__
__str__ = __unicode__
def starttag(self, quoteattr=None):
# the optional arg is used by the docutils_xml writer
if quoteattr is None:
quoteattr = pseudo_quoteattr
parts = [self.tagname]
for name, value in self.attlist():
if value is None: # boolean attribute
parts.append(name)
continue
if isinstance(value, list):
values = [serial_escape('%s' % (v,)) for v in value]
value = ' '.join(values)
else:
value = unicode(value)
value = quoteattr(value)
parts.append(u'%s=%s' % (name, value))
return u'<%s>' % u' '.join(parts)
def endtag(self):
return '</%s>' % self.tagname
def emptytag(self):
return u'<%s/>' % u' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
def __len__(self):
return len(self.children)
def __contains__(self, key):
# support both membership test for children and attributes
# (has_key is translated to "in" by 2to3)
if isinstance(key, basestring):
return key in self.attributes
return key in self.children
def __getitem__(self, key):
if isinstance(key, basestring):
return self.attributes[key]
elif isinstance(key, int):
return self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
return self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __setitem__(self, key, item):
if isinstance(key, basestring):
self.attributes[str(key)] = item
elif isinstance(key, int):
self.setup_child(item)
self.children[key] = item
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
for node in item:
self.setup_child(node)
self.children[key.start:key.stop] = item
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __delitem__(self, key):
if isinstance(key, basestring):
del self.attributes[key]
elif isinstance(key, int):
del self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
del self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a simple '
'slice, or an attribute name string')
def __add__(self, other):
return self.children + other
def __radd__(self, other):
return other + self.children
def __iadd__(self, other):
"""Append a node or a list of nodes to `self.children`."""
if isinstance(other, Node):
self.append(other)
elif other is not None:
self.extend(other)
return self
def astext(self):
return self.child_text_separator.join(
[child.astext() for child in self.children])
def non_default_attributes(self):
atts = {}
for key, value in self.attributes.items():
if self.is_not_default(key):
atts[key] = value
return atts
def attlist(self):
attlist = self.non_default_attributes().items()
attlist.sort()
return attlist
def get(self, key, failobj=None):
return self.attributes.get(key, failobj)
def hasattr(self, attr):
return attr in self.attributes
def delattr(self, attr):
if attr in self.attributes:
del self.attributes[attr]
def setdefault(self, key, failobj=None):
return self.attributes.setdefault(key, failobj)
has_key = hasattr
# support operator ``in``
__contains__ = hasattr
def get_language_code(self, fallback=''):
"""Return node's language tag.
Look iteratively in self and parents for a class argument
starting with ``language-`` and return the remainder of it
        (which should be a `BCP 47` language tag) or the `fallback`.
"""
for cls in self.get('classes', []):
if cls.startswith('language-'):
return cls[9:]
try:
            return self.parent.get_language_code(fallback)
except AttributeError:
return fallback
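    # Sketch (assumes an element carrying a ``language-`` class)::
    #
    #     e = Element()
    #     e['classes'].append('language-fr')
    #     e.get_language_code('en')          # -> 'fr'
    #     Element().get_language_code('en')  # -> 'en' (fallback)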
def append(self, item):
self.setup_child(item)
self.children.append(item)
def extend(self, item):
for node in item:
self.append(node)
def insert(self, index, item):
if isinstance(item, Node):
self.setup_child(item)
self.children.insert(index, item)
elif item is not None:
self[index:index] = item
def pop(self, i=-1):
return self.children.pop(i)
def remove(self, item):
self.children.remove(item)
def index(self, item):
return self.children.index(item)
def is_not_default(self, key):
if self[key] == [] and key in self.list_attributes:
return 0
else:
return 1
def update_basic_atts(self, dict_):
"""
Update basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') from node or dictionary `dict_`.
"""
if isinstance(dict_, Node):
dict_ = dict_.attributes
for att in self.basic_attributes:
self.append_attr_list(att, dict_.get(att, []))
def append_attr_list(self, attr, values):
"""
For each element in values, if it does not exist in self[attr], append
it.
NOTE: Requires self[attr] and values to be sequence type and the
former should specifically be a list.
"""
# List Concatenation
for value in values:
if not value in self[attr]:
self[attr].append(value)
def coerce_append_attr_list(self, attr, value):
"""
First, convert both self[attr] and value to a non-string sequence
type; if either is not already a sequence, convert it to a list of one
element. Then call append_attr_list.
NOTE: self[attr] and value both must not be None.
"""
# List Concatenation
if not isinstance(self.get(attr), list):
self[attr] = [self[attr]]
if not isinstance(value, list):
value = [value]
self.append_attr_list(attr, value)
def replace_attr(self, attr, value, force = True):
"""
If self[attr] does not exist or force is True or omitted, set
self[attr] to value, otherwise do nothing.
"""
# One or the other
if force or self.get(attr) is None:
self[attr] = value
def copy_attr_convert(self, attr, value, replace = True):
"""
If attr is an attribute of self, set self[attr] to
[self[attr], value], otherwise set self[attr] to value.
NOTE: replace is not used by this function and is kept only for
compatibility with the other copy functions.
"""
if self.get(attr) is not value:
self.coerce_append_attr_list(attr, value)
def copy_attr_coerce(self, attr, value, replace):
"""
If attr is an attribute of self and either self[attr] or value is a
list, convert all non-sequence values to a sequence of 1 element and
        then concatenate the two sequences, setting the result to self[attr].
If both self[attr] and value are non-sequences and replace is True or
self[attr] is None, replace self[attr] with value. Otherwise, do
nothing.
"""
if self.get(attr) is not value:
if isinstance(self.get(attr), list) or \
isinstance(value, list):
self.coerce_append_attr_list(attr, value)
else:
self.replace_attr(attr, value, replace)
def copy_attr_concatenate(self, attr, value, replace):
"""
If attr is an attribute of self and both self[attr] and value are
lists, concatenate the two sequences, setting the result to
self[attr]. If either self[attr] or value are non-sequences and
replace is True or self[attr] is None, replace self[attr] with value.
Otherwise, do nothing.
"""
if self.get(attr) is not value:
if isinstance(self.get(attr), list) and \
isinstance(value, list):
self.append_attr_list(attr, value)
else:
self.replace_attr(attr, value, replace)
def copy_attr_consistent(self, attr, value, replace):
"""
        If replace is True or self[attr] is None, replace self[attr] with
value. Otherwise, do nothing.
"""
if self.get(attr) is not value:
self.replace_attr(attr, value, replace)
def update_all_atts(self, dict_, update_fun = copy_attr_consistent,
replace = True, and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_, the two values are
merged based on the value of update_fun. Generally, when replace is
True, the values in self are replaced or merged with the values in
dict_; otherwise, the values in self may be preserved or merged. When
and_source is True, the 'source' attribute is included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
        NOTE: It is easier to call the update-specific methods than to pass
the update_fun method to this function.
"""
if isinstance(dict_, Node):
dict_ = dict_.attributes
# Include the source attribute when copying?
if and_source:
filter_fun = self.is_not_list_attribute
else:
filter_fun = self.is_not_known_attribute
# Copy the basic attributes
self.update_basic_atts(dict_)
        # Update the remaining attributes from dict_; the basic
        # attributes (and possibly 'source') were handled above.
for att in filter(filter_fun, dict_):
update_fun(self, att, dict_[att], replace)
def update_all_atts_consistantly(self, dict_, replace = True,
and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ and replace is True, the
values in self are replaced with the values in dict_; otherwise, the
values in self are preserved. When and_source is True, the 'source'
attribute is included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_consistent, replace,
and_source)
def update_all_atts_concatenating(self, dict_, replace = True,
and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ whose values aren't each
lists and replace is True, the values in self are replaced with the
values in dict_; if the values from self and dict_ for the given
identifier are both of list type, then the two lists are concatenated
and the result stored in self; otherwise, the values in self are
preserved. When and_source is True, the 'source' attribute is
included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_concatenate, replace,
and_source)
def update_all_atts_coercion(self, dict_, replace = True,
and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ whose values are both
not lists and replace is True, the values in self are replaced with
the values in dict_; if either of the values from self and dict_ for
the given identifier are of list type, then first any non-lists are
converted to 1-element lists and then the two lists are concatenated
and the result stored in self; otherwise, the values in self are
preserved. When and_source is True, the 'source' attribute is
included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_coerce, replace,
and_source)
def update_all_atts_convert(self, dict_, and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ then first any non-lists
are converted to 1-element lists and then the two lists are
concatenated and the result stored in self; otherwise, the values in
self are preserved. When and_source is True, the 'source' attribute
is included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_convert,
and_source = and_source)
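    # Sketch of the attribute-merging helpers above (note the historical
    # spelling of `update_all_atts_consistantly`)::
    #
    #     src = Element(ids=['a'], names=['x'], foo='bar')
    #     dst = Element(ids=['b'])
    #     dst.update_basic_atts(src)             # dst['ids'] == ['b', 'a']
    #     dst.update_all_atts_consistantly(src)  # also copies 'foo'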
def clear(self):
self.children = []
def replace(self, old, new):
"""Replace one child `Node` with another child or children."""
index = self.index(old)
if isinstance(new, Node):
self.setup_child(new)
self[index] = new
elif new is not None:
self[index:index+1] = new
def replace_self(self, new):
"""
Replace `self` node with `new`, where `new` is a node or a
list of nodes.
"""
update = new
if not isinstance(new, Node):
# `new` is a list; update first child.
try:
update = new[0]
except IndexError:
update = None
if isinstance(update, Element):
update.update_basic_atts(self)
else:
# `update` is a Text node or `new` is an empty list.
# Assert that we aren't losing any attributes.
for att in self.basic_attributes:
assert not self[att], \
'Losing "%s" attribute: %s' % (att, self[att])
self.parent.replace(self, new)
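    # Sketch (assumes `node` is a child of some parent element; `note` and
    # `paragraph` are defined later in this module)::
    #
    #     new = note('', paragraph('', 'Watch out!'))
    #     node.replace_self(new)  # basic attributes carry over to `new`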
def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
"""
Return the index of the first child whose class exactly matches.
Parameters:
- `childclass`: A `Node` subclass to search for, or a tuple of `Node`
classes. If a tuple, any of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self[index], c):
return index
return None
def first_child_not_matching_class(self, childclass, start=0,
end=sys.maxint):
"""
Return the index of the first child whose class does *not* match.
Parameters:
- `childclass`: A `Node` subclass to skip, or a tuple of `Node`
classes. If a tuple, none of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self.children[index], c):
break
else:
return index
return None
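    # Sketch (assumes `sect` is a `section` whose first child is a `title`)::
    #
    #     i = sect.first_child_matching_class(title)              # -> 0
    #     j = sect.first_child_not_matching_class((title, subtitle))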
def pformat(self, indent=' ', level=0):
return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
[child.pformat(indent, level+1)
for child in self.children])
def copy(self):
return self.__class__(rawsource=self.rawsource, **self.attributes)
def deepcopy(self):
copy = self.copy()
copy.extend([child.deepcopy() for child in self.children])
return copy
def set_class(self, name):
"""Add a new class to the "classes" attribute."""
warnings.warn('docutils.nodes.Element.set_class deprecated; '
"append to Element['classes'] list attribute directly",
DeprecationWarning, stacklevel=2)
assert ' ' not in name
self['classes'].append(name.lower())
def note_referenced_by(self, name=None, id=None):
"""Note that this Element has been referenced by its name
`name` or id `id`."""
self.referenced = 1
# Element.expect_referenced_by_* dictionaries map names or ids
# to nodes whose ``referenced`` attribute is set to true as
# soon as this node is referenced by the given name or id.
# Needed for target propagation.
by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
if by_name:
assert name is not None
by_name.referenced = 1
if by_id:
assert id is not None
by_id.referenced = 1
@classmethod
def is_not_list_attribute(cls, attr):
"""
Returns True if and only if the given attribute is NOT one of the
basic list attributes defined for all Elements.
"""
return attr not in cls.list_attributes
@classmethod
def is_not_known_attribute(cls, attr):
"""
Returns True if and only if the given attribute is NOT recognized by
this class.
"""
return attr not in cls.known_attributes
class TextElement(Element):
"""
An element which directly contains text.
Its children are all `Text` or `Inline` subclass nodes. You can
check whether an element's context is inline simply by checking whether
its immediate parent is a `TextElement` instance (including subclasses).
This is handy for nodes like `image` that can appear both inline and as
standalone body elements.
If passing children to `__init__()`, make sure to set `text` to
``''`` or some other suitable value.
"""
child_text_separator = ''
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', text='', *children, **attributes):
if text != '':
textnode = Text(text)
Element.__init__(self, rawsource, textnode, *children,
**attributes)
else:
Element.__init__(self, rawsource, *children, **attributes)
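# Sketch: building a small inline tree with text elements::
#
#     p = paragraph('raw text', 'plain ', strong('', 'bold'), Text(' tail'))
#     p.astext()   # -> u'plain bold tail'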
class FixedTextElement(TextElement):
"""An element which directly contains preformatted text."""
def __init__(self, rawsource='', text='', *children, **attributes):
TextElement.__init__(self, rawsource, text, *children, **attributes)
self.attributes['xml:space'] = 'preserve'
# ========
# Mixins
# ========
class Resolvable:
resolved = 0
class BackLinkable:
def add_backref(self, refid):
self['backrefs'].append(refid)
# ====================
# Element Categories
# ====================
class Root: pass
class Titular: pass
class PreBibliographic:
"""Category of Node which may occur before Bibliographic Nodes."""
class Bibliographic: pass
class Decorative(PreBibliographic): pass
class Structural: pass
class Body: pass
class General(Body): pass
class Sequential(Body):
"""List-like elements."""
class Admonition(Body): pass
class Special(Body):
"""Special internal body elements."""
class Invisible(PreBibliographic):
"""Internal elements that don't appear in output."""
class Part: pass
class Inline: pass
class Referential(Resolvable): pass
class Targetable(Resolvable):
referenced = 0
indirect_reference_name = None
"""Holds the whitespace_normalized_name (contains mixed case) of a target.
Required for MoinMoin/reST compatibility."""
class Labeled:
"""Contains a `label` as its first element."""
# ==============
# Root Element
# ==============
class document(Root, Structural, Element):
"""
The document root element.
Do not instantiate this class directly; use
`docutils.utils.new_document()` instead.
"""
def __init__(self, settings, reporter, *args, **kwargs):
Element.__init__(self, *args, **kwargs)
self.current_source = None
"""Path to or description of the input source being processed."""
self.current_line = None
"""Line number (1-based) of `current_source`."""
self.settings = settings
"""Runtime settings data record."""
self.reporter = reporter
"""System message generator."""
self.indirect_targets = []
"""List of indirect target nodes."""
self.substitution_defs = {}
"""Mapping of substitution names to substitution_definition nodes."""
self.substitution_names = {}
"""Mapping of case-normalized substitution names to case-sensitive
names."""
self.refnames = {}
"""Mapping of names to lists of referencing nodes."""
self.refids = {}
"""Mapping of ids to lists of referencing nodes."""
self.nameids = {}
"""Mapping of names to unique id's."""
self.nametypes = {}
"""Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit)."""
self.ids = {}
"""Mapping of ids to nodes."""
self.footnote_refs = {}
"""Mapping of footnote labels to lists of footnote_reference nodes."""
self.citation_refs = {}
"""Mapping of citation labels to lists of citation_reference nodes."""
self.autofootnotes = []
"""List of auto-numbered footnote nodes."""
self.autofootnote_refs = []
"""List of auto-numbered footnote_reference nodes."""
self.symbol_footnotes = []
"""List of symbol footnote nodes."""
self.symbol_footnote_refs = []
"""List of symbol footnote_reference nodes."""
self.footnotes = []
"""List of manually-numbered footnote nodes."""
self.citations = []
"""List of citation nodes."""
self.autofootnote_start = 1
"""Initial auto-numbered footnote number."""
self.symbol_footnote_start = 0
"""Initial symbol footnote symbol index."""
self.id_start = 1
"""Initial ID number."""
self.parse_messages = []
"""System messages generated while parsing."""
self.transform_messages = []
"""System messages generated while applying transforms."""
import docutils.transforms
self.transformer = docutils.transforms.Transformer(self)
"""Storage for transforms to be applied to this document."""
self.decoration = None
"""Document's `decoration` node."""
self.document = self
def __getstate__(self):
"""
Return dict with unpicklable references removed.
"""
state = self.__dict__.copy()
state['reporter'] = None
state['transformer'] = None
return state
def asdom(self, dom=None):
"""Return a DOM representation of this document."""
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
domroot.appendChild(self._dom_node(domroot))
return domroot
def set_id(self, node, msgnode=None):
for id in node['ids']:
if id in self.ids and self.ids[id] is not node:
msg = self.reporter.severe('Duplicate ID: "%s".' % id)
if msgnode != None:
msgnode += msg
if not node['ids']:
for name in node['names']:
id = self.settings.id_prefix + make_id(name)
if id and id not in self.ids:
break
else:
id = ''
while not id or id in self.ids:
id = (self.settings.id_prefix +
self.settings.auto_id_prefix + str(self.id_start))
self.id_start += 1
node['ids'].append(id)
self.ids[id] = node
return id
def set_name_id_map(self, node, id, msgnode=None, explicit=None):
"""
`self.nameids` maps names to IDs, while `self.nametypes` maps names to
booleans representing hyperlink type (True==explicit,
False==implicit). This method updates the mappings.
The following state transition table shows how `self.nameids` ("ids")
and `self.nametypes` ("types") change with new input (a call to this
method), and what actions are performed ("implicit"-type system
messages are INFO/1, and "explicit"-type system messages are ERROR/3):
==== ===== ======== ======== ======= ==== ===== =====
Old State Input Action New State Notes
----------- -------- ----------------- ----------- -----
ids types new type sys.msg. dupname ids types
==== ===== ======== ======== ======= ==== ===== =====
- - explicit - - new True
- - implicit - - new False
None False explicit - - new True
old False explicit implicit old new True
None True explicit explicit new None True
old True explicit explicit new,old None True [#]_
None False implicit implicit new None False
old False implicit implicit new,old None False
None True implicit implicit new None True
old True implicit implicit new old True
==== ===== ======== ======== ======= ==== ===== =====
.. [#] Do not clear the name-to-id map or invalidate the old target if
both old and new targets are external and refer to identical URIs.
The new target is invalidated regardless.
"""
for name in node['names']:
if name in self.nameids:
self.set_duplicate_name_id(node, id, name, msgnode, explicit)
else:
self.nameids[name] = id
self.nametypes[name] = explicit
def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
old_id = self.nameids[name]
old_explicit = self.nametypes[name]
self.nametypes[name] = old_explicit or explicit
if explicit:
if old_explicit:
level = 2
if old_id is not None:
old_node = self.ids[old_id]
if 'refuri' in node:
refuri = node['refuri']
if old_node['names'] \
and 'refuri' in old_node \
and old_node['refuri'] == refuri:
level = 1 # just inform if refuri's identical
if level > 1:
dupname(old_node, name)
self.nameids[name] = None
msg = self.reporter.system_message(
level, 'Duplicate explicit target name: "%s".' % name,
backrefs=[id], base_node=node)
if msgnode != None:
msgnode += msg
dupname(node, name)
else:
self.nameids[name] = id
if old_id is not None:
old_node = self.ids[old_id]
dupname(old_node, name)
else:
if old_id is not None and not old_explicit:
self.nameids[name] = None
old_node = self.ids[old_id]
dupname(old_node, name)
dupname(node, name)
if not explicit or (not old_explicit and old_id is not None):
msg = self.reporter.info(
'Duplicate implicit target name: "%s".' % name,
backrefs=[id], base_node=node)
if msgnode != None:
msgnode += msg
def has_name(self, name):
return name in self.nameids
# "note" here is an imperative verb: "take note of".
def note_implicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=None)
def note_explicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=True)
def note_refname(self, node):
self.refnames.setdefault(node['refname'], []).append(node)
def note_refid(self, node):
self.refids.setdefault(node['refid'], []).append(node)
def note_indirect_target(self, target):
self.indirect_targets.append(target)
if target['names']:
self.note_refname(target)
def note_anonymous_target(self, target):
self.set_id(target)
def note_autofootnote(self, footnote):
self.set_id(footnote)
self.autofootnotes.append(footnote)
def note_autofootnote_ref(self, ref):
self.set_id(ref)
self.autofootnote_refs.append(ref)
def note_symbol_footnote(self, footnote):
self.set_id(footnote)
self.symbol_footnotes.append(footnote)
def note_symbol_footnote_ref(self, ref):
self.set_id(ref)
self.symbol_footnote_refs.append(ref)
def note_footnote(self, footnote):
self.set_id(footnote)
self.footnotes.append(footnote)
def note_footnote_ref(self, ref):
self.set_id(ref)
self.footnote_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_citation(self, citation):
self.citations.append(citation)
def note_citation_ref(self, ref):
self.set_id(ref)
self.citation_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_substitution_def(self, subdef, def_name, msgnode=None):
name = whitespace_normalize_name(def_name)
if name in self.substitution_defs:
msg = self.reporter.error(
'Duplicate substitution definition name: "%s".' % name,
base_node=subdef)
if msgnode != None:
msgnode += msg
oldnode = self.substitution_defs[name]
dupname(oldnode, name)
# keep only the last definition:
self.substitution_defs[name] = subdef
# case-insensitive mapping:
self.substitution_names[fully_normalize_name(name)] = name
def note_substitution_ref(self, subref, refname):
subref['refname'] = whitespace_normalize_name(refname)
def note_pending(self, pending, priority=None):
self.transformer.add_pending(pending, priority)
def note_parse_message(self, message):
self.parse_messages.append(message)
def note_transform_message(self, message):
self.transform_messages.append(message)
def note_source(self, source, offset):
self.current_source = source
if offset is None:
self.current_line = offset
else:
self.current_line = offset + 1
def copy(self):
return self.__class__(self.settings, self.reporter,
**self.attributes)
def get_decoration(self):
if not self.decoration:
self.decoration = decoration()
index = self.first_child_not_matching_class(Titular)
if index is None:
self.append(self.decoration)
else:
self.insert(index, self.decoration)
return self.decoration
# ================
# Title Elements
# ================
class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass
# ========================
# Bibliographic Elements
# ========================
class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
# Decorative Elements
# =====================
class decoration(Decorative, Element):
def get_header(self):
if not len(self.children) or not isinstance(self.children[0], header):
self.insert(0, header())
return self.children[0]
def get_footer(self):
if not len(self.children) or not isinstance(self.children[-1], footer):
self.append(footer())
return self.children[-1]
class header(Decorative, Element): pass
class footer(Decorative, Element): pass
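# Sketch (assumes `doc` is a `document`)::
#
#     hdr = doc.get_decoration().get_header()
#     hdr += paragraph('', 'Draft')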
# =====================
# Structural Elements
# =====================
class section(Structural, Element): pass
class topic(Structural, Element):
"""
Topics are terminal, "leaf" mini-sections, like block quotes with titles,
or textual figures. A topic is just like a section, except that it has no
subsections, and it doesn't have to conform to section placement rules.
Topics are allowed wherever body elements (list, table, etc.) are allowed,
but only at the top level of a section or document. Topics cannot nest
inside topics, sidebars, or body elements; you can't have a topic inside a
table, list, block quote, etc.
"""
class sidebar(Structural, Element):
"""
Sidebars are like miniature, parallel documents that occur inside other
documents, providing related or reference material. A sidebar is
typically offset by a border and "floats" to the side of the page; the
document's main text may flow around it. Sidebars can also be likened to
super-footnotes; their content is outside of the flow of the document's
main text.
Sidebars are allowed wherever body elements (list, table, etc.) are
allowed, but only at the top level of a section or document. Sidebars
cannot nest inside sidebars, topics, or body elements; you can't have a
sidebar inside a table, list, block quote, etc.
"""
class transition(Structural, Element): pass
# ===============
# Body Elements
# ===============
class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass
class option(Part, Element):
child_text_separator = ''
class option_argument(Part, TextElement):
def astext(self):
return self.get('delimiter', ' ') + TextElement.astext(self)
class option_group(Part, Element):
child_text_separator = ', '
class option_list(Sequential, Element): pass
class option_list_item(Part, Element):
child_text_separator = ' '
class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class math_block(General, FixedTextElement): pass
class line_block(General, Element): pass
class line(Part, TextElement):
indent = None
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):
"""
System message element.
Do not instantiate this class directly; use
``document.reporter.info/warning/error/severe()`` instead.
"""
def __init__(self, message=None, *children, **attributes):
if message:
p = paragraph('', message)
children = (p,) + children
try:
Element.__init__(self, '', *children, **attributes)
except:
print 'system_message: children=%r' % (children,)
raise
def astext(self):
line = self.get('line', '')
return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
self['level'], Element.astext(self))
class pending(Special, Invisible, Element):
"""
The "pending" element is used to encapsulate a pending operation: the
operation (transform), the point at which to apply it, and any data it
requires. Only the pending operation's location within the document is
stored in the public document tree (by the "pending" object itself); the
operation and its data are stored in the "pending" object's internal
instance attributes.
For example, say you want a table of contents in your reStructuredText
document. The easiest way to specify where to put it is from within the
document, with a directive::
.. contents::
But the "contents" directive can't do its work until the entire document
has been parsed and possibly transformed to some extent. So the directive
code leaves a placeholder behind that will trigger the second phase of its
processing, something like this::
<pending ...public attributes...> + internal attributes
Use `document.note_pending()` so that the
`docutils.transforms.Transformer` stage of processing can run all pending
transforms.
"""
def __init__(self, transform, details=None,
rawsource='', *children, **attributes):
Element.__init__(self, rawsource, *children, **attributes)
self.transform = transform
"""The `docutils.transforms.Transform` class implementing the pending
operation."""
self.details = details or {}
"""Detail data (dictionary) required by the pending operation."""
def pformat(self, indent=' ', level=0):
internals = [
'.. internal attributes:',
' .transform: %s.%s' % (self.transform.__module__,
self.transform.__name__),
' .details:']
details = self.details.items()
details.sort()
for key, value in details:
if isinstance(value, Node):
internals.append('%7s%s:' % ('', key))
internals.extend(['%9s%s' % ('', line)
for line in value.pformat().splitlines()])
elif value and isinstance(value, list) \
and isinstance(value[0], Node):
internals.append('%7s%s:' % ('', key))
for v in value:
internals.extend(['%9s%s' % ('', line)
for line in v.pformat().splitlines()])
else:
internals.append('%7s%s: %r' % ('', key, value))
return (Element.pformat(self, indent, level)
+ ''.join([(' %s%s\n' % (indent * level, line))
for line in internals]))
def copy(self):
return self.__class__(self.transform, self.details, self.rawsource,
**self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):
"""
Raw data that is to be passed untouched to the Writer.
"""
pass
# =================
# Inline Elements
# =================
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass
class math(Inline, TextElement): pass
class image(General, Inline, Element):
def astext(self):
return self.get('alt', '')
class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
# ========================================
# Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
math math_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:
"""
"Visitor" pattern [GoF95]_ abstract superclass implementation for
document tree traversals.
Each node class has corresponding methods, doing nothing by
default; override individual methods for specific and useful
behaviour. The `dispatch_visit()` method is called by
`Node.walk()` upon entering a node. `Node.walkabout()` also calls
the `dispatch_departure()` method before exiting a node.
The dispatch methods call "``visit_`` + node class name" or
"``depart_`` + node class name", resp.
This is a base class for visitors whose ``visit_...`` & ``depart_...``
methods should be implemented for *all* node types encountered (such as
for `docutils.writers.Writer` subclasses). Unimplemented methods will
raise exceptions.
For sparse traversals, where only certain node types are of interest,
subclass `SparseNodeVisitor` instead. When (mostly or entirely) uniform
processing is desired, subclass `GenericNodeVisitor`.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
optional = ()
"""
Tuple containing node class names (as strings).
No exception will be raised if writers do not implement visit
or departure functions for these node classes.
Used to ensure transitional compatibility with existing 3rd-party writers.
"""
def __init__(self, document):
self.document = document
def dispatch_visit(self, node):
"""
Call self."``visit_`` + node class name" with `node` as
parameter. If the ``visit_...`` method does not exist, call
self.unknown_visit.
"""
node_name = node.__class__.__name__
method = getattr(self, 'visit_' + node_name, self.unknown_visit)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
% (method.__name__, node_name))
return method(node)
def dispatch_departure(self, node):
"""
Call self."``depart_`` + node class name" with `node` as
parameter. If the ``depart_...`` method does not exist, call
self.unknown_departure.
"""
node_name = node.__class__.__name__
method = getattr(self, 'depart_' + node_name, self.unknown_departure)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
% (method.__name__, node_name))
return method(node)
def unknown_visit(self, node):
"""
Called when entering unknown `Node` types.
Raise an exception unless overridden.
"""
if (self.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s visiting unknown node type: %s'
% (self.__class__, node.__class__.__name__))
def unknown_departure(self, node):
"""
Called before exiting unknown `Node` types.
Raise exception unless overridden.
"""
if (self.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s departing unknown node type: %s'
% (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):
"""
Base class for sparse traversals, where only certain node types are of
interest. When ``visit_...`` & ``depart_...`` methods should be
implemented for *all* node types (such as for `docutils.writers.Writer`
subclasses), subclass `NodeVisitor` instead.
"""
class GenericNodeVisitor(NodeVisitor):
"""
Generic "Visitor" abstract superclass, for simple traversals.
Unless overridden, each ``visit_...`` method calls `default_visit()`, and
each ``depart_...`` method (when using `Node.walkabout()`) calls
`default_departure()`. `default_visit()` (and `default_departure()`) must
be overridden in subclasses.
Define fully generic visitors by overriding `default_visit()` (and
`default_departure()`) only. Define semi-generic visitors by overriding
individual ``visit_...()`` (and ``depart_...()``) methods also.
`NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
be overridden for default behavior.
"""
def default_visit(self, node):
"""Override for generic, uniform traversals."""
raise NotImplementedError
def default_departure(self, node):
"""Override for generic, uniform traversals."""
raise NotImplementedError
def _call_default_visit(self, node):
self.default_visit(node)
def _call_default_departure(self, node):
self.default_departure(node)
def _nop(self, node):
pass
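# Sketch: a fully generic visitor built on `GenericNodeVisitor` (assumes
# `doctree` is a parsed `document`)::
#
#     class NodeCounter(GenericNodeVisitor):
#         count = 0
#         def default_visit(self, node):
#             self.count += 1
#         def default_departure(self, node):
#             pass
#
#     counter = NodeCounter(doctree)
#     doctree.walkabout(counter)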
def _add_node_class_names(names):
"""Save typing with dynamic assignments:"""
for _name in names:
setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
setattr(SparseNodeVisitor, 'depart_' + _name, _nop)
_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):
"""
Make a complete copy of a tree or branch, including element attributes.
"""
def __init__(self, document):
GenericNodeVisitor.__init__(self, document)
self.parent_stack = []
self.parent = []
def get_tree_copy(self):
return self.parent[0]
def default_visit(self, node):
"""Copy the current node, and make it the new acting parent."""
newnode = node.copy()
self.parent.append(newnode)
self.parent_stack.append(self.parent)
self.parent = newnode
def default_departure(self, node):
"""Restore the previous acting parent."""
self.parent = self.parent_stack.pop()
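# Sketch: deep-copying a tree with `TreeCopyVisitor` (assumes `doctree`
# is a parsed `document`)::
#
#     copier = TreeCopyVisitor(doctree)
#     doctree.walkabout(copier)
#     tree_copy = copier.get_tree_copy()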
class TreePruningException(Exception):
"""
Base class for `NodeVisitor`-related tree pruning exceptions.
Raise subclasses from within ``visit_...`` or ``depart_...`` methods
called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
the tree traversed.
"""
pass
class SkipChildren(TreePruningException):
"""
Do not visit any children of the current node. The current node's
siblings and ``depart_...`` method are not affected.
"""
pass
class SkipSiblings(TreePruningException):
"""
Do not visit any more siblings (to the right) of the current node. The
current node's children and its ``depart_...`` method are not affected.
"""
pass
class SkipNode(TreePruningException):
"""
Do not visit the current node's children, and do not call the current
node's ``depart_...`` method.
"""
pass
class SkipDeparture(TreePruningException):
"""
Do not call the current node's ``depart_...`` method. The current node's
children and siblings are not affected.
"""
pass
class NodeFound(TreePruningException):
"""
Raise to indicate that the target of a search has been found. This
exception must be caught by the client; it is not caught by the traversal
code.
"""
pass
class StopTraversal(TreePruningException):
"""
    Stop the traversal altogether. The current node's ``depart_...`` method
    is not affected. The parent nodes' ``depart_...`` methods are also called
    as usual. No other nodes are visited. This is an alternative to
NodeFound that does not cause exception handling to trickle up to the
caller.
"""
pass
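# Sketch: pruning a traversal from inside a visitor method::
#
#     class FirstTitle(SparseNodeVisitor):
#         def visit_title(self, node):
#             self.title = node.astext()
#             raise StopTraversal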
def make_id(string):
"""
Convert `string` into an identifier and return it.
Docutils identifiers will conform to the regular expression
``[a-z](-?[a-z0-9]+)*``. For CSS compatibility, identifiers (the "class"
and "id" attributes) should have no underscores, colons, or periods.
Hyphens may be used.
- The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:
ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
followed by any number of letters, digits ([0-9]), hyphens ("-"),
underscores ("_"), colons (":"), and periods (".").
- However the `CSS1 spec`_ defines identifiers based on the "name" token,
a tighter interpretation ("flex" tokenizer notation; "latin1" and
"escape" 8-bit characters have been replaced with entities)::
unicode \\[0-9a-f]{1,4}
latin1 [¡-ÿ]
escape {unicode}|\\[ -~¡-ÿ]
nmchar [-a-z0-9]|{latin1}|{escape}
name {nmchar}+
The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
or periods ("."), therefore "class" and "id" attributes should not contain
these characters. They should be replaced with hyphens ("-"). Combined
with HTML's requirements (the first character must be a letter; no
"unicode", "latin1", or "escape" characters), this results in the
``[a-z](-?[a-z0-9]+)*`` pattern.
.. _HTML 4.01 spec: http://www.w3.org/TR/html401
.. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
"""
id = string.lower()
if not isinstance(id, unicode):
id = id.decode()
id = id.translate(_non_id_translate_digraphs)
id = id.translate(_non_id_translate)
# get rid of non-ascii characters.
# 'ascii' lowercase to prevent problems with turkish locale.
id = unicodedata.normalize('NFKD', id).\
encode('ascii', 'ignore').decode('ascii')
# shrink runs of whitespace and replace by hyphen
id = _non_id_chars.sub('-', ' '.join(id.split()))
id = _non_id_at_ends.sub('', id)
return str(id)
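# Sketch of `make_id()` normalization::
#
#     make_id('A  B.C_d')     # -> 'a-b-c-d'
#     make_id(u'\xd8re 42')   # -> 'ore-42' (stroke folded to 'o')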
_non_id_chars = re.compile('[^a-z0-9]+')
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
_non_id_translate = {
0x00f8: u'o', # o with stroke
0x0111: u'd', # d with stroke
0x0127: u'h', # h with stroke
0x0131: u'i', # dotless i
0x0142: u'l', # l with stroke
0x0167: u't', # t with stroke
0x0180: u'b', # b with stroke
0x0183: u'b', # b with topbar
0x0188: u'c', # c with hook
0x018c: u'd', # d with topbar
0x0192: u'f', # f with hook
0x0199: u'k', # k with hook
0x019a: u'l', # l with bar
0x019e: u'n', # n with long right leg
0x01a5: u'p', # p with hook
0x01ab: u't', # t with palatal hook
0x01ad: u't', # t with hook
0x01b4: u'y', # y with hook
0x01b6: u'z', # z with stroke
0x01e5: u'g', # g with stroke
0x0225: u'z', # z with hook
0x0234: u'l', # l with curl
0x0235: u'n', # n with curl
0x0236: u't', # t with curl
0x0237: u'j', # dotless j
0x023c: u'c', # c with stroke
0x023f: u's', # s with swash tail
0x0240: u'z', # z with swash tail
0x0247: u'e', # e with stroke
0x0249: u'j', # j with stroke
0x024b: u'q', # q with hook tail
0x024d: u'r', # r with stroke
0x024f: u'y', # y with stroke
}
_non_id_translate_digraphs = {
0x00df: u'sz', # ligature sz
0x00e6: u'ae', # ae
0x0153: u'oe', # ligature oe
0x0238: u'db', # db digraph
0x0239: u'qp', # qp digraph
}
def dupname(node, name):
node['dupnames'].append(name)
node['names'].remove(name)
    # Assume that this node is referenced, even though it isn't; we
    # don't want to throw unnecessary system_messages.
node.referenced = 1
def fully_normalize_name(name):
"""Return a case- and whitespace-normalized name."""
return ' '.join(name.lower().split())
def whitespace_normalize_name(name):
"""Return a whitespace-normalized name."""
return ' '.join(name.split())
def serial_escape(value):
"""Escape string values that are elements of a list, for serialization."""
return value.replace('\\', r'\\').replace(' ', r'\ ')
def pseudo_quoteattr(value):
"""Quote attributes for pseudo-xml"""
return '"%s"' % value
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/nodes.py",
"copies": "2",
"size": "77302",
"license": "mit",
"hash": -4553807996180111400,
"line_mean": 34.0575963719,
"line_max": 79,
"alpha_frac": 0.5964140643,
"autogenerated": false,
"ratio": 4.246895945500494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5843310009800494,
"avg_score": null,
"num_lines": null
} |
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
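# A minimal usage sketch (the `writer_name` keyword selects this writer)::
#
#     from docutils.core import publish_string
#     html = publish_string('Hello, *world*!', writer_name='html4css1')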
import sys
import os
import os.path
import time
import re
import urllib
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
import docutils
from docutils import frontend, nodes, utils, writers, languages, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import unichar2tex, pick_math_environment, math2html
from docutils.utils.math.latex2mathml import parse_latex_math
class Writer(writers.Writer):
supported = ('html', 'html4css1', 'xhtml')
"""Formats this writer supports."""
default_stylesheet = 'html4css1.css'
default_stylesheet_dirs = ['.', utils.relative_path(
os.path.join(os.getcwd(), 'dummy'), os.path.dirname(__file__))]
default_template = 'template.txt'
default_template_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_template))
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % default_stylesheet,
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': [default_stylesheet]}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Math output format, one of "MathML", "HTML", "MathJax" '
'or "LaTeX". Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
relative_path_settings = ('stylesheet_path',)
config_section = 'html4css1 writer'
config_section_dependencies = ('writers',)
visitor_attributes = (
'head_prefix', 'head', 'stylesheet', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'body_suffix',
'title', 'subtitle', 'header', 'footer', 'meta', 'fragment',
'html_prolog', 'html_head', 'html_title', 'html_subtitle',
'html_body')
def get_transforms(self):
return writers.Writer.get_transforms(self) + [writer_aux.Admonitions]
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = HTMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
for attr in self.visitor_attributes:
setattr(self, attr, getattr(visitor, attr))
self.output = self.apply_template()
def apply_template(self):
template_file = open(self.document.settings.template, 'rb')
template = unicode(template_file.read(), 'utf-8')
template_file.close()
subs = self.interpolation_dict()
return template % subs
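    # Editor's note -- illustrative sketch, not part of docutils: a minimal
    # template in the same "%"-format style as the default template.txt
    # (placeholder names assumed from the visitor_attributes above) would be
    #
    #   %(head_prefix)s%(head)s%(stylesheet)s%(body_prefix)s
    #   %(body_pre_docinfo)s%(docinfo)s%(body)s%(body_suffix)s
    #
    # interpolation_dict() below supplies every such key, plus "encoding"
    # and "version".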
def interpolation_dict(self):
subs = {}
settings = self.document.settings
for attr in self.visitor_attributes:
subs[attr] = ''.join(getattr(self, attr)).rstrip('\n')
subs['encoding'] = settings.output_encoding
subs['version'] = docutils.__version__
return subs
def assemble_parts(self):
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
self.parts[part] = ''.join(getattr(self, part))
class HTMLTranslator(nodes.NodeVisitor):
"""
This HTML writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in browsers (although they really
shouldn't).
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
doctype_mathml = doctype
head_prefix_template = ('<html xmlns="http://www.w3.org/1999/xhtml"'
' xml:lang="%(lang)s" lang="%(lang)s">\n<head>\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
content_type_mathml = ('<meta http-equiv="Content-Type"'
' content="application/xhtml+xml; charset=%s" />\n')
generator = ('<meta name="generator" content="Docutils %s: '
'http://docutils.sourceforge.net/" />\n')
# Template for the MathJax script in the header:
mathjax_script = '<script type="text/javascript" src="%s"></script>\n'
# The latest version of MathJax from the distributed server:
    # available to the public under the `MathJax CDN Terms of Service`__
    # __ http://www.mathjax.org/download/mathjax-cdn-terms-of-service/
mathjax_url = ('http://cdn.mathjax.org/mathjax/latest/MathJax.js?'
'config=TeX-AMS-MML_HTMLorMML')
# may be overwritten by custom URL appended to "mathjax"
stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
embedded_stylesheet = '<style type="text/css">\n\n%s\n</style>\n'
words_and_spaces = re.compile(r'\S+| +|\n')
sollbruchstelle = re.compile(r'.+\W\W.+|[-?].+', re.U) # wrap point inside word
lang_attribute = 'lang' # name changes to 'xml:lang' in XHTML 1.1
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.meta = [self.generator % docutils.__version__]
self.head_prefix = []
self.html_prolog = []
if settings.xml_declaration:
self.head_prefix.append(self.xml_declaration
% settings.output_encoding)
# encoding not interpolated:
self.html_prolog.append(self.xml_declaration)
self.head = self.meta[:]
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
self.body_prefix = ['</head>\n<body>\n']
# document title, subtitle display
self.body_pre_docinfo = []
# author, date, etc.
self.docinfo = []
self.body = []
self.fragment = []
self.body_suffix = ['</body>\n</html>\n']
self.section_level = 0
self.initial_header_level = int(settings.initial_header_level)
self.math_output = settings.math_output.split()
self.math_output_options = self.math_output[1:]
self.math_output = self.math_output[0].lower()
        # A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
self.topic_classes = []
self.colspecs = []
self.compact_p = True
self.compact_simple = False
self.compact_field_list = False
self.in_docinfo = False
self.in_sidebar = False
self.title = []
self.subtitle = []
self.header = []
self.footer = []
self.html_head = [self.content_type] # charset not interpolated
self.html_title = []
self.html_subtitle = []
self.html_body = []
self.in_document_title = 0 # len(self.body) or 0
self.in_mailto = False
self.author_in_authors = False
self.math_header = []
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body + self.body_suffix)
def encode(self, text):
"""Encode special characters in `text` & return."""
# @@@ A codec to do these and all other HTML entities would be nice.
text = unicode(text)
return text.translate({
            ord('&'): u'&amp;',
            ord('<'): u'&lt;',
            ord('"'): u'&quot;',
            ord('>'): u'&gt;',
            ord('@'): u'&#64;', # may thwart some address harvesters
            # TODO: convert non-breaking space only if needed?
            0xa0: u'&nbsp;'}) # non-breaking space
def cloak_mailto(self, uri):
"""Try to hide a mailto: URL from harvesters."""
# Encode "@" using a URL octet reference (see RFC 1738).
# Further cloaking with HTML entities will be done in the
# `attval` function.
return uri.replace('@', '%40')
def cloak_email(self, addr):
"""Try to hide the link text of a email link from harversters."""
# Surround at-signs and periods with <span> tags. ("@" has
# already been encoded to "@" by the `encode` method.)
addr = addr.replace('@', '<span>@</span>')
addr = addr.replace('.', '<span>.</span>')
return addr
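    # Editor's note -- illustrative sketch, not part of docutils: combined
    # effect of the cloaking helpers on jane@example.org when
    # --cloak-email-addresses is active:
    #   href: 'mailto:jane@example.org' -> 'mailto:jane%40example.org',
    #         then attval() rewrites '%40' as '&#37;&#52;&#48;'
    #   text: 'jane&#64;example.org' ->
    #         'jane<span>&#64;</span>example<span>.</span>org'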
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, HTML encode, and return attribute value text."""
encoded = self.encode(whitespace.sub(' ', text))
if self.in_mailto and self.settings.cloak_email_addresses:
            # Cloak at-signs ("%40") and periods with HTML entities.
            encoded = encoded.replace('%40', '&#37;&#52;&#48;')
            encoded = encoded.replace('.', '&#46;')
return encoded
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
if self.settings.embed_stylesheet:
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError, err:
msg = u"Cannot embed stylesheet '%s': %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '<--- %s --->\n' % msg
return self.embedded_stylesheet % content
# else link to style file:
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return self.stylesheet_link % self.encode(path)
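    # Editor's note -- illustrative sketch, not part of docutils: with the
    # default --embed-stylesheet this returns
    #   <style type="text/css">\n\n<file content>\n</style>\n
    # while --link-stylesheet yields
    #   <link rel="stylesheet" href="html4css1.css" type="text/css" />\n
    # with the path rewritten relative to the output file.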
def starttag(self, node, tagname, suffix='\n', empty=False, **attributes):
"""
Construct and return a start tag given a node (id & class attributes
are extracted), tag name, and optional attributes.
"""
tagname = tagname.lower()
prefix = []
atts = {}
ids = []
for (name, value) in attributes.items():
atts[name.lower()] = value
classes = node.get('classes', [])
if 'class' in atts:
classes.append(atts.pop('class'))
# move language specification to 'lang' attribute
languages = [cls for cls in classes
if cls.startswith('language-')]
if languages:
# attribute name is 'lang' in XHTML 1.0 but 'xml:lang' in 1.1
atts[self.lang_attribute] = languages[0][9:]
classes.pop(classes.index(languages[0]))
classes = ' '.join(classes).strip()
if classes:
atts['class'] = classes
assert 'id' not in atts
ids.extend(node.get('ids', []))
if 'ids' in atts:
ids.extend(atts['ids'])
del atts['ids']
if ids:
atts['id'] = ids[0]
for id in ids[1:]:
# Add empty "span" elements for additional IDs. Note
# that we cannot use empty "a" elements because there
# may be targets inside of references, but nested "a"
# elements aren't allowed in XHTML (even if they do
# not all have a "href" attribute).
if empty:
# Empty tag. Insert target right in front of element.
prefix.append('<span id="%s"></span>' % id)
else:
# Non-empty tag. Place the auxiliary <span> tag
# *inside* the element, as the first child.
suffix += '<span id="%s"></span>' % id
attlist = atts.items()
attlist.sort()
parts = [tagname]
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
if isinstance(value, list):
values = [unicode(v) for v in value]
parts.append('%s="%s"' % (name.lower(),
self.attval(' '.join(values))))
else:
parts.append('%s="%s"' % (name.lower(),
self.attval(unicode(value))))
if empty:
infix = ' /'
else:
infix = ''
return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix
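    # Editor's note -- illustrative sketch, not part of docutils: for a
    # paragraph node with ids=['intro', 'alias'] and classes=['note'],
    # starttag(node, 'p', '') should return (attributes sorted by name)
    #   <p class="note" id="intro"><span id="alias"></span>
    # the second id becoming an auxiliary <span> as described above.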
def emptytag(self, node, tagname, suffix='\n', **attributes):
"""Construct and return an XML-compatible empty tag."""
return self.starttag(node, tagname, suffix, empty=True, **attributes)
def set_class_on_child(self, node, class_, index=0):
"""
Set class `class_` on the visible child no. index of `node`.
Do nothing if node has fewer children than `index`.
"""
children = [n for n in node if not isinstance(n, nodes.Invisible)]
try:
child = children[index]
except IndexError:
return
child['classes'].append(class_)
def set_first_last(self, node):
self.set_class_on_child(node, 'first', 0)
self.set_class_on_child(node, 'last', -1)
def visit_Text(self, node):
text = node.astext()
encoded = self.encode(text)
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)
self.body.append(encoded)
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'acronym', ''))
def depart_acronym(self, node):
self.body.append('</acronym>')
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=False)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
def depart_address(self, node):
self.body.append('\n</pre>\n')
self.depart_docinfo_item()
def visit_admonition(self, node):
self.body.append(self.starttag(node, 'div'))
self.set_first_last(node)
def depart_admonition(self, node=None):
self.body.append('</div>\n')
    attribution_formats = {'dash': ('&mdash;', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.context.append(suffix)
self.body.append(
self.starttag(node, 'p', prefix, CLASS='attribution'))
def depart_attribution(self, node):
self.body.append(self.context.pop() + '</p>\n')
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
if self.author_in_authors:
self.body.append('\n<br />')
else:
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors = True
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = False # initialize
def depart_authors(self, node):
self.depart_docinfo_item()
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append('</blockquote>\n')
def check_simple_list(self, node):
"""Check for a simple list that can be rendered compactly."""
visitor = SimpleListChecker(self.document)
try:
node.walk(visitor)
except nodes.NodeFound:
return None
else:
return 1
def is_compactable(self, node):
return ('compact' in node['classes']
or (self.settings.compact_lists
and 'open' not in node['classes']
and (self.compact_simple
or self.topic_classes == ['contents']
or self.check_simple_list(node))))
def visit_bullet_list(self, node):
atts = {}
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = 'simple'
self.body.append(self.starttag(node, 'ul', **atts))
def depart_bullet_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ul>\n')
def visit_caption(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
def depart_caption(self, node):
self.body.append('</p>\n')
def visit_citation(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
href = '#'
if 'refid' in node:
href += node['refid']
elif 'refname' in node:
href += self.document.nameids[node['refname']]
# else: # TODO system message (or already in the transform)?
# 'Citation reference missing.'
self.body.append(self.starttag(
node, 'a', '[', CLASS='citation-reference', href=href))
def depart_citation_reference(self, node):
self.body.append(']</a>')
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
def depart_classifier(self, node):
self.body.append('</span>')
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
def write_colspecs(self):
width = 0
for node in self.colspecs:
width += node['colwidth']
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.colspecs = []
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
"""Escape double-dashes in comment text."""
self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
# Content already processed:
raise nodes.SkipNode
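    # Editor's note -- illustrative sketch, not part of docutils: the
    # double-dash escape above turns comment text "a--b" into
    #   <!-- a- -b -->
    # so the emitted HTML comment never contains "--".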
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div', CLASS='compound'))
if len(node) > 1:
node[0]['classes'].append('compound-first')
node[-1]['classes'].append('compound-last')
for child in node[1:-1]:
child['classes'].append('compound-middle')
def depart_compound(self, node):
self.body.append('</div>\n')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div', CLASS='container'))
def depart_container(self, node):
self.body.append('</div>\n')
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact', meta=False)
def depart_contact(self, node):
self.depart_docinfo_item()
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item()
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item()
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
def depart_definition(self, node):
self.body.append('</dd>\n')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
def depart_definition_list(self, node):
self.body.append('</dl>\n')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
def visit_docinfo(self, node):
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
'<tbody valign="top">\n')
self.in_docinfo = True
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = False
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=True):
if meta:
meta_tag = '<meta name="%s" content="%s" />\n' \
% (name, self.attval(node.astext()))
self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
node[0]['classes'].append('first')
if isinstance(node[-1], nodes.Element):
node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
def depart_doctest_block(self, node):
self.body.append('\n</pre>\n')
def visit_document(self, node):
self.head.append('<title>%s</title>\n'
% self.encode(node.get('title', '')))
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
else:
self.stylesheet.extend(self.math_header)
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.body_prefix.append(self.starttag(node, 'div', CLASS='document'))
self.body_suffix.insert(0, '</div>\n')
self.fragment.extend(self.body) # self.fragment is the "naked" body
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
assert not self.context, 'len(context) = %s' % len(self.context)
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em', ''))
def depart_emphasis(self, node):
self.body.append('</em>')
def visit_entry(self, node):
atts = {'class': []}
if isinstance(node.parent.parent, nodes.thead):
atts['class'].append('head')
if node.parent.parent.parent.stubs[node.parent.column]:
# "stubs" list is an attribute of the tgroup element
atts['class'].append('stub')
if atts['class']:
tagname = 'th'
atts['class'] = ' '.join(atts['class'])
else:
tagname = 'td'
del atts['class']
node.parent.column += 1
if 'morerows' in node:
atts['rowspan'] = node['morerows'] + 1
if 'morecols' in node:
atts['colspan'] = node['morecols'] + 1
node.parent.column += node['morecols']
self.body.append(self.starttag(node, tagname, '', **atts))
self.context.append('</%s>\n' % tagname.lower())
if len(node) == 0: # empty cell
            self.body.append('&nbsp;')
self.set_first_last(node)
def depart_entry(self, node):
self.body.append(self.context.pop())
def visit_enumerated_list(self, node):
"""
The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
CSS1 doesn't help. CSS2 isn't widely enough supported yet to be
usable.
"""
atts = {}
if 'start' in node:
atts['start'] = node['start']
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = True
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = True
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = False
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n'
+ self.starttag(node.parent, 'tr', '')
                                + '<td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
def visit_figure(self, node):
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %s' % node['width']
if node.get('align'):
atts['class'] += " align-" + node['align']
self.body.append(self.starttag(node, 'div', **atts))
def depart_figure(self, node):
self.body.append('</div>\n')
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = [self.starttag(node, 'div', CLASS='footer'),
'<hr class="footer" />\n']
footer.extend(self.body[start:])
footer.append('\n</div>\n')
self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
backlinks = []
backrefs = node['backrefs']
if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
self.context.append('</a>')
self.context.append('<a class="fn-backref" href="#%s">'
% backrefs[0])
else:
i = 1
for backref in backrefs:
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i))
i += 1
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context += ['', '']
else:
self.context.append('')
self.context += ['', '']
# If the node does not only consist of a label.
if len(node) > 1:
# If there are preceding backlinks, we do not set class
# 'first', because we need to retain the top-margin.
if not backlinks:
node[1]['classes'].append('first')
node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
header = [self.starttag(node, 'div', CLASS='header')]
header.extend(self.body[start:])
header.append('\n<hr class="header"/>\n</div>\n')
self.body_prefix.extend(header)
self.header.extend(header)
del self.body[start:]
def visit_image(self, node):
atts = {}
uri = node['uri']
# place SVG and SWF images in an <object> element
types = {'.svg': 'image/svg+xml',
'.swf': 'application/x-shockwave-flash'}
ext = os.path.splitext(uri)[1].lower()
if ext in ('.svg', '.swf'):
atts['data'] = uri
atts['type'] = types[ext]
else:
atts['src'] = uri
atts['alt'] = node.get('alt', uri)
# image size
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
if 'scale' in node:
if (PIL and not ('width' in node and 'height' in node)
and self.settings.file_insertion_enabled):
imagepath = urllib.url2pathname(uri)
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (IOError, UnicodeEncodeError):
pass # TODO: warn?
else:
self.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
if 'width' not in atts:
atts['width'] = str(img.size[0])
if 'height' not in atts:
atts['height'] = str(img.size[1])
del img
for att_name in 'width', 'height':
if att_name in atts:
match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
assert match
atts[att_name] = '%s%s' % (
float(match.group(1)) * (float(node['scale']) / 100),
match.group(2))
style = []
for att_name in 'width', 'height':
if att_name in atts:
if re.match(r'^[0-9.]+$', atts[att_name]):
# Interpret unitless values as pixels.
atts[att_name] += 'px'
style.append('%s: %s;' % (att_name, atts[att_name]))
del atts[att_name]
if style:
atts['style'] = ' '.join(style)
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
if 'align' in node:
atts['class'] = 'align-%s' % node['align']
self.context.append('')
if ext in ('.svg', '.swf'): # place in an object element,
# do NOT use an empty tag: incorrect rendering in browsers
self.body.append(self.starttag(node, 'object', suffix, **atts) +
node.get('alt', uri) + '</object>' + suffix)
else:
self.body.append(self.emptytag(node, 'img', suffix, **atts))
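    # Editor's note -- illustrative sketch, not part of docutils: with
    # ``:scale: 50`` and PIL reporting a 200x100 px image, the code above
    # emits style="width: 100.0px; height: 50.0px;" on the <img> tag;
    # .svg/.swf URIs are wrapped in an <object> element instead.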
def depart_image(self, node):
self.body.append(self.context.pop())
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span', ''))
def depart_inline(self, node):
self.body.append('</span>')
def visit_label(self, node):
# Context added in footnote_backrefs.
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
def visit_legend(self, node):
self.body.append(self.starttag(node, 'div', CLASS='legend'))
def depart_legend(self, node):
self.body.append('</div>\n')
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', suffix='', CLASS='line'))
if not len(node):
self.body.append('<br />')
def depart_line(self, node):
self.body.append('</div>\n')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append('</div>\n')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
node[0]['classes'].append('first')
def depart_list_item(self, node):
self.body.append('</li>\n')
def visit_literal(self, node):
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.sollbruchstelle.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
                self.body.append('&nbsp;' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
def depart_literal(self, node):
# skipped unless literal element is from "code" role:
self.body.append('</code>')
def visit_literal_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
def depart_literal_block(self, node):
self.body.append('\n</pre>\n')
def visit_math(self, node, math_env=''):
# If the method is called from visit_math_block(), math_env != ''.
# As there is no native HTML math support, we provide alternatives:
# LaTeX and MathJax math_output modes simply wrap the content,
# HTML and MathML math_output modes also convert the math_code.
if self.math_output not in ('mathml', 'html', 'mathjax', 'latex'):
self.document.reporter.error(
                'math-output format "%s" not supported; '
                'falling back to "latex"' % self.math_output)
self.math_output = 'latex'
#
# HTML container
tags = {# math_output: (block, inline, class-arguments)
'mathml': ('div', '', ''),
'html': ('div', 'span', 'formula'),
'mathjax': ('div', 'span', 'math'),
'latex': ('pre', 'tt', 'math'),
}
tag = tags[self.math_output][math_env == '']
clsarg = tags[self.math_output][2]
# LaTeX container
wrappers = {# math_mode: (inline, block)
'mathml': (None, None),
'html': ('$%s$', u'\\begin{%s}\n%s\n\\end{%s}'),
'mathjax': ('\(%s\)', u'\\begin{%s}\n%s\n\\end{%s}'),
'latex': (None, None),
}
wrapper = wrappers[self.math_output][math_env != '']
# get and wrap content
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if wrapper and math_env:
math_code = wrapper % (math_env, math_code, math_env)
elif wrapper:
math_code = wrapper % math_code
# settings and conversion
if self.math_output in ('latex', 'mathjax'):
math_code = self.encode(math_code)
if self.math_output == 'mathjax' and not self.math_header:
if self.math_output_options:
self.mathjax_url = self.math_output_options[0]
self.math_header = [self.mathjax_script % self.mathjax_url]
elif self.math_output == 'html':
if self.math_output_options and not self.math_header:
self.math_header = [self.stylesheet_call(
utils.find_file_in_dirs(s, self.settings.stylesheet_dirs))
for s in self.math_output_options[0].split(',')]
# TODO: fix display mode in matrices and fractions
math2html.DocumentParameters.displaymode = (math_env != '')
math_code = math2html.math2html(math_code)
elif self.math_output == 'mathml':
self.doctype = self.doctype_mathml
self.content_type = self.content_type_mathml
try:
mathml_tree = parse_latex_math(math_code, inline=not(math_env))
math_code = ''.join(mathml_tree.xml())
except SyntaxError, err:
err_node = self.document.reporter.error(err, base_node=node)
self.visit_system_message(err_node)
self.body.append(self.starttag(node, 'p'))
self.body.append(u','.join(err.args))
self.body.append('</p>\n')
self.body.append(self.starttag(node, 'pre',
CLASS='literal-block'))
self.body.append(self.encode(math_code))
self.body.append('\n</pre>\n')
self.depart_system_message(err_node)
raise nodes.SkipNode
# append to document body
if tag:
self.body.append(self.starttag(node, tag,
suffix='\n'*bool(math_env),
CLASS=clsarg))
self.body.append(math_code)
if math_env:
self.body.append('\n')
if tag:
self.body.append('</%s>\n' % tag)
# Content already processed:
raise nodes.SkipNode
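    # Editor's note -- illustrative sketch, not part of docutils: for inline
    # math ``x^2``, --math-output=mathjax should emit
    #   <span class="math">\(x^2\)</span>
    # and --math-output=latex
    #   <tt class="math">x^2</tt>
    # while the default "HTML math.css" mode converts the LaTeX via
    # math2html instead of wrapping it.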
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
# print node.astext().encode('utf8')
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_meta(self, node):
meta = self.emptytag(node, 'meta', **node.non_default_attributes())
self.add_meta(meta)
def depart_meta(self, node):
pass
def add_meta(self, tag):
self.meta.append(tag)
self.head.append(tag)
def visit_option(self, node):
if self.context[-1]:
self.body.append(', ')
self.body.append(self.starttag(node, 'span', '', CLASS='option'))
def depart_option(self, node):
self.body.append('</span>')
self.context[-1] += 1
def visit_option_argument(self, node):
self.body.append(node.get('delimiter', ' '))
self.body.append(self.starttag(node, 'var', ''))
def depart_option_argument(self, node):
self.body.append('</var>')
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
            self.context.append('</tr>\n<tr><td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
def depart_option_group(self, node):
self.context.pop()
self.body.append('</kbd></td>\n')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append(
self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
'<tbody valign="top">\n')
def depart_option_list(self, node):
self.body.append('</tbody>\n</table>\n')
def visit_option_list_item(self, node):
self.body.append(self.starttag(node, 'tr', ''))
def depart_option_list_item(self, node):
self.body.append('</tr>\n')
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item()
def should_be_compact_paragraph(self, node):
"""
Determine if the <p> tags around paragraph ``node`` can be omitted.
"""
if (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.compound)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
if (node.is_not_default(key) and
not (key == 'classes' and value in
([], ['first'], ['last'], ['first', 'last']))):
# Attribute which needs to survive.
return False
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([n for n in node.parent if not isinstance(
n, (nodes.Invisible, nodes.label))])
if ( self.compact_simple
or self.compact_field_list
or self.compact_p and parent_length == 1):
return True
return False
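    # Editor's note -- illustrative sketch, not part of docutils: under the
    # rules above, a lone paragraph in a list item renders bare,
    #   <li>just text</li>
    # while adding a second paragraph to the item restores the <p> wrappers
    # around both.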
def visit_paragraph(self, node):
if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
self.context.append('</p>\n')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_problematic(self, node):
if node.hasattr('refid'):
self.body.append('<a href="#%s">' % node['refid'])
self.context.append('</a>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))
def depart_problematic(self, node):
self.body.append('</span>')
self.body.append(self.context.pop())
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
if node['classes']:
self.body.append(self.starttag(node, t, suffix=''))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
atts = {'class': 'reference'}
if 'refuri' in node:
atts['href'] = node['refuri']
if ( self.settings.cloak_email_addresses
and atts['href'].startswith('mailto:')):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = True
atts['class'] += ' external'
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
atts['class'] += ' internal'
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
self.body.append(self.starttag(node, 'a', '', **atts))
def depart_reference(self, node):
self.body.append('</a>')
if not isinstance(node.parent, nodes.TextElement):
self.body.append('\n')
self.in_mailto = False
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision', meta=False)
def depart_revision(self, node):
self.depart_docinfo_item()
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr', ''))
node.column = 0
def depart_row(self, node):
self.body.append('</tr>\n')
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))
def depart_rubric(self, node):
self.body.append('</p>\n')
def visit_section(self, node):
self.section_level += 1
self.body.append(
self.starttag(node, 'div', CLASS='section'))
def depart_section(self, node):
self.section_level -= 1
self.body.append('</div>\n')
def visit_sidebar(self, node):
self.body.append(
self.starttag(node, 'div', CLASS='sidebar'))
self.set_first_last(node)
self.in_sidebar = True
def depart_sidebar(self, node):
self.body.append('</div>\n')
self.in_sidebar = False
def visit_status(self, node):
self.visit_docinfo_item(node, 'status', meta=False)
def depart_status(self, node):
self.depart_docinfo_item()
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong', ''))
def depart_strong(self, node):
self.body.append('</strong>')
def visit_subscript(self, node):
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
self.body.append('</sub>')
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
self.in_document_title = len(self.body)
elif isinstance(node.parent, nodes.section):
tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
self.body.append(
self.starttag(node, tag, '', CLASS='section-subtitle') +
self.starttag({}, 'span', '', CLASS='section-subtitle'))
self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
def visit_superscript(self, node):
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
self.body.append('</sup>')
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
def depart_system_message(self, node):
self.body.append('</div>\n')
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="1"))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_target(self, node):
if not ('refuri' in node or 'refid' in node
or 'refname' in node):
self.body.append(self.starttag(node, 'span', '', CLASS='target'))
self.context.append('</span>')
else:
self.context.append('')
def depart_target(self, node):
self.body.append(self.context.pop())
def visit_tbody(self, node):
self.write_colspecs()
self.body.append(self.context.pop()) # '</colgroup>\n' or ''
self.body.append(self.starttag(node, 'tbody', valign='top'))
def depart_tbody(self, node):
self.body.append('</tbody>\n')
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt', ''))
def depart_term(self, node):
"""
Leave the end tag to `self.visit_definition()`, in case there's a
classifier.
"""
pass
def visit_tgroup(self, node):
# Mozilla needs <colgroup>:
self.body.append(self.starttag(node, 'colgroup'))
# Appended by thead or tbody:
self.context.append('</colgroup>\n')
node.stubs = []
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.write_colspecs()
self.body.append(self.context.pop()) # '</colgroup>\n'
# There may or may not be a <thead>; this is for <tbody> to use:
self.context.append('')
self.body.append(self.starttag(node, 'thead', valign='bottom'))
def depart_thead(self, node):
self.body.append('</thead>\n')
def visit_title(self, node):
"""Only 6 section levels are supported by HTML."""
check_id = 0 # TODO: is this a bool (False) or a counter?
close_tag = '</p>\n'
if isinstance(node.parent, nodes.topic):
self.body.append(
self.starttag(node, 'p', '', CLASS='topic-title first'))
elif isinstance(node.parent, nodes.sidebar):
self.body.append(
self.starttag(node, 'p', '', CLASS='sidebar-title'))
elif isinstance(node.parent, nodes.Admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
elif isinstance(node.parent, nodes.table):
self.body.append(
self.starttag(node, 'caption', ''))
close_tag = '</caption>\n'
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
close_tag = '</h1>\n'
self.in_document_title = len(self.body)
else:
assert isinstance(node.parent, nodes.section)
h_level = self.section_level + self.initial_header_level - 1
atts = {}
if (len(node.parent) >= 2 and
isinstance(node.parent[1], nodes.subtitle)):
atts['CLASS'] = 'with-subtitle'
self.body.append(
self.starttag(node, 'h%s' % h_level, '', **atts))
atts = {}
if node.hasattr('refid'):
atts['class'] = 'toc-backref'
atts['href'] = '#' + node['refid']
if atts:
self.body.append(self.starttag({}, 'a', '', **atts))
close_tag = '</a></h%s>\n' % (h_level)
else:
close_tag = '</h%s>\n' % (h_level)
self.context.append(close_tag)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.title = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_title.extend(self.body)
del self.body[:]
def visit_title_reference(self, node):
self.body.append(self.starttag(node, 'cite', ''))
def depart_title_reference(self, node):
self.body.append('</cite>')
def visit_topic(self, node):
self.body.append(self.starttag(node, 'div', CLASS='topic'))
self.topic_classes = node['classes']
def depart_topic(self, node):
self.body.append('</div>\n')
self.topic_classes = []
def visit_transition(self, node):
self.body.append(self.emptytag(node, 'hr', CLASS='docutils'))
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version', meta=False)
def depart_version(self, node):
self.depart_docinfo_item()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
class SimpleListChecker(nodes.GenericNodeVisitor):
"""
Raise `nodes.NodeFound` if non-simple list item is encountered.
Here "simple" means a list item containing nothing other than a single
paragraph, a simple list, or a paragraph followed by a simple list.
"""
def default_visit(self, node):
raise nodes.NodeFound
def visit_bullet_list(self, node):
pass
def visit_enumerated_list(self, node):
pass
def visit_list_item(self, node):
children = []
for child in node.children:
if not isinstance(child, nodes.Invisible):
children.append(child)
if (children and isinstance(children[0], nodes.paragraph)
and (isinstance(children[-1], nodes.bullet_list)
or isinstance(children[-1], nodes.enumerated_list))):
children.pop()
if len(children) <= 1:
return
else:
raise nodes.NodeFound
def visit_paragraph(self, node):
raise nodes.SkipNode
def invisible_visit(self, node):
"""Invisible nodes should be ignored."""
raise nodes.SkipNode
visit_comment = invisible_visit
visit_substitution_definition = invisible_visit
visit_target = invisible_visit
visit_pending = invisible_visit
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/writers/html4css1/__init__.py",
"copies": "2",
"size": "67928",
"license": "mit",
"hash": 2739811762678523400,
"line_mean": 38.1515850144,
"line_max": 85,
"alpha_frac": 0.5500824402,
"autogenerated": false,
"ratio": 3.9710043259674968,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003374947927837049,
"num_lines": 1735
} |
"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""
__docformat__ = 'reStructuredText'
import sys
# Work around broken PyXML and obsolete python stdlib behaviour. (The stdlib
# replaces its own xml module with PyXML if the latter is installed. However,
# PyXML is no longer maintained and partially incompatible/buggy.) Reverse
# the order in which xml module and submodules are searched to import stdlib
# modules if they exist and PyXML modules if they do not exist in the stdlib.
#
# See http://sourceforge.net/tracker/index.php?func=detail&aid=3552403&group_id=38414&atid=422030
# and http://lists.fedoraproject.org/pipermail/python-devel/2012-July/000406.html
import xml
if "_xmlplus" in xml.__path__[0]: # PyXML sub-module
xml.__path__.reverse() # If both are available, prefer stdlib over PyXML
import xml.sax.saxutils
from io import StringIO
import docutils
from docutils import frontend, writers, nodes
class RawXmlError(docutils.ApplicationError): pass
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
None,
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = XMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = ''.join(visitor.output)
class XMLTranslator(nodes.GenericNodeVisitor):
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
# TODO: add stylesheet options similar to HTML and LaTeX writers?
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
xmlparser = xml.sax.make_parser()
"""SAX parser instance to check/exctract raw XML."""
xmlparser.setFeature(
"http://xml.org/sax/features/external-general-entities", True)
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
# Reporter
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
self.settings = settings = document.settings
self.indent = self.newline = ''
if settings.newlines:
self.newline = '\n'
if settings.indents:
self.newline = '\n'
self.indent = ' '
self.level = 0 # indentation level
self.in_simple = 0 # level of nesting inside mixed-content elements
# Output
self.output = []
if settings.xml_declaration:
self.output.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
self.output.append(self.doctype)
self.output.append(self.generator % docutils.__version__)
# initialize XML parser
        self.the_handle = TestXml()
self.xmlparser.setContentHandler(self.the_handle)
# generic visit and depart methods
# --------------------------------
def default_visit(self, node):
"""Default node visit method."""
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
self.level += 1
if isinstance(node, nodes.TextElement):
self.in_simple += 1
if not self.in_simple:
self.output.append(self.newline)
def default_departure(self, node):
"""Default node depart method."""
self.level -= 1
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.endtag())
if isinstance(node, nodes.TextElement):
self.in_simple -= 1
if not self.in_simple:
self.output.append(self.newline)
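    # Editor's note -- illustrative sketch, not part of docutils: with
    # --indents, a one-paragraph document should serialize roughly as
    #   <document source="...">
    #       <paragraph>text</paragraph>
    #   </document>
    # TextElements such as <paragraph> count as "simple", so their content
    # and end tag stay on one line.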
# specific visit and depart methods
# ---------------------------------
def visit_Text(self, node):
text = xml.sax.saxutils.escape(node.astext())
self.output.append(text)
def depart_Text(self, node):
pass
def visit_raw(self, node):
if 'xml' not in node.get('format', '').split():
# skip other raw content?
# raise nodes.SkipNode
self.default_visit(node)
return
# wrap in <raw> element
self.default_visit(node) # or not?
xml_string = node.astext()
self.output.append(xml_string)
self.default_departure(node) # or not?
# Check validity of raw XML:
if isinstance(xml_string, str) and sys.version_info < (3,):
xml_string = xml_string.encode('utf8')
try:
self.xmlparser.parse(StringIO(xml_string))
except xml.sax._exceptions.SAXParseException as error:
col_num = self.the_handle.locator.getColumnNumber()
line_num = self.the_handle.locator.getLineNumber()
srcline = node.line
if not isinstance(node.parent, nodes.TextElement):
srcline += 2 # directive content start line
msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
col_num, line_num, node.astext())
self.warn(msg, source=node.source, line=srcline+line_num-1)
raise nodes.SkipNode # content already processed
class TestXml(xml.sax.ContentHandler):
def setDocumentLocator(self, locator):
self.locator = locator
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/writers/docutils_xml.py",
"copies": "2",
"size": "6923",
"license": "mit",
"hash": 1082699349224629100,
"line_mean": 35.0572916667,
"line_max": 97,
"alpha_frac": 0.6229958111,
"autogenerated": false,
"ratio": 4.011008111239861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012323004012354678,
"num_lines": 192
} |
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class TitlePromoter(Transform):
"""
Abstract base class for DocTitle and SectionSubTitle transforms.
"""
def promote_title(self, node):
"""
Transform the following tree::
<node>
<section>
<title>
...
into ::
<node>
<title>
...
`node` is normally a document.
"""
# Type check
if not isinstance(node, nodes.Element):
raise TypeError('node must be of Element-derived type.')
# `node` must not have a title yet.
assert not (len(node) and isinstance(node[0], nodes.title))
section, index = self.candidate_index(node)
if index is None:
return None
# Transfer the section's attributes to the node:
# NOTE: Change second parameter to False to NOT replace
# attributes that already exist in node with those in
# section
# NOTE: Remove third parameter to NOT copy the 'source'
# attribute from section
node.update_all_atts_concatenating(section, True, True)
# setup_child is called automatically for all nodes.
node[:] = (section[:1] # section title
+ node[:index] # everything that was in the
# node before the section
+ section[1:]) # everything that was in the section
assert isinstance(node[0], nodes.title)
return 1
def promote_subtitle(self, node):
"""
Transform the following node tree::
<node>
<title>
<section>
<title>
...
into ::
<node>
<title>
<subtitle>
...
"""
# Type check
if not isinstance(node, nodes.Element):
raise TypeError('node must be of Element-derived type.')
subsection, index = self.candidate_index(node)
if index is None:
return None
subtitle = nodes.subtitle()
# Transfer the subsection's attributes to the new subtitle
# NOTE: Change second parameter to False to NOT replace
# attributes that already exist in node with those in
# section
# NOTE: Remove third parameter to NOT copy the 'source'
# attribute from section
subtitle.update_all_atts_concatenating(subsection, True, True)
# Transfer the contents of the subsection's title to the
# subtitle:
subtitle[:] = subsection[0][:]
node[:] = (node[:1] # title
+ [subtitle]
# everything that was before the section:
+ node[1:index]
# everything that was in the subsection:
+ subsection[1:])
return 1
def candidate_index(self, node):
"""
Find and return the promotion candidate and its index.
Return (None, None) if no valid candidate was found.
"""
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index], index
class DocTitle(TitlePromoter):
"""
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
two-step transform to "raise" or "promote" the title(s) (and their
corresponding section contents) to the document level.
1. If the document contains a single top-level section as its
first non-comment element, the top-level section's title
becomes the document's title, and the top-level section's
contents become the document's immediate contents. The lone
top-level section header must be the first non-comment element
in the document.
For example, take this input text::
=================
Top-Level Title
=================
A paragraph.
Once parsed, it looks like this::
<document>
<section names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
After running the DocTitle transform, we have::
<document names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
2. If step 1 successfully determines the document title, we
continue by checking for a subtitle.
If the lone top-level section itself contains a single
second-level section as its first non-comment element, that
section's title is promoted to the document's subtitle, and
that section's contents become the document's immediate
contents. Given this input text::
=================
Top-Level Title
=================
Second-Level Title
~~~~~~~~~~~~~~~~~~
A paragraph.
After parsing and running the Section Promotion transform, the
result is::
<document names="top-level title">
<title>
Top-Level Title
<subtitle names="second-level title">
Second-Level Title
<paragraph>
A paragraph.
(Note that the implicit hyperlink target generated by the
"Second-Level Title" is preserved on the "subtitle" element
itself.)
Any comment elements occurring before the document title or
subtitle are accumulated and inserted as the first body elements
after the title(s).
This transform also sets the document's metadata title
(document['title']).
.. _reStructuredText: http://docutils.sf.net/rst.html
"""
default_priority = 320
def set_metadata(self):
"""
Set document['title'] metadata title from the following
sources, listed in order of priority:
* Existing document['title'] attribute.
* "title" setting.
* Document title node (as promoted by promote_title).
"""
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['title'] = self.document[0].astext()
def apply(self):
if getattr(self.document.settings, 'doctitle_xform', 1):
# promote_(sub)title defined in TitlePromoter base class.
if self.promote_title(self.document):
# If a title has been promoted, also try to promote a
# subtitle.
self.promote_subtitle(self.document)
# Set document['title'].
self.set_metadata()
class SectionSubTitle(TitlePromoter):
"""
This works like document subtitles, but for sections. For example, ::
<section>
<title>
Title
<section>
<title>
Subtitle
...
is transformed into ::
<section>
<title>
Title
<subtitle>
Subtitle
...
For details refer to the docstring of DocTitle.
"""
default_priority = 350
def apply(self):
if not getattr(self.document.settings, 'sectsubtitle_xform', 1):
return
for section in self.document.traverse(nodes.section):
# On our way through the node tree, we are deleting
# sections, but we call self.promote_subtitle for those
# sections nonetheless. To do: Write a test case which
# shows the problem and discuss on Docutils-develop.
self.promote_subtitle(section)
class DocInfo(Transform):
"""
This transform is specific to the reStructuredText_ markup syntax;
see "Bibliographic Fields" in the `reStructuredText Markup
Specification`_ for a high-level description. This transform
should be run *after* the `DocTitle` transform.
Given a field list as the first non-comment element after the
document title and subtitle (if present), registered bibliographic
field names are transformed to the corresponding DTD elements,
becoming child elements of the "docinfo" element (except for a
dedication and/or an abstract, which become "topic" elements after
"docinfo").
For example, given this document fragment after parsing::
<document>
<title>
Document Title
<field_list>
<field>
<field_name>
Author
<field_body>
<paragraph>
A. Name
<field>
<field_name>
Status
<field_body>
<paragraph>
$RCSfile$
...
After running the bibliographic field list transform, the
resulting document tree would look like this::
<document>
<title>
Document Title
<docinfo>
<author>
A. Name
<status>
frontmatter.py
...
The "Status" field contained an expanded RCS keyword, which is
normally (but optionally) cleaned up by the transform. The sole
contents of the field body must be a paragraph containing an
expanded RCS keyword of the form "$keyword: expansion text $". Any
RCS keyword can be processed in any bibliographic field. The
dollar signs and leading RCS keyword name are removed. Extra
processing is done for the following RCS keywords:
- "RCSfile" expands to the name of the file in the RCS or CVS
repository, which is the name of the source file with a ",v"
suffix appended. The transform will remove the ",v" suffix.
- "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
time zone). The RCS Keywords transform will extract just the
date itself and transform it to an ISO 8601 format date, as in
"2000-12-31".
(Since the source file for this text is itself stored under CVS,
we can't show an example of the "Date" RCS keyword because we
can't prevent any RCS keywords used in this explanation from
being expanded. Only the "RCSfile" keyword is stable; its
expansion text changes only if the file name changes.)
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _reStructuredText Markup Specification:
http://docutils.sf.net/docs/ref/rst/restructuredtext.html
"""
default_priority = 340
biblio_nodes = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Canonical field name (lowcased) to node class name mapping for
bibliographic fields (field_list)."""
def apply(self):
if not getattr(self.document.settings, 'docinfo_xform', 1):
return
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None:
return
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
(nodes.Titular, nodes.Decorative))
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
bibliofields = self.language.bibliographic_fields
labels = self.language.labels
topics = {'dedication': None, 'abstract': None}
for field in field_list:
try:
name = field[0][0].astext()
normedname = nodes.fully_normalize_name(name)
if not (len(field) == 2 and normedname in bibliofields
and self.check_empty_biblio_field(field, name)):
raise TransformError
canonical = bibliofields[normedname]
biblioclass = self.biblio_nodes[canonical]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
elif issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[canonical]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
'', title, classes=[canonical], *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
docinfo.append(field)
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
for name in ('dedication', 'abstract'):
if topics[name]:
nodelist.append(topics[name])
return nodelist
def check_empty_biblio_field(self, field, name):
if len(field[-1]) < 1:
field[-1] += self.document.reporter.warning(
'Cannot extract empty bibliographic field "%s".' % name,
base_node=field)
return None
return 1
def check_compound_biblio_field(self, field, name):
if len(field[-1]) > 1:
field[-1] += self.document.reporter.warning(
'Cannot extract compound bibliographic field "%s".' % name,
base_node=field)
return None
if not isinstance(field[-1][0], nodes.paragraph):
field[-1] += self.document.reporter.warning(
'Cannot extract bibliographic field "%s" containing '
'anything other than a single paragraph.' % name,
base_node=field)
return None
return 1
rcs_keyword_substitutions = [
(re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
(re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
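    # Illustrative examples (added for clarity; not in the original module) of
    # what the substitutions above produce on expanded RCS keywords:
    #     "$Date: 2000/12/31 23:59:59 $"  -->  "2000-12-31"
    #     "$RCSfile: frontmatter.py,v $"  -->  "frontmatter.py"
    #     "$Revision: 1.42 $"             -->  "1.42"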
def extract_authors(self, field, name, docinfo):
try:
if len(field[1]) == 1:
if isinstance(field[1][0], nodes.paragraph):
authors = self.authors_from_one_paragraph(field)
elif isinstance(field[1][0], nodes.bullet_list):
authors = self.authors_from_bullet_list(field)
else:
raise TransformError
else:
authors = self.authors_from_paragraphs(field)
authornodes = [nodes.author('', '', *author)
for author in authors if author]
if len(authornodes) >= 1:
docinfo.append(nodes.authors('', *authornodes))
else:
raise TransformError
except TransformError:
field[-1] += self.document.reporter.warning(
'Bibliographic field "%s" incompatible with extraction: '
'it must contain either a single paragraph (with authors '
'separated by one of "%s"), multiple paragraphs (one per '
'author), or a bullet list with one paragraph (one author) '
'per item.'
% (name, ''.join(self.language.author_separators)),
base_node=field)
raise
def authors_from_one_paragraph(self, field):
text = field[1][0].astext().strip()
if not text:
raise TransformError
for authorsep in self.language.author_separators:
authornames = text.split(authorsep)
if len(authornames) > 1:
break
authornames = [author.strip() for author in authornames]
authors = [[nodes.Text(author)] for author in authornames if author]
return authors
def authors_from_bullet_list(self, field):
authors = []
for item in field[1][0]:
if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
raise TransformError
authors.append(item[0].children)
if not authors:
raise TransformError
return authors
def authors_from_paragraphs(self, field):
for item in field[1]:
if not isinstance(item, nodes.paragraph):
raise TransformError
authors = [item.children for item in field[1]]
return authors
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/transforms/frontmatter.py",
"copies": "2",
"size": "19298",
"license": "mit",
"hash": 2588048283898825700,
"line_mean": 35.5492424242,
"line_max": 82,
"alpha_frac": 0.5620789719,
"autogenerated": false,
"ratio": 4.724112607099143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6286191578999143,
"avg_score": null,
"num_lines": null
} |
"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text begins.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
from docutils.utils.code_analyzer import Lexer, LexerError
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role. This role is used
when no role is specified for a piece of interpreted text.
"""
_role_registry = {}
"""Mapping of canonical role names to role functions. Language-dependent role
names are defined in the ``language`` subpackage."""
_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""
def role(role_name, language_module, lineno, reporter):
"""
Locate and return a role function from its language-dependent name, along
with a list of system messages. If the role is not found in the current
language, check English. Return a 2-tuple: role function (``None`` if the
named role cannot be found) and a list of system messages.
"""
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[normname]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role_name, language_module.__name__))
else:
canonicalname = DEFAULT_INTERPRETED_ROLE
# If we didn't find it, try English as a fallback.
if not canonicalname:
try:
canonicalname = _fallback_language_module.roles[normname]
msg_text.append('Using English fallback for role "%s".'
% role_name)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
# Collect any messages that we generated.
if msg_text:
message = reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
# Look the role up in the registry, and return it.
if canonicalname in _role_registry:
role_fn = _role_registry[canonicalname]
register_local_role(normname, role_fn)
return role_fn, messages
else:
return None, messages # Error message will be generated by caller.
def register_canonical_role(name, role_fn):
"""
Register an interpreted text role by its canonical name.
:Parameters:
- `name`: The canonical name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_role_registry[name] = role_fn
def register_local_role(name, role_fn):
"""
Register an interpreted text role by its local or language-dependent name.
:Parameters:
- `name`: The local or language-dependent name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_roles[name] = role_fn
def set_implicit_options(role_fn):
"""
Add customization options to role functions, unless explicitly set or
disabled.
"""
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option
def register_generic_role(canonical_name, node_class):
"""For roles which simply wrap a given `node_class` around the text."""
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role)
class GenericRole:
"""
Generic interpreted text role, where the interpreted text is simply
wrapped with the provided node class.
"""
def __init__(self, role_name, node_class):
self.name = role_name
self.node_class = node_class
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
set_classes(options)
return [self.node_class(rawtext, utils.unescape(text), **options)], []
class CustomRole:
"""
Wrapper for custom interpreted text roles.
"""
def __init__(self, role_name, base_role, options={}, content=[]):
self.name = role_name
self.base_role = base_role
self.options = None
if hasattr(base_role, 'options'):
self.options = base_role.options
self.content = None
if hasattr(base_role, 'content'):
self.content = base_role.content
self.supplied_options = options
self.supplied_content = content
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
opts = self.supplied_options.copy()
opts.update(options)
cont = list(self.supplied_content)
if cont and content:
cont += '\n'
cont.extend(content)
return self.base_role(role, rawtext, text, lineno, inliner,
options=opts, content=cont)
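    # Illustrative note (not part of the original source): CustomRole
    # instances are normally created by the "role" directive; e.g. the
    # (hypothetical) reST
    #
    #     .. role:: highlight(emphasis)
    #        :class: highlight
    #
    # registers a local role "highlight" that wraps its text in an emphasis
    # node carrying the extra class.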
def generic_custom_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
""""""
# Once nested inline markup is implemented, this and other methods should
# recursively call inliner.nested_parse().
set_classes(options)
return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
######################################################################
# Define and register the standard roles:
######################################################################
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
pepnum = int(text)
if pepnum < 0 or pepnum > 9999:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'PEP number must be a number from 0 to 9999; "%s" is invalid.'
% text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.pep_reference; so this is correct:
ref = (inliner.document.settings.pep_base_url
+ inliner.document.settings.pep_file_url_template % pepnum)
set_classes(options)
return [nodes.reference(rawtext, 'PEP ' + utils.unescape(text), refuri=ref,
**options)], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
rfcnum = int(text)
if rfcnum <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'RFC number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.rfc_reference, so this is correct:
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
set_classes(options)
node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
**options)
return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
if 'format' not in options:
msg = inliner.reporter.error(
'No format (Writer name) is associated with this role: "%s".\n'
'The "raw" role cannot be used directly.\n'
'Instead, use the "role" directive to create a new role with '
'an associated format.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
set_classes(options)
node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
node.source, node.line = inliner.reporter.get_source_and_line(lineno)
return [node], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
set_classes(options)
language = options.get('language', '')
classes = ['code']
if 'classes' in options:
classes.extend(options['classes'])
if language and language not in classes:
classes.append(language)
try:
tokens = Lexer(utils.unescape(text, 1), language,
inliner.document.settings.syntax_highlight)
except LexerError, error:
msg = inliner.reporter.warning(error)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.literal(rawtext, '', classes=classes)
# analyze content and add nodes for every token
for classes, value in tokens:
# print (classes, value)
if classes:
node += nodes.inline(value, value, classes=classes)
else:
# insert as Text to decrease the verbosity of the output
node += nodes.Text(value, value)
return [node], []
code_role.options = {'class': directives.class_option,
'language': directives.unchanged}
register_canonical_role('code', code_role)
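# Usage sketch (assumed, typical docutils practice; not part of this module):
# a language-specific code role can be derived in reST via
#
#     .. role:: python(code)
#        :language: python
#
# after which :python:`print(1)` yields a <literal> element with classes
# "code python" and token-classed inline children, as built above.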
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
i = rawtext.find('`')
text = rawtext.split('`')[1]
node = nodes.math(rawtext, text)
return [node], []
register_canonical_role('math', math_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
msg = inliner.reporter.error(
'Interpreted text role "%s" not implemented.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
unimplemented_role)
def set_classes(options):
"""
Auxiliary function to set options['classes'] and delete
options['class'].
"""
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class']
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/roles.py",
"copies": "2",
"size": "14696",
"license": "mit",
"hash": 3033968474072738000,
"line_mean": 36.3944020356,
"line_max": 79,
"alpha_frac": 0.6495645073,
"autogenerated": false,
"ratio": 4.109619686800895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5759184194100895,
"avg_score": null,
"num_lines": null
} |
"""
Python 2/3 compatibility definitions.
This module currently provides the following helper symbols:
* bytes (name of byte string type; str in 2.x, bytes in 3.x)
* b (function converting a string literal to an ASCII byte string;
can be also used to convert a Unicode string into a byte string)
* u_prefix (unicode repr prefix: 'u' in 2.x, '' in 3.x)
(Required in docutils/test/test_publisher.py)
* BytesIO (a StringIO class that works with bytestrings)
"""
import sys
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
    from StringIO import StringIO as BytesIO # io.StringIO is unicode-only under 2.x
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return s.encode('latin1')
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
# using this hack since 2to3 "fixes" the relative import
# when using ``from io import BytesIO``
BytesIO = __import__('io').BytesIO
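    # Illustrative usage (added comment, not original): with the definitions
    # above the same code runs unchanged under 2.x and 3.x, e.g.
    #     data = b('abc')         # ASCII byte string on either version
    #     stream = BytesIO(data)  # byte-oriented in-memory file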
if sys.version_info < (2,5):
    import __builtin__
    def __import__(name, globals={}, locals={}, fromlist=[], level=-1):
        """Compatibility definition for Python 2.4.
        Silently ignore the `level` argument missing in Python < 2.5.
        """
        # we need the level arg because the default changed in Python 3.3
        # (`builtins` does not exist before Python 3, so use `__builtin__` here)
        return __builtin__.__import__(name, globals, locals, fromlist)
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/_compat.py",
"copies": "2",
"size": "1532",
"license": "mit",
"hash": -8611262683933146000,
"line_mean": 30.9166666667,
"line_max": 73,
"alpha_frac": 0.6462140992,
"autogenerated": false,
"ratio": 3.801488833746898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5447702932946898,
"avg_score": null,
"num_lines": null
} |
from setuptools import setup, find_packages
setup(
name = 'docutils-aafigure',
version = '0.3',
description = "ASCII art figures for reStructuredText",
long_description = """\
This package provides a docutils directive that makes it possible to integrate
ASCII art figures directly into the text.
reST example::
.. aafigure::
+-----+ ^
| | |
--->+ +---o--->
| | |
+-----+ V
Please see README.txt for examples.
requires docutils (>= 0.5).
""",
author = 'Chris Liechti',
author_email = '[email protected]',
install_requires = ['aafigure>=0.2', 'docutils>=0.5'],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms = 'any',
py_modules = [ 'aafigure_directive'],
entry_points = {
'docutils.parsers.rst.directives': [
'aafigure = aafigure_directive:AAFigureDirective'
],
},
)
| {
"repo_name": "aquavitae/aafigure",
"path": "docutils/setup-docutils-plugin.py",
"copies": "3",
"size": "1422",
"license": "bsd-3-clause",
"hash": 9019880401935502000,
"line_mean": 26.3461538462,
"line_max": 77,
"alpha_frac": 0.5745428973,
"autogenerated": false,
"ratio": 3.843243243243243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014768657912293478,
"num_lines": 52
} |
"""
Auxiliary transforms mainly to be used by Writer components.
This module is called "writer_aux" because otherwise there would be
conflicting imports like this one::
from docutils import writers
from docutils.transforms import writers
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = True
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = False
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
    Transform specific admonitions, like this::
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/transforms/writer_aux.py",
"copies": "4",
"size": "2561",
"license": "mit",
"hash": 7375289202569764000,
"line_mean": 28.1022727273,
"line_max": 79,
"alpha_frac": 0.579461148,
"autogenerated": false,
"ratio": 4.742592592592593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7322053740592593,
"avg_score": null,
"num_lines": null
} |
"""Reader for existing document trees."""
from docutils import readers, utils, transforms
class Reader(readers.ReReader):
"""
Adapt the Reader API for an existing document tree.
The existing document tree must be passed as the ``source`` parameter to
the `docutils.core.Publisher` initializer, wrapped in a
`docutils.io.DocTreeInput` object::
pub = docutils.core.Publisher(
..., source=docutils.io.DocTreeInput(document), ...)
The original document settings are overridden; if you want to use the
settings of the original document, pass ``settings=document.settings`` to
the Publisher call above.
"""
supported = ('doctree',)
config_section = 'doctree reader'
config_section_dependencies = ('readers',)
def parse(self):
"""
No parsing to do; refurbish the document tree instead.
Overrides the inherited method.
"""
self.document = self.input
# Create fresh Transformer object, to be populated from Writer
# component.
self.document.transformer = transforms.Transformer(self.document)
# Replace existing settings object with new one.
self.document.settings = self.settings
# Create fresh Reporter object because it is dependent on
# (new) settings.
self.document.reporter = utils.new_reporter(
self.document.get('source', ''), self.document.settings)
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/readers/doctree.py",
"copies": "4",
"size": "1560",
"license": "mit",
"hash": 8778457277380569000,
"line_mean": 32.9130434783,
"line_max": 77,
"alpha_frac": 0.6705128205,
"autogenerated": false,
"ratio": 4.469914040114613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
__author__ = 'Martin Felder and Frank Sehnke'
import math, imp
from matplotlib.lines import Line2D
from pylab import clf, plot, axes, show, xlabel, ylabel, title as set_title, savefig, ioff, draw_if_interactive
class MultilinePlotter:
""" Basic plotting class build on pylab
Implementing by instancing the class with the number of different plots to show.
Every plot has an id so adding data is done by addData(id, xValue, yValue) of the given data point
:todo: Add possibility to stick markers to the plots
:todo: Some error checking and documentation
:todo: Derive from this to make classes for trn/tst data plotting with different linestyles
"""
# some nice color definitions for graphs (from colorbrewer.org)
graphColor = [(0.894117647, 0.101960784, 0.109803922), \
(0.215686275, 0.494117647, 0.721568627), \
(0.301960784, 0.68627451, 0.290196078), \
(0.596078431, 0.305882353, 0.639215686), \
(1, 0.498039216, 0), \
(1, 1, 0.2), \
(0.650980392, 0.337254902, 0.156862745), \
(0.968627451, 0.505882353, 0.749019608), \
(0.6, 0.6, 0.6)]
def __init__(self, maxLines=1, autoscale=0.0, **kwargs):
"""
:key maxLines: Number of Plots to draw and so max ID.
:key autoscale: If set to a factor > 1, axes are automatically expanded whenever out-range data points are added
:var indexList: The x-component of the data points
        :var dataList: The y-component of the data points"""
self.indexList = []
self.dataList = []
self.Lines = []
self.autoscale = autoscale
clf()
self.Axes = axes(**kwargs)
self.nbLines = 0
self.defaultLineStyle = {}
self._checkMaxId(maxLines - 1)
self.replot = True # is the plot still current?
self.currentID = None
self.offset = 0 # external references to IDs are modified by this
def setOffset(self, offs):
""" Set an offset that modifies all subsequent references to line IDs
:key offs: The desired offset """
self.offset = offs
#def createFigure(self, size=[12,8], interactive=True):
#""" initialize the graphics output window """
    ## FIXME: doesn't work, because axes() in the constructor already creates a figure
#pylab.figure(figsize=size)
#if interactive: pylab.ion()
def _checkMaxId(self, id):
""" Appends additional lines as necessary
:key id: Lines up to this id are added automatically """
if id >= self.nbLines:
for i in range(self.nbLines, id + 1):
# create a new line with corresponding x/y data, and attach it to the plot
l = Line2D([], [], color=self.graphColor[i % 9], **self.defaultLineStyle)
self.Lines.append(l)
self.Axes.add_line(l)
self.indexList.append([])
self.dataList.append([])
self.nbLines = id + 1
def addData(self, id0, x, y):
""" The given data point or points is appended to the given line.
:key id0: The plot ID (counted from 0) the data point(s) belong to.
:key x: The x-component of the data point(s)
:key y: The y-component of the data point(s)"""
id = id0 + self.offset
if not (isinstance(x, list) | isinstance(x, tuple)):
self._checkMaxId(id)
self.indexList[id].append(x)
self.dataList[id].append(y)
self.currentID = id
else:
for i, xi in enumerate(x):
self.addData(id0, xi, y[i])
self.replot = True
def setData(self, id0, x, y):
""" Data series id0 is replaced by the given lists
:key id0: The plot ID (counted from 0) the data point(s) belong to.
:key x: The x-component of the data points
:key y: The y-component of the data points"""
id = id0 + self.offset
self._checkMaxId(id)
self.indexList[id] = x
self.dataList[id] = y
self.replot = True
def saveData(self, filename):
""" Writes the data series for all points to a file
:key filename: The name of the output file """
file = open(filename, "w")
for i in range(self.nbLines):
datLen = len(self.indexList[i])
for j in range(datLen):
file.write(repr(self.indexList[i][j]) + "\n")
file.write(repr(self.dataList[i][j]) + "\n")
file.close()
def setLabels(self, x='', y='', title=''):
""" set axis labels and title """
self.Axes.set_xlabel(x)
self.Axes.set_ylabel(y)
self.Axes.set_title(title)
def setLegend(self, *args, **kwargs):
""" hand parameters to the legend """
self.Axes.legend(*args, **kwargs)
def setLineStyle(self, id=None, **kwargs):
""" hand parameters to the specified line(s), and set them as default for new lines
:key id: The line or lines (list!) to be modified - defaults to last one added """
if id is None:
id = self.currentID
if isinstance(id, list) | isinstance(id, tuple):
# apply to specified list of lines
self._checkMaxId(max(id) + self.offset)
for i in id:
self.Lines[i + self.offset].set(**kwargs)
elif id >= 0:
# apply to selected line
self._checkMaxId(id + self.offset)
self.Lines[id + self.offset].set(**kwargs)
else:
# apply to all lines
for l in self.Lines:
l.set(**kwargs)
# set as new default linestyle
        if kwargs.has_key('color'):
            kwargs.pop('color') # dict.popitem() takes no argument; pop() removes the key
self.defaultLineStyle = kwargs
def update(self):
""" Updates the current plot, if necessary """
if not self.replot:
return
xr = list(self.Axes.get_xlim())
yr = list(self.Axes.get_ylim())
for i in range(self.nbLines):
self.Lines[i].set_data(self.indexList[i], self.dataList[i])
if self.autoscale > 1.0:
if self.indexList[i][0] < xr[0]:
xr[0] = self.indexList[i][0]
ymn = min(self.dataList[i])
if ymn < yr[0]:
yr[0] = ymn
while self.indexList[i][-1] > xr[1]:
xr[1] = (xr[1] - xr[0]) * self.autoscale + xr[0]
ymx = max(self.dataList[i])
while ymx > yr[1]:
yr[1] = (yr[1] - yr[0]) * self.autoscale + yr[0]
if self.autoscale > 1.0:
self.Axes.set_xlim(tuple(xr))
self.Axes.set_ylim(tuple(yr))
#self.Axes.draw()
#pylab.show()
draw_if_interactive()
self.replot = False
def show(self, xLabel='', yLabel='', title='', popup=False, imgfile=None):
""" Plots the data internally and saves an image of it to the plotting directory.
:key title: The title of the plot.
        :key xLabel: The label for the x-axis
        :key yLabel: The label for the y-axis
:key popup: also produce a popup window with the image?"""
clf()
for i in range(self.nbLines):
plot(self.indexList[i], self.dataList[i])
xlabel(xLabel)
ylabel(yLabel)
        set_title(title) # pylab's title(); the bare name is shadowed by the parameter
        if imgfile is None:
imgfile = imp.find_module('pybrain')[1] + "/tools/plotting/plot.png"
savefig(imgfile)
if popup:
ioff()
show()
"""Small example to demonstrate how the plot class can be used"""
if __name__ == "__main__":
pbplot = MultilinePlotter(7)
for i in range(400000):
if i / 100000 == i / 100000.0:
for j in range(7):
pbplot.addData(j, i, math.sqrt(float(i * (j + 1))))
pbplot.show("WorldInteractions", "Fitness", "Example Plot", True)
| {
"repo_name": "rbalda/neural_ocr",
"path": "env/lib/python2.7/site-packages/pybrain/tools/plotting/multiline.py",
"copies": "1",
"size": "8055",
"license": "mit",
"hash": 7157790535138072000,
"line_mean": 36.9952830189,
"line_max": 116,
"alpha_frac": 0.5572936065,
"autogenerated": false,
"ratio": 3.6747262773722627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9689448375192674,
"avg_score": 0.008514301735917728,
"num_lines": 212
} |
__author__ = 'Martin Felder and Frank Sehnke'
import math, imp
from matplotlib.lines import Line2D
from pylab import clf, plot, axes, show, xlabel, ylabel, title as set_title, savefig, ioff, draw_if_interactive #@UnresolvedImport
class MultilinePlotter:
""" Basic plotting class build on pylab
Implementing by instancing the class with the number of different plots to show.
Every plot has an id so adding data is done by addData(id, xValue, yValue) of the given data point
@todo: Add possibility to stick markers to the plots
@todo: Some error checking and documentation
@todo: Derive from this to make classes for trn/tst data plotting with different linestyles
"""
# some nice color definitions for graphs (from colorbrewer.org)
graphColor = [(0.894117647, 0.101960784, 0.109803922),\
(0.215686275, 0.494117647, 0.721568627),\
(0.301960784, 0.68627451, 0.290196078),\
(0.596078431, 0.305882353, 0.639215686),\
(1, 0.498039216, 0),\
(1, 1, 0.2),\
(0.650980392, 0.337254902, 0.156862745),\
(0.968627451, 0.505882353, 0.749019608),\
(0.6, 0.6, 0.6)]
def __init__(self, maxLines = 1, autoscale = 0.0, **kwargs):
"""
@param maxLines: Number of Plots to draw and so max ID.
@param autoscale: If set to a factor > 1, axes are automatically expanded whenever out-range data points are added
@var indexList: The x-component of the data points
        @var dataList: The y-component of the data points"""
self.indexList = []
self.dataList = []
self.Lines = []
self.autoscale = autoscale
clf()
self.Axes = axes(**kwargs)
self.nbLines = 0
self.defaultLineStyle = {}
self._checkMaxId(maxLines-1)
self.replot = True # is the plot still current?
self.currentID = None
self.offset = 0 # external references to IDs are modified by this
def setOffset(self, offs):
""" Set an offset that modifies all subsequent references to line IDs
@param offs: The desired offset """
self.offset = offs
#def createFigure(self, size=[12,8], interactive=True):
#""" initialize the graphics output window """
    ## FIXME: doesn't work, because axes() in the constructor already creates a figure
#pylab.figure(figsize=size)
#if interactive: pylab.ion()
def _checkMaxId(self, id):
""" Appends additional lines as necessary
@param id: Lines up to this id are added automatically """
if id >= self.nbLines:
for i in range(self.nbLines, id+1):
# create a new line with corresponding x/y data, and attach it to the plot
l = Line2D([],[], color=self.graphColor[i % 9], **self.defaultLineStyle)
self.Lines.append(l)
self.Axes.add_line(l)
self.indexList.append([])
self.dataList.append([])
self.nbLines=id+1
def addData(self, id0, x, y):
""" The given data point or points is appended to the given line.
@param id0: The plot ID (counted from 0) the data point(s) belong to.
@param x: The x-component of the data point(s)
@param y: The y-component of the data point(s)"""
id = id0 + self.offset
if not (isinstance(x,list) | isinstance(x,tuple)):
self._checkMaxId(id)
self.indexList[id].append(x)
self.dataList[id].append(y)
self.currentID = id
else:
for i, xi in enumerate(x):
self.addData(id0, xi, y[i])
self.replot = True
def setData(self,id0, x, y):
""" Data series id0 is replaced by the given lists
@param id0: The plot ID (counted from 0) the data point(s) belong to.
@param x: The x-component of the data points
@param y: The y-component of the data points"""
id = id0 + self.offset
self._checkMaxId(id)
self.indexList[id] = x
self.dataList[id] = y
self.replot = True
def saveData(self, filename):
""" Writes the data series for all points to a file
@param filename: The name of the output file """
file = open(filename,"w")
for i in range(self.nbLines):
datLen=len(self.indexList[i])
for j in range(datLen):
file.write(repr(self.indexList[i][j])+"\n")
file.write(repr(self.dataList[i][j])+"\n")
file.close()
def setLabels(self, x= '', y= '', title = ''):
""" set axis labels and title """
self.Axes.set_xlabel(x)
self.Axes.set_ylabel(y)
self.Axes.set_title(title)
def setLegend(self, *args,**kwargs):
""" hand parameters to the legend """
self.Axes.legend(*args,**kwargs)
def setLineStyle(self, id=None, **kwargs):
""" hand parameters to the specified line(s), and set them as default for new lines
@param id: The line or lines (list!) to be modified - defaults to last one added """
if id is None:
id = self.currentID
if isinstance(id,list) | isinstance(id,tuple):
# apply to specified list of lines
self._checkMaxId(max(id)+self.offset)
for i in id:
self.Lines[i+self.offset].set(**kwargs)
elif id >= 0:
# apply to selected line
self._checkMaxId(id+self.offset)
self.Lines[id+self.offset].set(**kwargs)
else:
# apply to all lines
for l in self.Lines:
l.set(**kwargs)
# set as new default linestyle
        if kwargs.has_key('color'):
            kwargs.pop('color') # dict.popitem() takes no argument; pop() removes the key
self.defaultLineStyle = kwargs
def update(self):
""" Updates the current plot, if necessary """
if not self.replot:
return
xr = list(self.Axes.get_xlim())
yr = list(self.Axes.get_ylim())
for i in range(self.nbLines):
self.Lines[i].set_data(self.indexList[i], self.dataList[i])
if self.autoscale > 1.0:
if self.indexList[i][0] < xr[0]:
xr[0] = self.indexList[i][0]
ymn = min(self.dataList[i])
if ymn < yr[0]:
yr[0] = ymn
while self.indexList[i][-1] > xr[1]:
xr[1] = (xr[1]-xr[0])*self.autoscale + xr[0]
ymx = max(self.dataList[i])
while ymx > yr[1]:
yr[1] = (yr[1]-yr[0])*self.autoscale + yr[0]
if self.autoscale > 1.0:
self.Axes.set_xlim( tuple(xr) )
self.Axes.set_ylim( tuple(yr) )
#self.Axes.draw()
#pylab.show()
draw_if_interactive()
self.replot = False
def show(self, xLabel = '', yLabel = '', title = '', popup = False, imgfile = None):
""" Plots the data internally and saves an image of it to the plotting directory.
@param title: The title of the plot.
        @param xLabel: The label for the x-axis
        @param yLabel: The label for the y-axis
@param popup: also produce a popup window with the image?"""
clf()
for i in range(self.nbLines):
plot(self.indexList[i], self.dataList[i])
xlabel(xLabel)
ylabel(yLabel)
        set_title(title) # pylab's title(); the bare name is shadowed by the parameter
        if imgfile is None:
imgfile = imp.find_module('pybrain')[1]+"/tools/plotting/plot.png"
savefig(imgfile)
if popup:
ioff()
show()
"""Small example to demonstrate how the plot class can be used"""
if __name__ == "__main__":
pbplot=MultilinePlotter(7)
for i in range(400000):
if i/100000 == i/100000.0:
for j in range(7):
pbplot.addData(j, i, math.sqrt(float(i*(j+1))))
pbplot.show("WorldInteractions", "Fitness", "Example Plot", True)
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/tools/plotting/multiline.py",
"copies": "1",
"size": "8010",
"license": "bsd-3-clause",
"hash": 7957644033361940000,
"line_mean": 38.4581280788,
"line_max": 118,
"alpha_frac": 0.5664169788,
"autogenerated": false,
"ratio": 3.669262482821805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9646864736064118,
"avg_score": 0.017762945111537577,
"num_lines": 203
} |
__author__ = 'Martin Felder'
from .neuronlayer import NeuronLayer
from pybrain.tools.functions import safeExp
class MixtureDensityLayer(NeuronLayer):
""" Mixture of Gaussians output layer (Bishop 2006, Ch. 5.6) with diagonal
covariance matrix.
    Assumes inbuf contains K*(dim+2) neurons, with the first K giving the mixing
    coefficients, the next K the standard deviations, and the remaining K*dim the means.
"""
def __init__(self, dim, name = None, mix=5):
"""Initialize mixture density layer - mix gives the number of Gaussians
to mix, dim is the dimension of the target(!) vector."""
nUnits = mix * (dim + 2) # mean vec + stddev and mixing coeff
NeuronLayer.__init__(self, nUnits, name)
self.nGaussians = mix
self.nDims = dim
def _forwardImplementation(self, inbuf, outbuf):
"""Calculate layer outputs (Gaussian parameters etc., not function
values!) from given activations """
K = self.nGaussians
# Mixing parameters and stddevs
outbuf[0:K*2] = safeExp(inbuf[0:K*2])
outbuf[0:K] /= sum(outbuf[0:K])
# Means
outbuf[K*2:] = inbuf[K*2:]
def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
"""Calculate the derivatives of output wrt. corresponding input
activations."""
# Cannot calculate because we would need the targets!
# ==> we just pass through the stuff from the trainer, who takes care
# of the rest
inerr[:] = outerr
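# A minimal wiring sketch (illustrative; assumes the standard PyBrain
# network-building API and a 1-D target mixed from 3 Gaussians):
#
#     from pybrain.structure import FeedForwardNetwork, LinearLayer, \
#         SigmoidLayer, FullConnection
#     net = FeedForwardNetwork()
#     net.addInputModule(LinearLayer(1, name='in'))
#     net.addModule(SigmoidLayer(5, name='hidden'))
#     net.addOutputModule(MixtureDensityLayer(dim=1, mix=3, name='out'))
#     net.addConnection(FullConnection(net['in'], net['hidden']))
#     net.addConnection(FullConnection(net['hidden'], net['out']))
#     net.sortModules()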
| {
"repo_name": "ii0/pybrain",
"path": "pybrain/structure/modules/mixturedensity.py",
"copies": "25",
"size": "1573",
"license": "bsd-3-clause",
"hash": 7885338859778479000,
"line_mean": 37.3658536585,
"line_max": 79,
"alpha_frac": 0.624920534,
"autogenerated": false,
"ratio": 3.903225806451613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021651701054150076,
"num_lines": 41
} |
__author__ = 'Martin Felder'
import numpy as np
from pybrain.supervised.trainers import RPropMinusTrainer, BackpropTrainer
from pybrain.structure.modules.mixturedensity import MixtureDensityLayer
def gaussian(x, mean, stddev):
""" return value of homogenous Gaussian at given vector point
x: vector, mean: vector, stddev: scalar """
tmp = -0.5 * sum(((x-mean)/stddev)**2)
return np.exp(tmp) / (np.power(2.*np.pi, 0.5*len(x)) * stddev)
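# Quick check (illustrative): for d = 2, stddev = 1, the density at the mean
# is 1/(2*pi) ~= 0.1592:
#     gaussian(np.zeros(2), np.zeros(2), 1.0)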
class BackpropTrainerMix(BackpropTrainer):
""" Trainer for mixture model network. See Bishop 2006, Eqn. 5.153-5.157.
Due to PyBrain conventions it is more convenient (if not pretty) to treat the
MixtureDensityLayer as having a linear transfer function, and calculate
its derivative here."""
def setData(self, dataset):
# different output dimension check
self.ds = dataset
if dataset:
assert dataset.indim == self.module.indim
assert dataset.outdim == self.module.modulesSorted[-1].nDims
def _calcDerivs(self, seq):
""" calculate derivatives assuming we have a Network with a MixtureDensityLayer as output """
assert isinstance(self.module.modulesSorted[-1], MixtureDensityLayer)
self.module.reset()
for time, sample in enumerate(seq):
input = sample[0]
self.module.inputbuffer[time] = input
self.module.forward()
error = 0
nDims = self.module.modulesSorted[-1].nDims
nGauss = self.module.modulesSorted[-1].nGaussians
for time, sample in reversed(list(enumerate(seq))):
# Should these three lines be inside this 'for' block
# or outside? I moved them inside - Jack
gamma = []
means = []
stddevs = []
dummy, target = sample
par = self.module.outputbuffer[time] # parameters for mixture
# calculate error contributions from all Gaussians in the mixture
for k in range(nGauss):
coeff = par[k]
stddevs.append(par[k+nGauss])
idxm = 2*nGauss + k*nDims
means.append(par[idxm:idxm+nDims])
gamma.append(coeff * gaussian(target, means[-1], stddevs[-1]))
# calculate error for this pattern, and posterior for target
sumg = sum(gamma)
error -= np.log(sumg)
gamma = np.array(gamma)/sumg
invvariance = 1./par[nGauss:2*nGauss]**2
invstddev = 1./np.array(stddevs)
# calculate gradient wrt. mixture coefficients
grad_c = par[0:nGauss] - gamma
# calculate gradient wrt. standard deviations
grad_m = []
grad_s = []
for k in range(nGauss):
delta = means[k]-target
grad_m.append(gamma[k]*delta*invvariance[k])
grad_s.append(-gamma[k]*(np.dot(delta,delta)*invvariance[k]*invstddev[k] - invstddev[k]))
self.module.outputerror[time] = -np.r_[grad_c,grad_s,np.array(grad_m).flatten()]
self.module.backward()
return error, 1.0
class RPropMinusTrainerMix(BackpropTrainerMix,RPropMinusTrainer):
""" RProp trainer for mixture model network. See Bishop 2006, Eqn. 5.153-5.157. """
dummy = 0
| {
"repo_name": "Neural-Network/TicTacToe",
"path": "pybrain/supervised/trainers/mixturedensity.py",
"copies": "25",
"size": "3420",
"license": "bsd-3-clause",
"hash": 5983621002107026000,
"line_mean": 39.2352941176,
"line_max": 105,
"alpha_frac": 0.5932748538,
"autogenerated": false,
"ratio": 3.8383838383838382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
from docutils._compat import b
themes_dir_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
# Where else to look for a theme?
# Check working dir? Destination dir? Config dir? Plugins dir?
path = os.path.join(themes_dir_path, name)
if not os.path.isdir(path):
raise docutils.ApplicationError(
'Theme directory not found: %r (path: %r)' % (name, path))
return path
class Writer(html4css1.Writer):
settings_spec = html4css1.Writer.settings_spec + (
'S5 Slideshow Specific Options',
'For the S5/HTML writer, the --no-toc-backlinks option '
'(defined in General Docutils Options above) is the default, '
'and should not be changed.',
(('Specify an installed S5 theme by name. Overrides --theme-url. '
'The default theme name is "default". The theme files will be '
'copied into a "ui/<theme>" directory, in the same directory as the '
'destination file (output HTML). Note that existing theme files '
'will not be overwritten (unless --overwrite-theme-files is used).',
['--theme'],
{'default': 'default', 'metavar': '<name>',
'overrides': 'theme_url'}),
('Specify an S5 theme URL. The destination file (output HTML) will '
'link to this theme; nothing will be copied. Overrides --theme.',
['--theme-url'],
{'metavar': '<URL>', 'overrides': 'theme'}),
('Allow existing theme files in the ``ui/<theme>`` directory to be '
'overwritten. The default is not to overwrite theme files.',
['--overwrite-theme-files'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Keep existing theme files in the ``ui/<theme>`` directory; do not '
'overwrite any. This is the default.',
['--keep-theme-files'],
{'dest': 'overwrite_theme_files', 'action': 'store_false'}),
('Set the initial view mode to "slideshow" [default] or "outline".',
['--view-mode'],
{'choices': ['slideshow', 'outline'], 'default': 'slideshow',
'metavar': '<mode>'}),
('Normally hide the presentation controls in slideshow mode. '
'This is the default.',
['--hidden-controls'],
{'action': 'store_true', 'default': True,
'validator': frontend.validate_boolean}),
('Always show the presentation controls in slideshow mode. '
'The default is to hide the controls.',
['--visible-controls'],
{'dest': 'hidden_controls', 'action': 'store_false'}),
('Enable the current slide indicator ("1 / 15"). '
'The default is to disable it.',
['--current-slide'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Disable the current slide indicator. This is the default.',
['--no-current-slide'],
{'dest': 'current_slide', 'action': 'store_false'}),))
settings_default_overrides = {'toc_backlinks': 0}
config_section = 's5_html writer'
config_section_dependencies = ('writers', 'html4css1 writer')
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="%(view_mode)s" />
<meta name="controlVis" content="%(control_visibility)s" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
type="text/css" media="projection" id="operaFix" />\n"""
# The script element must go in front of the link elements to
# avoid a flash of unstyled content (FOUC), reproducible with
# Firefox.
disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""
layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
# <div class="topleft"></div>
# <div class="topright"></div>
# <div class="bottomleft"></div>
# <div class="bottomright"></div>
default_theme = 'default'
"""Name of the default theme."""
base_theme_file = '__base__'
"""Name of the file containing the name of the base theme."""
direct_theme_files = (
'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
"""Names of theme files directly linked to in the output HTML"""
indirect_theme_files = (
's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
"""Names of files used indirectly; imported or used by files in
`direct_theme_files`."""
required_theme_files = indirect_theme_files + direct_theme_files
"""Names of mandatory theme files."""
def __init__(self, *args):
html4css1.HTMLTranslator.__init__(self, *args)
#insert S5-specific stylesheet and script stuff:
self.theme_file_path = None
self.setup_theme()
view_mode = self.document.settings.view_mode
control_visibility = ('visible', 'hidden')[self.document.settings
.hidden_controls]
self.stylesheet.append(self.s5_stylesheet_template
% {'path': self.theme_file_path,
'view_mode': view_mode,
'control_visibility': control_visibility})
if not self.document.settings.current_slide:
self.stylesheet.append(self.disable_current_slide)
self.add_meta('<meta name="version" content="S5 1.1" />\n')
self.s5_footer = []
self.s5_header = []
self.section_count = 0
self.theme_files_copied = None
def setup_theme(self):
if self.document.settings.theme:
self.copy_theme()
elif self.document.settings.theme_url:
self.theme_file_path = self.document.settings.theme_url
else:
raise docutils.ApplicationError(
'No theme specified for S5/HTML writer.')
def copy_theme(self):
"""
Locate & copy theme files.
A theme may be explicitly based on another theme via a '__base__'
file. The default base theme is 'default'. Files are accumulated
from the specified theme, any base themes, and 'default'.
"""
settings = self.document.settings
path = find_theme(settings.theme)
theme_paths = [path]
self.theme_files_copied = {}
required_files_copied = {}
# This is a link (URL) in HTML, so we use "/", not os.sep:
self.theme_file_path = '%s/%s' % ('ui', settings.theme)
if settings._destination:
dest = os.path.join(
os.path.dirname(settings._destination), 'ui', settings.theme)
if not os.path.isdir(dest):
os.makedirs(dest)
else:
# no destination, so we can't copy the theme
return
default = False
while path:
for f in os.listdir(path): # copy all files from each theme
if f == self.base_theme_file:
continue # ... except the "__base__" file
if ( self.copy_file(f, path, dest)
and f in self.required_theme_files):
required_files_copied[f] = 1
if default:
break # "default" theme has no base theme
# Find the "__base__" file in theme directory:
base_theme_file = os.path.join(path, self.base_theme_file)
# If it exists, read it and record the theme path:
if os.path.isfile(base_theme_file):
lines = open(base_theme_file).readlines()
for line in lines:
line = line.strip()
if line and not line.startswith('#'):
path = find_theme(line)
if path in theme_paths: # check for duplicates (cycles)
path = None # if found, use default base
else:
theme_paths.append(path)
break
else: # no theme name found
path = None # use default base
else: # no base theme file found
path = None # use default base
if not path:
path = find_theme(self.default_theme)
theme_paths.append(path)
default = True
if len(required_files_copied) != len(self.required_theme_files):
# Some required files weren't found & couldn't be copied.
required = list(self.required_theme_files)
for f in list(required_files_copied.keys()):
required.remove(f)
raise docutils.ApplicationError(
'Theme files not found: %s'
% ', '.join(['%r' % f for f in required]))
files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')
def copy_file(self, name, source_dir, dest_dir):
"""
Copy file `name` from `source_dir` to `dest_dir`.
Return 1 if the file exists in either `source_dir` or `dest_dir`.
"""
source = os.path.join(source_dir, name)
dest = os.path.join(dest_dir, name)
if dest in self.theme_files_copied:
return 1
else:
self.theme_files_copied[dest] = 1
if os.path.isfile(source):
if self.files_to_skip_pattern.search(source):
return None
settings = self.document.settings
if os.path.exists(dest) and not settings.overwrite_theme_files:
settings.record_dependencies.add(dest)
else:
src_file = open(source, 'rb')
src_data = src_file.read()
src_file.close()
dest_file = open(dest, 'wb')
dest_dir = dest_dir.replace(os.sep, '/')
dest_file.write(src_data.replace(
b('ui/default'),
dest_dir[dest_dir.rfind('ui/'):].encode(
sys.getfilesystemencoding())))
dest_file.close()
settings.record_dependencies.add(source)
return 1
if os.path.isfile(dest):
return 1
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if self.math_header:
self.head.append(self.math_header)
header = ''.join(self.s5_header)
footer = ''.join(self.s5_footer)
title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
layout = self.layout_template % {'header': header,
'title': title,
'footer': footer}
self.fragment.extend(self.body)
self.body_prefix.extend(layout)
self.body_prefix.append('<div class="presentation">\n')
self.body_prefix.append(
self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
if not self.section_count:
self.body.append('</div>\n')
self.body_suffix.insert(0, '</div>\n')
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
def depart_footer(self, node):
start = self.context.pop()
self.s5_footer.append('<h2>')
self.s5_footer.extend(self.body[start:])
self.s5_footer.append('</h2>')
del self.body[start:]
def depart_header(self, node):
start = self.context.pop()
header = ['<div id="header">\n']
header.extend(self.body[start:])
header.append('\n</div>\n')
del self.body[start:]
self.s5_header.extend(header)
def visit_section(self, node):
if not self.section_count:
self.body.append('\n</div>\n')
self.section_count += 1
self.section_level += 1
if self.section_level > 1:
# dummy for matching div's
self.body.append(self.starttag(node, 'div', CLASS='section'))
else:
self.body.append(self.starttag(node, 'div', CLASS='slide'))
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.section):
level = self.section_level + self.initial_header_level - 1
if level == 1:
level = 2
tag = 'h%s' % level
self.body.append(self.starttag(node, tag, ''))
self.context.append('</%s>\n' % tag)
else:
html4css1.HTMLTranslator.visit_subtitle(self, node)
def visit_title(self, node):
html4css1.HTMLTranslator.visit_title(self, node)
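# Hedged usage sketch (not part of the original module): this writer is
# normally reached through docutils' publishing framework, as the stock
# rst2s5 front end does; the writer name 's5' is assumed to resolve to this
# package.
if __name__ == '__main__':
    from docutils.core import publish_cmdline
    publish_cmdline(writer_name='s5',
                    description='Generate an S5 slideshow from reStructuredText.')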
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/writers/s5_html/__init__.py",
"copies": "2",
"size": "14430",
"license": "mit",
"hash": 8775495460251253000,
"line_mean": 40.3467048711,
"line_max": 80,
"alpha_frac": 0.5617463617,
"autogenerated": false,
"ratio": 3.9839867476532302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003640681100015007,
"num_lines": 349
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(source_path=source,
encoding=encoding,
error_handler=error_handler)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
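# Hedged sketch (not part of this module): a minimal way to exercise the
# CSVTable directive above through the public docutils API; the reST source
# below is purely illustrative.
def _csv_table_demo():
    from docutils.core import publish_string  # public docutils API
    source = u"""\
.. csv-table:: Frozen Delights
   :header: "Treat", "Quantity"
   :widths: 15, 10

   "Albatross", 2.0
   "Crunchy Frog", 1.5
"""
    return publish_string(source, writer_name='html')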
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
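# Hedged sketch (not part of this module): the ListTable directive above
# consumes a uniform two-level bullet list; a minimal illustrative input,
# rendered through the public docutils API:
def _list_table_demo():
    from docutils.core import publish_string  # public docutils API
    source = u"""\
.. list-table:: Example
   :header-rows: 1
   :widths: 10 10

   * - Name
     - Value
   * - alpha
     - 1
"""
    return publish_string(source, writer_name='html')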
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/directives/tables.py",
"copies": "2",
"size": "20005",
"license": "mit",
"hash": 6184397134442494000,
"line_mean": 43.1611479029,
"line_max": 87,
"alpha_frac": 0.5480129968,
"autogenerated": false,
"ratio": 4.291997425445183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009987222905986286,
"num_lines": 453
} |
"""
Directives for document parts.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, languages
from docutils.transforms import parts
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Contents(Directive):
"""
Table of contents.
The table of contents is generated in two passes: initial parse and
transform. During the initial parse, a 'pending' element is generated
which acts as a placeholder, storing the TOC title and any options
internally. At a later stage in the processing, the 'pending' element is
replaced by a 'topic' element, a title and the table of contents proper.
"""
backlinks_values = ('top', 'entry', 'none')
def backlinks(arg):
value = directives.choice(arg, Contents.backlinks_values)
if value == 'none':
return None
else:
return value
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'depth': directives.nonnegative_int,
'local': directives.flag,
'backlinks': backlinks,
'class': directives.class_option}
def run(self):
if not (self.state_machine.match_titles
or isinstance(self.state_machine.node, nodes.sidebar)):
raise self.error('The "%s" directive may not be used within '
'topics or body elements.' % self.name)
document = self.state_machine.document
language = languages.get_language(document.settings.language_code,
document.reporter)
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
messages = []
if 'local' in self.options:
title = None
else:
title = nodes.title('', language.labels['contents'])
topic = nodes.topic(classes=['contents'])
topic['classes'] += self.options.get('class', [])
# the latex2e writer needs source and line for a warning:
topic.source, topic.line = self.state_machine.get_source_and_line()
topic.line -= 1
if 'local' in self.options:
topic['classes'].append('local')
if title:
name = title.astext()
topic += title
else:
name = language.labels['contents']
name = nodes.fully_normalize_name(name)
if not document.has_name(name):
topic['names'].append(name)
document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents, rawsource=self.block_text)
pending.details.update(self.options)
document.note_pending(pending)
topic += pending
return [topic] + messages
class Sectnum(Directive):
"""Automatic section numbering."""
option_spec = {'depth': int,
'start': int,
'prefix': directives.unchanged_required,
'suffix': directives.unchanged_required}
def run(self):
pending = nodes.pending(parts.SectNum)
pending.details.update(self.options)
self.state_machine.document.note_pending(pending)
return [pending]
class Header(Directive):
"""Contents of document header."""
has_content = True
def run(self):
self.assert_has_content()
header = self.state_machine.document.get_decoration().get_header()
self.state.nested_parse(self.content, self.content_offset, header)
return []
class Footer(Directive):
"""Contents of document footer."""
has_content = True
def run(self):
self.assert_has_content()
footer = self.state_machine.document.get_decoration().get_footer()
self.state.nested_parse(self.content, self.content_offset, footer)
return []
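# Hedged sketch (not part of this module): the contents and sectnum
# directives above only plant placeholder nodes; the paired transforms in
# docutils.transforms.parts do the real work during publishing.  An
# illustrative input, rendered through the public docutils API:
def _parts_demo():
    from docutils.core import publish_string  # public docutils API
    source = """\
.. contents:: Table of Contents
   :depth: 2

.. sectnum::

First Section
=============

Body text.
"""
    return publish_string(source, writer_name='html')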
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/parsers/rst/directives/parts.py",
"copies": "4",
"size": "4208",
"license": "mit",
"hash": 8668073503240808000,
"line_mean": 32.3968253968,
"line_max": 77,
"alpha_frac": 0.6021863118,
"autogenerated": false,
"ratio": 4.3203285420944555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6922514853894456,
"avg_score": null,
"num_lines": null
} |
"""
Transforms related to document parts.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class SectNum(Transform):
"""
Automatically assigns numbers to the titles of document sections.
It is possible to limit the maximum section level for which the numbers
are added. For those sections that are auto-numbered, the "autonum"
attribute is set, informing the contents table generator that a different
form of the TOC should be used.
"""
default_priority = 710
"""Should be applied before `Contents`."""
def apply(self):
self.maxdepth = self.startnode.details.get('depth', None)
self.startvalue = self.startnode.details.get('start', 1)
self.prefix = self.startnode.details.get('prefix', '')
self.suffix = self.startnode.details.get('suffix', '')
self.startnode.parent.remove(self.startnode)
if self.document.settings.sectnum_xform:
if self.maxdepth is None:
self.maxdepth = sys.maxint
self.update_section_numbers(self.document)
else: # store details for eventual section numbering by the writer
self.document.settings.sectnum_depth = self.maxdepth
self.document.settings.sectnum_start = self.startvalue
self.document.settings.sectnum_prefix = self.prefix
self.document.settings.sectnum_suffix = self.suffix
def update_section_numbers(self, node, prefix=(), depth=0):
depth += 1
if prefix:
sectnum = 1
else:
sectnum = self.startvalue
for child in node:
if isinstance(child, nodes.section):
numbers = prefix + (str(sectnum),)
title = child[0]
                # Use &nbsp; for spacing:
generated = nodes.generated(
'', (self.prefix + '.'.join(numbers) + self.suffix
+ u'\u00a0' * 3),
classes=['sectnum'])
title.insert(0, generated)
title['auto'] = 1
if depth < self.maxdepth:
self.update_section_numbers(child, numbers, depth)
sectnum += 1
class Contents(Transform):
"""
This transform generates a table of contents from the entire document tree
or from a single branch. It locates "section" elements and builds them
into a nested bullet list, which is placed within a "topic" created by the
contents directive. A title is either explicitly specified, taken from
the appropriate language module, or omitted (local table of contents).
The depth may be specified. Two-way references between the table of
contents and section titles are generated (requires Writer support).
This transform requires a startnode, which contains generation
options and provides the location for the generated table of contents (the
startnode is replaced by the table of contents "topic").
"""
default_priority = 720
def apply(self):
try: # let the writer (or output software) build the contents list?
toc_by_writer = self.document.settings.use_latex_toc
except AttributeError:
toc_by_writer = False
details = self.startnode.details
if 'local' in details:
startnode = self.startnode.parent.parent
while not (isinstance(startnode, nodes.section)
or isinstance(startnode, nodes.document)):
# find the ToC root: a direct ancestor of startnode
startnode = startnode.parent
else:
startnode = self.document
self.toc_id = self.startnode.parent['ids'][0]
if 'backlinks' in details:
self.backlinks = details['backlinks']
else:
self.backlinks = self.document.settings.toc_backlinks
if toc_by_writer:
# move customization settings to the parent node
self.startnode.parent.attributes.update(details)
self.startnode.parent.remove(self.startnode)
else:
contents = self.build_contents(startnode)
if len(contents):
self.startnode.replace_self(contents)
else:
self.startnode.parent.parent.remove(self.startnode.parent)
def build_contents(self, node, level=0):
level += 1
sections = [sect for sect in node if isinstance(sect, nodes.section)]
entries = []
autonum = 0
depth = self.startnode.details.get('depth', sys.maxint)
for section in sections:
title = section[0]
auto = title.get('auto') # May be set by SectNum.
entrytext = self.copy_and_filter(title)
reference = nodes.reference('', '', refid=section['ids'][0],
*entrytext)
ref_id = self.document.set_id(reference)
entry = nodes.paragraph('', '', reference)
item = nodes.list_item('', entry)
if ( self.backlinks in ('entry', 'top')
and title.next_node(nodes.reference) is None):
if self.backlinks == 'entry':
title['refid'] = ref_id
elif self.backlinks == 'top':
title['refid'] = self.toc_id
if level < depth:
subsects = self.build_contents(section, level)
item += subsects
entries.append(item)
if entries:
contents = nodes.bullet_list('', *entries)
if auto:
contents['classes'].append('auto-toc')
return contents
else:
return []
def copy_and_filter(self, node):
"""Return a copy of a title, with references, images, etc. removed."""
visitor = ContentsFilter(self.document)
node.walkabout(visitor)
return visitor.get_entry_text()
class ContentsFilter(nodes.TreeCopyVisitor):
def get_entry_text(self):
return self.get_tree_copy().children
def visit_citation_reference(self, node):
raise nodes.SkipNode
def visit_footnote_reference(self, node):
raise nodes.SkipNode
def visit_image(self, node):
if node.hasattr('alt'):
self.parent.append(nodes.Text(node['alt']))
raise nodes.SkipNode
def ignore_node_but_process_children(self, node):
raise nodes.SkipDeparture
visit_interpreted = ignore_node_but_process_children
visit_problematic = ignore_node_but_process_children
visit_reference = ignore_node_but_process_children
visit_target = ignore_node_but_process_children
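# Hedged sketch (not part of this module): SectNum (default priority 710)
# runs before Contents (720), so TOC entries pick up the generated section
# numbers.  publish_doctree is public docutils API and applies the pending
# transforms registered by the corresponding directives.
def _transforms_demo(rst_source):
    from docutils.core import publish_doctree
    return publish_doctree(rst_source)  # returns the transformed node tree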
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/transforms/parts.py",
"copies": "2",
"size": "6937",
"license": "mit",
"hash": -9157093350823155000,
"line_mean": 37.5388888889,
"line_max": 78,
"alpha_frac": 0.6093412138,
"autogenerated": false,
"ratio": 4.330212234706616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5939553448506616,
"avg_score": null,
"num_lines": null
} |
"""
This package contains Docutils Reader modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import utils, parsers, Component
from docutils.transforms import universal
if sys.version_info < (2,5):
from docutils._compat import __import__
class Reader(Component):
"""
Abstract base class for docutils Readers.
Each reader module or package must export a subclass also called 'Reader'.
The two steps of a Reader's responsibility are `scan()` and
`parse()`. Call `read()` to process a document.
"""
component_type = 'reader'
config_section = 'readers'
def get_transforms(self):
return Component.get_transforms(self) + [
universal.Decorations,
universal.ExposeInternals,
universal.StripComments,]
def __init__(self, parser=None, parser_name=None):
"""
Initialize the Reader instance.
Several instance attributes are defined with dummy initial values.
Subclasses may use these attributes as they wish.
"""
self.parser = parser
"""A `parsers.Parser` instance shared by all doctrees. May be left
unspecified if the document source determines the parser."""
if parser is None and parser_name:
self.set_parser(parser_name)
self.source = None
"""`docutils.io` IO object, source of input data."""
self.input = None
"""Raw text input; either a single string or, for more complex cases,
a collection of strings."""
def set_parser(self, parser_name):
"""Set `self.parser` by name."""
parser_class = parsers.get_parser_class(parser_name)
self.parser = parser_class()
def read(self, source, parser, settings):
self.source = source
if not self.parser:
self.parser = parser
self.settings = settings
self.input = self.source.read()
self.parse()
return self.document
def parse(self):
"""Parse `self.input` into a document tree."""
self.document = document = self.new_document()
self.parser.parse(self.input, document)
document.current_source = document.current_line = None
def new_document(self):
"""Create and return a new empty document tree (root node)."""
document = utils.new_document(self.source.source_path, self.settings)
return document
class ReReader(Reader):
"""
A reader which rereads an existing document tree (e.g. a
deserializer).
Often used in conjunction with `writers.UnfilteredWriter`.
"""
def get_transforms(self):
# Do not add any transforms. They have already been applied
# by the reader which originally created the document.
return Component.get_transforms(self)
_reader_aliases = {}
def get_reader_class(reader_name):
"""Return the Reader class from the `reader_name` module."""
reader_name = reader_name.lower()
if reader_name in _reader_aliases:
reader_name = _reader_aliases[reader_name]
try:
module = __import__(reader_name, globals(), locals(), level=0)
except ImportError:
module = __import__(reader_name, globals(), locals(), level=1)
return module.Reader
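# Hedged usage sketch (not part of this module): 'standalone' is the stock
# docutils reader and should resolve through the import logic of
# get_reader_class above.
def _reader_demo():
    reader_class = get_reader_class('standalone')
    return reader_class(parser_name='rst')  # Reader with an rst parser attached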
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/readers/__init__.py",
"copies": "4",
"size": "3419",
"license": "mit",
"hash": 4314583484207792600,
"line_mean": 29.2566371681,
"line_max": 78,
"alpha_frac": 0.6440479672,
"autogenerated": false,
"ratio": 4.231435643564357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6875483610764357,
"avg_score": null,
"num_lines": null
} |
"""
This package contains modules for standard tree transforms available
to Docutils components. Tree transforms serve a variety of purposes:
- To tie up certain syntax-specific "loose ends" that remain after the
initial parsing of the input plaintext. These transforms are used to
supplement a limited syntax.
- To automate the internal linking of the document tree (hyperlink
references, footnote references, etc.).
- To extract useful information from the document tree. These
transforms may be used to construct (for example) indexes and tables
of contents.
Each transform is an optional step that a Docutils component may
choose to perform on the parsed document.
"""
__docformat__ = 'reStructuredText'
from docutils import languages, ApplicationError, TransformSpec
class TransformError(ApplicationError): pass
class Transform:
"""
Docutils transform component abstract base class.
"""
default_priority = None
"""Numerical priority of this transform, 0 through 999 (override)."""
def __init__(self, document, startnode=None):
"""
Initial setup for in-place document transforms.
"""
self.document = document
"""The document tree to transform."""
self.startnode = startnode
"""Node from which to begin the transform. For many transforms which
apply to the document as a whole, `startnode` is not set (i.e. its
value is `None`)."""
self.language = languages.get_language(
document.settings.language_code, document.reporter)
"""Language module local to this document."""
def apply(self, **kwargs):
"""Override to apply the transform to the document tree."""
raise NotImplementedError('subclass must override this method')
class Transformer(TransformSpec):
"""
Stores transforms (`Transform` classes) and applies them to document
trees. Also keeps track of components by component type name.
"""
def __init__(self, document):
self.transforms = []
"""List of transforms to apply. Each item is a 3-tuple:
``(priority string, transform class, pending node or None)``."""
self.unknown_reference_resolvers = []
"""List of hook functions which assist in resolving references"""
self.document = document
"""The `nodes.document` object this Transformer is attached to."""
self.applied = []
"""Transforms already applied, in order."""
self.sorted = 0
"""Boolean: is `self.tranforms` sorted?"""
self.components = {}
"""Mapping of component type name to component object. Set by
`self.populate_from_components()`."""
self.serialno = 0
"""Internal serial number to keep track of the add order of
transforms."""
def add_transform(self, transform_class, priority=None, **kwargs):
"""
Store a single transform. Use `priority` to override the default.
`kwargs` is a dictionary whose contents are passed as keyword
arguments to the `apply` method of the transform. This can be used to
pass application-specific data to the transform instance.
"""
if priority is None:
priority = transform_class.default_priority
priority_string = self.get_priority_string(priority)
self.transforms.append(
(priority_string, transform_class, None, kwargs))
self.sorted = 0
def add_transforms(self, transform_list):
"""Store multiple transforms, with default priorities."""
for transform_class in transform_list:
priority_string = self.get_priority_string(
transform_class.default_priority)
self.transforms.append(
(priority_string, transform_class, None, {}))
self.sorted = 0
def add_pending(self, pending, priority=None):
"""Store a transform with an associated `pending` node."""
transform_class = pending.transform
if priority is None:
priority = transform_class.default_priority
priority_string = self.get_priority_string(priority)
self.transforms.append(
(priority_string, transform_class, pending, {}))
self.sorted = 0
def get_priority_string(self, priority):
"""
Return a string, `priority` combined with `self.serialno`.
This ensures FIFO order on transforms with identical priority.
"""
self.serialno += 1
return '%03d-%03d' % (priority, self.serialno)
def populate_from_components(self, components):
"""
Store each component's default transforms, with default priorities.
Also, store components by type name in a mapping for later lookup.
"""
for component in components:
if component is None:
continue
self.add_transforms(component.get_transforms())
self.components[component.component_type] = component
self.sorted = 0
# Set up all of the reference resolvers for this transformer. Each
# component of this transformer is able to register its own helper
# functions to help resolve references.
unknown_reference_resolvers = []
for i in components:
unknown_reference_resolvers.extend(i.unknown_reference_resolvers)
decorated_list = [(f.priority, f) for f in unknown_reference_resolvers]
decorated_list.sort()
self.unknown_reference_resolvers.extend([f[1] for f in decorated_list])
def apply_transforms(self):
"""Apply all of the stored transforms, in priority order."""
self.document.reporter.attach_observer(
self.document.note_transform_message)
while self.transforms:
if not self.sorted:
# Unsorted initially, and whenever a transform is added.
self.transforms.sort()
self.transforms.reverse()
self.sorted = 1
priority, transform_class, pending, kwargs = self.transforms.pop()
transform = transform_class(self.document, startnode=pending)
transform.apply(**kwargs)
self.applied.append((priority, transform_class, pending, kwargs))
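# Hedged sketch (not part of this module): priority strings embed the
# insertion serial number, so sorting keeps FIFO order among transforms
# that share a priority.
def _priority_demo():
    entries = ['%03d-%03d' % (p, i)
               for i, p in enumerate((710, 720, 710), 1)]
    return sorted(entries)  # ['710-001', '710-003', '720-002']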
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/transforms/__init__.py",
"copies": "4",
"size": "6459",
"license": "mit",
"hash": -3661103030339833300,
"line_mean": 36.5523255814,
"line_max": 79,
"alpha_frac": 0.650565103,
"autogenerated": false,
"ratio": 4.707725947521866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7358291050521867,
"avg_score": null,
"num_lines": null
} |
# $Id$
# Christopher Lee [email protected]
# based upon pdfmetrics.py by Andy Robinson
from . import fontinfo
from . import latin1MetricsCache
##############################################################
#
# PDF Metrics
# This is a preamble to give us a stringWidth function.
# loads and caches AFM files, but won't need to as the
# standard fonts are there already
##############################################################
_stdenc_widths = {
'courier':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-boldoblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-oblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'helvetica':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667,
611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500,
222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 0, 556, 556, 556,
278, 0, 537, 350, 222, 333, 333, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 556, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 222, 611, 944,
611, 0, 0, 834],
'helvetica-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 333, 474, 556, 556, 889, 722, 278, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667,
611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 333, 278, 333, 584, 556, 278, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556,
278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 238, 500, 556, 333, 333, 611, 611, 0, 556, 556, 556,
278, 0, 556, 350, 278, 500, 500, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 611, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 278, 611, 944,
611, 0, 0, 834],
'helvetica-boldoblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 333, 474, 556, 556, 889, 722, 278, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667,
611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 333, 278, 333, 584, 556, 278, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556,
278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 238, 500, 556, 333, 333, 611, 611, 0, 556, 556, 556,
278, 0, 556, 350, 278, 500, 500, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 611, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 278, 611, 944,
611, 0, 0, 834],
'helvetica-oblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667,
611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500,
222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 0, 556, 556, 556,
278, 0, 537, 350, 222, 333, 333, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 556, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 222, 611, 944,
611, 0, 0, 834],
'symbol':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 713, 500, 549, 833, 778, 439, 333, 333, 500, 549, 250, 549, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 278, 278, 549, 549, 549, 444, 549, 722, 667, 722, 612, 611,
763, 603, 722, 333, 631, 722, 686, 889, 722, 722, 768, 741, 556, 592, 611, 690, 439, 768, 645,
795, 611, 333, 863, 333, 658, 500, 500, 631, 549, 549, 494, 439, 521, 411, 603, 329, 603, 549,
549, 576, 521, 549, 549, 521, 549, 603, 439, 576, 713, 686, 493, 686, 494, 480, 200, 480, 549, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 620, 247, 549, 167, 713, 500, 753, 753, 753, 753, 1042, 987, 603, 987, 603, 400, 549, 411,
549, 549, 713, 494, 460, 549, 549, 549, 549, 1000, 603, 1000, 658, 823, 686, 795, 987, 768, 768,
823, 768, 768, 713, 713, 713, 713, 713, 713, 713, 768, 713, 790, 790, 890, 823, 549, 250, 713,
603, 603, 1042, 987, 603, 987, 603, 494, 329, 790, 790, 786, 713, 384, 384, 384, 384, 384, 384,
494, 494, 494, 494, 0, 329, 274, 686, 686, 686, 384, 384, 384, 384, 384, 384, 494, 494, 790],
'times-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 555, 500, 500, 1000, 833, 333, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 930, 722, 667, 722, 722, 667,
611, 778, 778, 389, 500, 778, 667, 944, 722, 778, 611, 778, 722, 556, 667, 722, 722, 1000, 722,
722, 667, 333, 278, 333, 581, 500, 333, 500, 556, 444, 556, 444, 333, 500, 556, 278, 333, 556,
278, 833, 556, 500, 556, 556, 444, 389, 333, 556, 500, 722, 500, 500, 444, 394, 220, 394, 520, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 500, 500, 167, 500, 500, 500, 500, 278, 500, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 540, 350, 333, 500, 500, 500, 1000, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 300, 0, 0, 0, 0, 667, 778, 1000, 330, 0, 0, 0, 0, 0, 722, 0, 0, 0, 278, 0, 0, 278, 500, 722,
556, 0, 0, 750],
'times-bolditalic':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 389, 555, 500, 500, 833, 778, 333, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 832, 667, 667, 667, 722, 667,
667, 722, 778, 389, 500, 667, 611, 889, 722, 722, 611, 722, 667, 556, 611, 722, 667, 889, 667,
611, 611, 333, 278, 333, 570, 500, 333, 500, 500, 444, 500, 444, 333, 500, 556, 278, 278, 500,
278, 778, 556, 500, 500, 500, 389, 389, 278, 556, 444, 667, 500, 444, 389, 348, 220, 348, 570, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 389, 500, 500, 167, 500, 500, 500, 500, 278, 500, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 500, 350, 333, 500, 500, 500, 1000, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 944, 0,
266, 0, 0, 0, 0, 611, 722, 944, 300, 0, 0, 0, 0, 0, 722, 0, 0, 0, 278, 0, 0, 278, 500, 722, 500,
0, 0, 750],
'times-italic':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 420, 500, 500, 833, 778, 333, 333, 333, 500, 675, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 675, 675, 675, 500, 920, 611, 611, 667, 722, 611,
611, 722, 722, 333, 444, 667, 556, 833, 667, 722, 611, 722, 611, 500, 556, 722, 611, 833, 611,
556, 556, 389, 278, 389, 422, 500, 333, 500, 500, 444, 500, 444, 278, 500, 500, 278, 278, 444,
278, 722, 500, 500, 500, 500, 389, 389, 278, 500, 444, 667, 444, 444, 389, 400, 275, 400, 541, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 389, 500, 500, 167, 500, 500, 500, 500, 214, 556, 500, 333, 333, 500, 500, 0, 500, 500, 500,
250, 0, 523, 350, 333, 556, 556, 500, 889, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 889, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 889, 0,
276, 0, 0, 0, 0, 556, 722, 944, 310, 0, 0, 0, 0, 0, 667, 0, 0, 0, 278, 0, 0, 278, 500, 667, 500,
0, 0, 750],
'times-roman':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 408, 500, 500, 833, 778, 333, 333, 333, 500, 564, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 278, 278, 564, 564, 564, 444, 921, 722, 667, 667, 722, 611,
556, 722, 722, 333, 389, 722, 611, 889, 722, 722, 556, 722, 667, 556, 611, 722, 722, 944, 722,
722, 611, 333, 278, 333, 469, 500, 333, 444, 500, 444, 500, 444, 333, 500, 500, 278, 278, 500,
278, 778, 500, 500, 500, 500, 333, 389, 278, 500, 500, 722, 500, 500, 444, 480, 200, 480, 541, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 500, 500, 167, 500, 500, 500, 500, 180, 444, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 453, 350, 333, 444, 444, 500, 1000, 1000, 0, 444, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 889, 0,
276, 0, 0, 0, 0, 611, 722, 889, 310, 0, 0, 0, 0, 0, 667, 0, 0, 0, 278, 0, 0, 278, 500, 722, 500,
0, 0, 750],
'zapfdingbats':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 974, 961, 974, 980, 719, 789, 790, 791, 690, 960, 939, 549, 855, 911, 933, 911, 945, 974,
755, 846, 762, 761, 571, 677, 763, 760, 759, 754, 494, 552, 537, 577, 692, 786, 788, 788, 790,
793, 794, 816, 823, 789, 841, 823, 833, 816, 831, 923, 744, 723, 749, 790, 792, 695, 776, 768,
792, 759, 707, 708, 682, 701, 826, 815, 789, 789, 707, 687, 696, 689, 786, 787, 713, 791, 785,
791, 873, 761, 762, 762, 759, 759, 892, 892, 788, 784, 438, 138, 277, 415, 392, 392, 668, 668, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 732, 544, 544, 910, 667, 760, 760, 776, 595, 694, 626, 788, 788, 788, 788, 788, 788, 788, 788,
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788,
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 894, 838, 1016, 458, 748, 924,
748, 918, 927, 928, 928, 834, 873, 828, 924, 924, 917, 930, 931, 463, 883, 836, 836, 867, 867,
696, 696, 874, 0, 874, 760, 946, 771, 865, 771, 888, 967, 888, 831, 873, 927, 970, 234]
}
ascent_descent = {'Courier': (629, -157),
'Courier-Bold': (626, -142),
'Courier-BoldOblique': (626, -142),
'Courier-Oblique': (629, -157),
'Helvetica': (718, -207),
'Helvetica-Bold': (718, -207),
'Helvetica-BoldOblique': (718, -207),
'Helvetica-Oblique': (718, -207),
'Symbol': (0, 0),
'Times-Bold': (676, -205),
'Times-BoldItalic': (699, -205),
'Times-Italic': (683, -205),
'Times-Roman': (683, -217),
'ZapfDingbats': (0, 0)}
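# The ascent/descent pairs above appear to follow the AFM convention of
# 1/1000-of-point-size units (an assumption based on the width tables, which
# use the same scale).  A minimal sketch of deriving a line height in points;
# the 1.2 leading factor is a hypothetical default, not part of this module:
def line_height(font_name, font_size, leading_factor=1.2):
  ascent, descent = ascent_descent[font_name]
  return (ascent - descent) * font_size / 1000.0 * leading_factor
# e.g. line_height('Times-Roman', 12) -> (683 + 217) * 12 / 1000 * 1.2 = 12.96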
_Widths = {'StandardEncoding': _stdenc_widths, 'Latin1Encoding': latin1MetricsCache.FontWidths}
def stringwidth(text, font, encoding):
if font in fontinfo.NonRomanFonts:
widths = _Widths['StandardEncoding'][font.lower()]
else:
try:
widths = _Widths[encoding][font.lower()]
    except KeyError:
      raise KeyError("unknown encoding {0} or font name {1}".format(encoding, font))
w = 0
for char in text:
chr_idx = ord(char)
if chr_idx < len(widths):
chr_width = widths[chr_idx]
else:
chr_width = max(widths)
w = w + chr_width
return w
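# Hypothetical convenience wrapper (not part of the original API): the width
# tables above are in 1/1000-of-point-size units, the usual AFM scale, so
# scaling by the font size gives a width in points.
def stringwidth_points(text, font, encoding, font_size):
  return stringwidth(text, font, encoding) * font_size / 1000.0
# e.g. stringwidth_points("Hi", "Times-Roman", "Latin1Encoding", 12)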
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/sping/PS/psmetrics.py",
"copies": "11",
"size": "17605",
"license": "bsd-3-clause",
"hash": -4213266296111556600,
"line_mean": 73.914893617,
"line_max": 100,
"alpha_frac": 0.5130360693,
"autogenerated": false,
"ratio": 2.0080985513858787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8021134620685879,
"avg_score": null,
"num_lines": null
} |
# $Id$
# Christopher Lee [email protected]
# based upon pdfmetrics.py by Andy Robinson
from . import fontinfo
from . import latin1MetricsCache
##############################################################
#
# PDF Metrics
# This is a preamble to give us a stringWidth function.
# loads and caches AFM files, but won't need to as the
# standard fonts are there already
##############################################################
_stdenc_widths = {
'courier':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-boldoblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'courier-oblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 600, 600,
600, 0, 600, 600, 600, 600, 600, 600, 600, 600, 0, 600, 0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 600, 0,
600, 0, 0, 0, 0, 600, 600, 600, 600, 0, 0, 0, 0, 0, 600, 0, 0, 0, 600, 0, 0, 600, 600, 600, 600,
0, 0, 600],
'helvetica':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667,
611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500,
222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 0, 556, 556, 556,
278, 0, 537, 350, 222, 333, 333, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 556, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 222, 611, 944,
611, 0, 0, 834],
'helvetica-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 333, 474, 556, 556, 889, 722, 278, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667,
611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 333, 278, 333, 584, 556, 278, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556,
278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 238, 500, 556, 333, 333, 611, 611, 0, 556, 556, 556,
278, 0, 556, 350, 278, 500, 500, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 611, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 278, 611, 944,
611, 0, 0, 834],
'helvetica-boldoblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 333, 474, 556, 556, 889, 722, 278, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667,
611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 333, 278, 333, 584, 556, 278, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556,
278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 238, 500, 556, 333, 333, 611, 611, 0, 556, 556, 556,
278, 0, 556, 350, 278, 500, 500, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 611, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 278, 611, 944,
611, 0, 0, 834],
'helvetica-oblique':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556,
556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667,
611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667,
667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500,
222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 0, 556, 556, 556,
278, 0, 537, 350, 222, 333, 333, 556, 1000, 1000, 0, 611, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 370, 0, 0, 0, 0, 556, 778, 1000, 365, 0, 0, 0, 0, 0, 889, 0, 0, 0, 278, 0, 0, 222, 611, 944,
611, 0, 0, 834],
'symbol':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 713, 500, 549, 833, 778, 439, 333, 333, 500, 549, 250, 549, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 278, 278, 549, 549, 549, 444, 549, 722, 667, 722, 612, 611,
763, 603, 722, 333, 631, 722, 686, 889, 722, 722, 768, 741, 556, 592, 611, 690, 439, 768, 645,
795, 611, 333, 863, 333, 658, 500, 500, 631, 549, 549, 494, 439, 521, 411, 603, 329, 603, 549,
549, 576, 521, 549, 549, 521, 549, 603, 439, 576, 713, 686, 493, 686, 494, 480, 200, 480, 549, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 620, 247, 549, 167, 713, 500, 753, 753, 753, 753, 1042, 987, 603, 987, 603, 400, 549, 411,
549, 549, 713, 494, 460, 549, 549, 549, 549, 1000, 603, 1000, 658, 823, 686, 795, 987, 768, 768,
823, 768, 768, 713, 713, 713, 713, 713, 713, 713, 768, 713, 790, 790, 890, 823, 549, 250, 713,
603, 603, 1042, 987, 603, 987, 603, 494, 329, 790, 790, 786, 713, 384, 384, 384, 384, 384, 384,
494, 494, 494, 494, 0, 329, 274, 686, 686, 686, 384, 384, 384, 384, 384, 384, 494, 494, 790],
'times-bold':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 555, 500, 500, 1000, 833, 333, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 930, 722, 667, 722, 722, 667,
611, 778, 778, 389, 500, 778, 667, 944, 722, 778, 611, 778, 722, 556, 667, 722, 722, 1000, 722,
722, 667, 333, 278, 333, 581, 500, 333, 500, 556, 444, 556, 444, 333, 500, 556, 278, 333, 556,
278, 833, 556, 500, 556, 556, 444, 389, 333, 556, 500, 722, 500, 500, 444, 394, 220, 394, 520, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 500, 500, 167, 500, 500, 500, 500, 278, 500, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 540, 350, 333, 500, 500, 500, 1000, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000,
0, 300, 0, 0, 0, 0, 667, 778, 1000, 330, 0, 0, 0, 0, 0, 722, 0, 0, 0, 278, 0, 0, 278, 500, 722,
556, 0, 0, 750],
'times-bolditalic':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 389, 555, 500, 500, 833, 778, 333, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 832, 667, 667, 667, 722, 667,
667, 722, 778, 389, 500, 667, 611, 889, 722, 722, 611, 722, 667, 556, 611, 722, 667, 889, 667,
611, 611, 333, 278, 333, 570, 500, 333, 500, 500, 444, 500, 444, 333, 500, 556, 278, 278, 500,
278, 778, 556, 500, 500, 500, 389, 389, 278, 556, 444, 667, 500, 444, 389, 348, 220, 348, 570, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 389, 500, 500, 167, 500, 500, 500, 500, 278, 500, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 500, 350, 333, 500, 500, 500, 1000, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 944, 0,
266, 0, 0, 0, 0, 611, 722, 944, 300, 0, 0, 0, 0, 0, 722, 0, 0, 0, 278, 0, 0, 278, 500, 722, 500,
0, 0, 750],
'times-italic':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 420, 500, 500, 833, 778, 333, 333, 333, 500, 675, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 333, 333, 675, 675, 675, 500, 920, 611, 611, 667, 722, 611,
611, 722, 722, 333, 444, 667, 556, 833, 667, 722, 611, 722, 611, 500, 556, 722, 611, 833, 611,
556, 556, 389, 278, 389, 422, 500, 333, 500, 500, 444, 500, 444, 278, 500, 500, 278, 278, 444,
278, 722, 500, 500, 500, 500, 389, 389, 278, 500, 444, 667, 444, 444, 389, 400, 275, 400, 541, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 389, 500, 500, 167, 500, 500, 500, 500, 214, 556, 500, 333, 333, 500, 500, 0, 500, 500, 500,
250, 0, 523, 350, 333, 556, 556, 500, 889, 1000, 0, 500, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 889, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 889, 0,
276, 0, 0, 0, 0, 556, 722, 944, 310, 0, 0, 0, 0, 0, 667, 0, 0, 0, 278, 0, 0, 278, 500, 667, 500,
0, 0, 750],
'times-roman':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250, 333, 408, 500, 500, 833, 778, 333, 333, 333, 500, 564, 250, 333, 250, 278, 500, 500, 500,
500, 500, 500, 500, 500, 500, 500, 278, 278, 564, 564, 564, 444, 921, 722, 667, 667, 722, 611,
556, 722, 722, 333, 389, 722, 611, 889, 722, 722, 556, 722, 667, 556, 611, 722, 722, 944, 722,
722, 611, 333, 278, 333, 469, 500, 333, 444, 500, 444, 500, 444, 333, 500, 500, 278, 278, 500,
278, 778, 500, 500, 500, 500, 333, 389, 278, 500, 500, 722, 500, 500, 444, 480, 200, 480, 541, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 333, 500, 500, 167, 500, 500, 500, 500, 180, 444, 500, 333, 333, 556, 556, 0, 500, 500, 500,
250, 0, 453, 350, 333, 444, 444, 500, 1000, 1000, 0, 444, 0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333, 1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 889, 0,
276, 0, 0, 0, 0, 611, 722, 889, 310, 0, 0, 0, 0, 0, 667, 0, 0, 0, 278, 0, 0, 278, 500, 722, 500,
0, 0, 750],
'zapfdingbats':
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278, 974, 961, 974, 980, 719, 789, 790, 791, 690, 960, 939, 549, 855, 911, 933, 911, 945, 974,
755, 846, 762, 761, 571, 677, 763, 760, 759, 754, 494, 552, 537, 577, 692, 786, 788, 788, 790,
793, 794, 816, 823, 789, 841, 823, 833, 816, 831, 923, 744, 723, 749, 790, 792, 695, 776, 768,
792, 759, 707, 708, 682, 701, 826, 815, 789, 789, 707, 687, 696, 689, 786, 787, 713, 791, 785,
791, 873, 761, 762, 762, 759, 759, 892, 892, 788, 784, 438, 138, 277, 415, 392, 392, 668, 668, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 732, 544, 544, 910, 667, 760, 760, 776, 595, 694, 626, 788, 788, 788, 788, 788, 788, 788, 788,
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788,
788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 788, 894, 838, 1016, 458, 748, 924,
748, 918, 927, 928, 928, 834, 873, 828, 924, 924, 917, 930, 931, 463, 883, 836, 836, 867, 867,
696, 696, 874, 0, 874, 760, 946, 771, 865, 771, 888, 967, 888, 831, 873, 927, 970, 234]
}
ascent_descent = {'Courier': (629, -157),
'Courier-Bold': (626, -142),
'Courier-BoldOblique': (626, -142),
'Courier-Oblique': (629, -157),
'Helvetica': (718, -207),
'Helvetica-Bold': (718, -207),
'Helvetica-BoldOblique': (718, -207),
'Helvetica-Oblique': (718, -207),
'Symbol': (0, 0),
'Times-Bold': (676, -205),
'Times-BoldItalic': (699, -205),
'Times-Italic': (683, -205),
'Times-Roman': (683, -217),
'ZapfDingbats': (0, 0)}
_Widths = {'StandardEncoding': _stdenc_widths, 'Latin1Encoding': latin1MetricsCache.FontWidths}
def stringwidth(text, font, encoding):
  if font in fontinfo.NonRomanFonts:
    widths = _Widths['StandardEncoding'][font.lower()]
  else:
    try:
      widths = _Widths[encoding][font.lower()]
    except KeyError:
      raise KeyError("unknown encoding {0} or font name {1}".format(encoding, font))
  w = 0
  for char in text:
    # guard against characters past the end of the width table, which would
    # otherwise raise IndexError
    chr_idx = ord(char)
    w = w + (widths[chr_idx] if chr_idx < len(widths) else max(widths))
  return w
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/sping/PS/psmetrics.py",
"copies": "1",
"size": "17511",
"license": "bsd-3-clause",
"hash": 9115350483324072000,
"line_mean": 74.8051948052,
"line_max": 100,
"alpha_frac": 0.5133915824,
"autogenerated": false,
"ratio": 2.0046937607326845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.797237256729606,
"avg_score": 0.009142555167324911,
"num_lines": 231
} |
class gen_tuple(object):
def __init__(self, tup): self.tup = tup
def __enter__(self): return self.tup
def __exit__(self, type, value, tb): pass
Gen_empty = gen_tuple(())
Gen_once = gen_tuple((None,))
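# gen_tuple lets a fixed tuple stand in wherever the engine expects a
# context manager whose managed value is an iterable of answers (the same
# shape that lookup/prove return).  A minimal sketch of Gen_empty acting as
# a "no answers" result:
#
#     with Gen_empty as it:
#         for ans in it:
#             pass            # never executes: the managed tuple is empty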
class knowledge_base(object):
''' This object is a master repository for knowledge entities of different
names. These knowledge entities could be facts or rules. The
cumulative information maintained in a knowledge_base represents all
knowledge within a specific domain.
In the syntax: "name1.name2(arg_pattern...)", the knowledge_base name
is "name1".
'''
def __init__(self, engine, name, entity_list_type = None, register = True):
self.name = name
self.entity_lists = {} # {name: entity_list}
self.entity_list_type = entity_list_type
self.initialized = False # used by self.init2
if register: self.register(engine)
else: self.engine = engine
def register(self, engine):
r'''
Called at most once either from __init__ or after loading from a
pickle.
'''
self.engine = engine
name = self.name
if name in engine.knowledge_bases:
raise AssertionError("knowledge_base %s already exists" % name)
if name in engine.rule_bases:
raise AssertionError("name clash between %s '%s' and "
"rule_base '%s'" %
(self.__class__.__name__, name, name))
engine.knowledge_bases[name] = self
def __getstate__(self):
r'''
User must call 'register' on the new instance after loading it
from the pickle. We do this so that we don't end up pickling the
whole engine!
'''
ans = vars(self).copy()
del ans['engine']
return ans
def init2(self):
''' overridden by subclasses. '''
pass
def reset(self):
for entity in self.entity_lists.itervalues(): entity.reset()
def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.name)
def get_entity_list(self, entity_name):
ans = self.entity_lists.get(entity_name)
if ans is None:
if self.entity_list_type:
ans = self.entity_lists[entity_name] \
= self.entity_list_type(entity_name)
else:
raise KeyError("%s not found in knowledge_base %s" %
(entity_name, self.name))
return ans
def lookup(self, bindings, pat_context, entity_name, patterns):
entity = self.entity_lists.get(entity_name)
if entity is None: return Gen_empty
return entity.lookup(bindings, pat_context, patterns)
def prove(self, bindings, pat_context, entity_name, patterns):
entity = self.entity_lists.get(entity_name)
if entity is None: return Gen_empty
return entity.prove(bindings, pat_context, patterns)
def add_fc_rule_ref(self, entity_name, fc_rule, foreach_index):
self.get_entity_list(entity_name) \
.add_fc_rule_ref(fc_rule, foreach_index)
class knowledge_entity_list(object):
''' This object keeps track of all of the knowledge entities sharing the
same name. For example, these knowledge entities could be all the
facts of the same name or all of the rules of the same name.
Generally, all of the entities in this list may come to bear on
looking up or proving a single fact or goal.
In the syntax: "name1.name2(arg_pattern...)", the knowledge entity
name is "name2".
'''
def __init__(self, name):
self.name = name
def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.name)
def reset(self):
pass
def prove(self, bindings, pat_context, patterns):
return self.lookup(bindings, pat_context, patterns)
def add_fc_rule_ref(self, fc_rule, foreach_index):
pass
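# A hedged sketch of the contract concrete subclasses fulfill: lookup() must
# return a context manager whose managed value yields once per match.
# 'trivial_entity' is illustrative only, not one of pyke's real fact or rule
# classes.
class trivial_entity(knowledge_entity_list):
    def lookup(self, bindings, pat_context, patterns):
        # nothing to match against: succeed exactly once, else not at all
        return Gen_once if not patterns else Gen_empty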
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/knowledge_base.py",
"copies": "2",
"size": "5178",
"license": "mit",
"hash": -6564139966906779000,
"line_mean": 37.9248120301,
"line_max": 79,
"alpha_frac": 0.628742515,
"autogenerated": false,
"ratio": 4.118536197295147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5747278712295147,
"avg_score": null,
"num_lines": null
} |
from __future__ import with_statement
import os, os.path
import sys
import pyke
from pyke import knowledge_engine
from pyke.krb_compiler import compiler_bc
from pyke.krb_compiler import krbparser
#from pyke import contexts
#contexts.debug = ('patterns_out1', 'patterns_out',)
Ast_names = frozenset((
'file',
'parent',
'fc_rule',
'fc_predicate',
'assert',
'python_assertion',
'python_eq',
'python_in',
'python_check',
'counting',
'bc_rule',
'goal',
'bc_predicate',
'symbol',
'pattern_var',
'as',
'plan_spec',
'pattern_data',
'pattern_tuple',
# 'anonymous_var',
))
def dump(ast, f = sys.stderr, need_nl = False, indent = 0):
if not isinstance(ast, tuple) or len(ast) == 0:
f.write(repr(ast))
return False
if ast[0] in Ast_names:
indent += 2
if need_nl:
f.write("\n")
f.write(' ' * indent)
f.write('(%s' % ast[0])
for arg in ast[1:]:
f.write(', ')
dump(arg, f, True, indent)
f.write(')')
return True
f.write('(')
did_nl = dump(ast[0], f, False, indent)
for arg in ast[1:]:
f.write(', ')
did_nl |= dump(arg, f, did_nl, indent)
f.write(')')
return did_nl
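# A quick illustration of dump() on a hand-built ast tuple (a sketch; the
# exact spacing is approximate):
#
#     dump(('fc_rule', 'rule1', ('fc_predicate', 'facts', 'x')), sys.stdout)
#     # (fc_rule, 'rule1',
#     #     (fc_predicate, 'facts', 'x'))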
def to_relative(from_path, to_path):
'''Calculates the relative path to get from from_path to to_path.
>>> to_relative('/a/b/c', '/a/b/d/e')
'../d/e'
>>> to_relative('/a/b/c', '/b/d/e')
'/b/d/e'
>>> to_relative('/a/b/c', '/a/b/c/e')
'e'
>>> to_relative('/a/b/c', '/a/b2/d/e')
'../../b2/d/e'
'''
from_path = os.path.abspath(from_path)
to_path = os.path.abspath(to_path)
prefix = ''
while os.path.join(from_path, to_path[len(from_path) + 1:]) != to_path:
new_from_path = os.path.dirname(from_path)
if new_from_path == from_path: return to_path
from_path = new_from_path
prefix = os.path.join(prefix, '..')
return os.path.join(prefix, to_path[len(from_path) + 1:])
def compile_krb(rb_name, generated_root_pkg, generated_root_dir, filename):
engine = knowledge_engine.engine(('*direct*', compiler_bc))
try:
fc_name = rb_name + '_fc.py'
bc_name = rb_name + '_bc.py'
plan_name = rb_name + '_plans.py'
fc_path = os.path.join(generated_root_dir, fc_name)
bc_path = os.path.join(generated_root_dir, bc_name)
plan_path = os.path.join(generated_root_dir, plan_name)
ast = krbparser.parse(krbparser, filename)
#sys.stderr.write("got ast\n")
# dump(ast)
# sys.stderr.write('\n\n')
engine.reset()
engine.activate('compiler')
(fc_lines, bc_lines, plan_lines), plan = \
engine.prove_1('compiler', 'compile',
(generated_root_pkg, rb_name, ast), 3)
krb_filename = to_relative(generated_root_dir, filename)
ans = []
if fc_lines:
sys.stderr.write("writing [%s]/%s\n" %
(generated_root_pkg, os.path.basename(fc_path)))
write_file(fc_lines +
("",
"Krb_filename = %r" % krb_filename,),
fc_path)
ans.append(fc_name)
elif os.path.lexists(fc_path): os.remove(fc_path)
if bc_lines:
sys.stderr.write("writing [%s]/%s\n" %
(generated_root_pkg, os.path.basename(bc_path)))
write_file(bc_lines +
("",
"Krb_filename = %r" % krb_filename,),
bc_path)
ans.append(bc_name)
elif os.path.lexists(bc_path): os.remove(bc_path)
if plan_lines:
sys.stderr.write("writing [%s]/%s\n" %
(generated_root_pkg,
os.path.basename(plan_path)))
#sys.stderr.write("plan_lines:\n")
#for line in plan_lines:
# sys.stderr.write(" " + repr(line) + "\n")
write_file(plan_lines +
("",
"Krb_filename = %r" % krb_filename,),
plan_path)
ans.insert(len(ans) - 1, plan_name) # want this loaded before _bc
elif os.path.lexists(plan_path): os.remove(plan_path)
#sys.stderr.write("done!\n")
return ans
except:
if os.path.lexists(fc_path): os.remove(fc_path)
if os.path.lexists(bc_path): os.remove(bc_path)
if os.path.lexists(plan_path): os.remove(plan_path)
raise
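# Hedged usage sketch (paths and names are illustrative): compiling
# 'rules.krb' and getting back the generated module names in load order --
# note the plans module is deliberately inserted before the _bc module:
#
#     compile_krb('rules', 'compiled_krb', './compiled_krb', './rules.krb')
#     # -> ['rules_fc.py', 'rules_plans.py', 'rules_bc.py']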
def compile_goal(goal_str):
return krbparser.parse_goal(krbparser, goal_str)
def compile_kfb(filename):
global kfbparser
try:
kfbparser
except NameError:
from pyke.krb_compiler import kfbparser
return kfbparser.parse(kfbparser, filename)
def compile_kqb(filename):
global kqb_parser
try:
kqb_parser
except NameError:
from pyke.krb_compiler import kqb_parser
return kqb_parser.parse_kqb(filename)
def write_file(lines, filename):
with open(filename, 'w') as f:
indents = [0]
lineno_map = []
write_file2(lines, f, indents, lineno_map, 0)
if lineno_map:
f.write("Krb_lineno_map = (\n")
for map_entry in lineno_map:
f.write(" %s,\n" % str(map_entry))
f.write(")\n")
def write_file2(lines, f, indents, lineno_map, lineno, starting_lineno = None):
for line in lines:
if line == 'POPINDENT':
assert len(indents) > 1
del indents[-1]
elif isinstance(line, tuple):
if len(line) == 2 and line[0] == 'INDENT':
indents.append(indents[-1] + line[1])
elif len(line) == 2 and line[0] == 'STARTING_LINENO':
assert starting_lineno is None, \
"missing ENDING_LINENO for STARTING_LINENO %d" % \
starting_lineno[1]
starting_lineno = line[1], lineno + 1
elif len(line) == 2 and line[0] == 'ENDING_LINENO':
assert starting_lineno is not None, \
"missing STARTING_LINENO for ENDING_LINENO %d" % \
line[1]
lineno_map.append(((starting_lineno[1], lineno),
(starting_lineno[0], line[1])))
starting_lineno = None
else:
lineno, starting_lineno = \
write_file2(line, f, indents, lineno_map, lineno,
starting_lineno)
else:
f.write(' ' * indents[-1] + line + '\n')
lineno += 1
return lineno, starting_lineno
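# The nested-lines protocol consumed above, on a tiny hand-built example (a
# sketch -- the real compiler emits these tuples from its rule templates):
#
#     write_file(("def rule():",
#                 ('INDENT', 4),
#                 ('STARTING_LINENO', 10),   # .krb source line 10 ...
#                 "pass",
#                 ('ENDING_LINENO', 10),     # ... maps to the output line
#                 'POPINDENT'),
#                'out.py')
#
# writes the indented body to out.py followed by a Krb_lineno_map tuple
# relating generated line ranges back to .krb line ranges.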
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/krb_compiler/__init__.py",
"copies": "2",
"size": "7961",
"license": "mit",
"hash": 7264692455936885000,
"line_mean": 34.8558558559,
"line_max": 79,
"alpha_frac": 0.5497487437,
"autogenerated": false,
"ratio": 3.5409252669039146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014652526699760098,
"num_lines": 222
} |
from __future__ import with_statement
import sys
import types
import os, os.path
import re
import contextlib
if sys.version_info[0:2] == (2, 5):
import itertools
class chain(object):
old_chain = itertools.chain
def __new__(cls, *args):
return cls.old_chain(*args)
@staticmethod
def from_iterable(i):
for iterable in i:
for x in iterable: yield x
itertools.chain = chain
import pyke
from pyke import contexts
debug = False
Sys_path = tuple(os.getcwd() if p == ''
else os.path.normpath(os.path.abspath(p))
for p in sys.path)
class CanNotProve(StandardError):
pass
class engine(object):
_Variables = tuple(contexts.variable('ans_%d' % i) for i in range(100))
def __init__(self, *search_paths, **kws):
r'''All search_paths are relative to reference_path.
Each search_path may be:
path -- a path relative to reference_path to search for
source files, placing the compiled knowledge bases
in '.compiled_krb'.
module -- the module's __file__ is taken as the path.
(None|path|module, target_package)
-- use target_package rather than '.compiled_krb'.
This is a package name in Python dotted name
notation relative to path. Use None to use the
compiled knowledge bases in the target_package
without scanning for source files.
kws can be: load_fc, load_bc, load_fb and load_qb. They are all
boolean valued and default to True.
'''
# import this stuff here to avoid import cycles...
global condensedPrint, pattern, fact_base, goal, rule_base, special, \
target_pkg
from pyke import (condensedPrint, pattern, fact_base, goal, rule_base,
special, target_pkg)
for keyword in kws.iterkeys():
if keyword not in ('load_fc', 'load_bc', 'load_fb', 'load_qb'):
raise TypeError("engine.__init__() got an unexpected keyword "
"argument %r" %
keyword)
self.knowledge_bases = {}
self.rule_bases = {}
special.create_for(self)
if len(search_paths) == 1 and isinstance(search_paths[0], tuple) and \
search_paths[0][0] == '*direct*' and \
isinstance(search_paths[0][1], types.ModuleType):
# secret hook for the compiler to initialize itself (so the
# compiled python module can be in an egg).
search_paths[0][1].populate(self)
else:
target_pkgs = {} # {target_package_name: target_pkg}
for path in search_paths:
self._create_target_pkg(path, target_pkgs)
for target_package in target_pkgs.itervalues():
if debug:
print >>sys.stderr, "target_package:", target_package
target_package.compile(self)
target_package.write()
target_package.load(self, **kws)
for kb in self.knowledge_bases.itervalues(): kb.init2()
for rb in self.rule_bases.itervalues(): rb.init2()
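    # Hedged construction sketches, one per search_path form described in
    # the docstring above (paths and package names are illustrative):
    #
    #     engine(__file__)                # scan this module's directory,
    #                                     # compiling into .compiled_krb
    #     engine((__file__, '.my_krb'))   # compile into a my_krb package
    #                                     # relative to this module instead
    #     engine((None, 'shipped_krb'))   # no source scan; load the
    #                                     # precompiled bases only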
def _create_target_pkg(self, path, target_pkgs):
# Does target_pkg.add_source_package.
if debug: print >> sys.stderr, "engine._create_target_pkg:", path
# First, figure out source_package_name, source_package_dir
# and target_package_name:
target_package_name = '.compiled_krb' # default
if isinstance(path, (tuple, list)):
path, target_package_name = path
if isinstance(path, types.ModuleType):
path = path.__file__
if not isinstance(path, (types.StringTypes, types.NoneType)):
raise ValueError("illegal path argument: string expected, got " + \
str(type(path)))
if debug:
print >> sys.stderr, "_create_target_pkg path:", \
repr(path)
print >> sys.stderr, "_create_target_pkg target_package_name:", \
repr(target_package_name)
# Handle the case where there are no source files (for a distributed
# app that wants to hide its knowledge bases):
if path is None:
assert target_package_name[0] != '.', \
"engine: relative target, %s, illegal " \
"with no source package" % \
target_package_name
if target_package_name not in target_pkgs:
# This import must succeed!
tp = _get_target_pkg(target_package_name +
'.compiled_pyke_files')
if tp is None:
raise AssertionError("%s: compiled with different version "
"of Pyke" %
target_package_name)
tp.reset(check_sources=False)
target_pkgs[target_package_name] = tp
return
path = os.path.normpath(os.path.abspath(path))
path_to_package, source_package_name, remainder_path, zip_file_flag = \
_pythonify_path(path)
if debug:
print >> sys.stderr, "_create_target_pkg path to " \
"_pythonify_path:", \
repr(path)
print >> sys.stderr, " path_to_package:", repr(path_to_package)
print >> sys.stderr, " source_package_name:", \
repr(source_package_name)
print >> sys.stderr, " remainder_path:", repr(remainder_path)
print >> sys.stderr, " zip_file_flag:", zip_file_flag
target_filename = None
# Convert relative target_package_name (if specified) to absolute form:
if target_package_name[0] == '.':
num_dots = \
len(target_package_name) - len(target_package_name.lstrip('.'))
if debug:
print >> sys.stderr, "_create_target_pkg num_dots:", num_dots
if num_dots == 1:
base_package = source_package_name
else:
base_package = \
'.'.join(source_package_name.split('.')[:-(num_dots - 1)])
if base_package:
target_package_name = \
base_package + '.' + target_package_name[num_dots:]
else:
target_package_name = target_package_name[num_dots:]
target_filename = \
os.path.join(path_to_package,
os.path.join(*target_package_name.split('.')),
'compiled_pyke_files.py')
if debug:
print >> sys.stderr, "_create_target_pkg " \
"absolute target_package_name:", \
target_package_name
if target_package_name in target_pkgs:
tp = target_pkgs[target_package_name]
else:
target_name = target_package_name + '.compiled_pyke_files'
if debug:
print >> sys.stderr, "_create_target_pkg target_name:", \
target_name
tp = None
try:
# See if compiled_pyke_files already exists.
tp = _get_target_pkg(target_name)
except ImportError:
pass
if tp is None:
if debug:
print >> sys.stderr, "_create_target_pkg: no target module"
tp = target_pkg.target_pkg(target_name, target_filename)
tp.reset()
target_pkgs[target_package_name] = tp
source_package_dir = \
os.path.join(path_to_package,
os.path.join(*source_package_name.split('.')))
if not os.path.isdir(source_package_dir):
source_package_dir = os.path.dirname(source_package_dir)
remainder_path = os.path.dirname(remainder_path)
tp.add_source_package(source_package_name, remainder_path,
source_package_dir)
def get_ask_module(self):
if not hasattr(self, 'ask_module'):
from pyke import ask_tty
self.ask_module = ask_tty
return self.ask_module
def reset(self):
r'''Erases all case-specific facts and deactivates all rule bases.
'''
for rb in self.rule_bases.itervalues(): rb.reset()
for kb in self.knowledge_bases.itervalues(): kb.reset()
def get_kb(self, kb_name, _new_class = None):
ans = self.knowledge_bases.get(kb_name)
if ans is None:
if _new_class: ans = _new_class(self, kb_name)
else: raise KeyError("knowledge_base %s not found" % kb_name)
return ans
def get_rb(self, rb_name):
ans = self.rule_bases.get(rb_name)
if ans is None: raise KeyError("rule_base %s not found" % rb_name)
return ans
def get_create(self, rb_name, parent = None, exclude_list = ()):
ans = self.rule_bases.get(rb_name)
if ans is None:
ans = rule_base.rule_base(self, rb_name, parent, exclude_list)
elif ans.parent != parent or ans.exclude_set != frozenset(exclude_list):
raise AssertionError("duplicate rule_base: %s" % rb_name)
return ans
def get_ke(self, kb_name, entity_name):
return self.get_kb(kb_name).get_entity_list(entity_name)
def add_universal_fact(self, kb_name, fact_name, args):
r'''Universal facts are not deleted by engine.reset.
'''
if isinstance(args, types.StringTypes):
raise TypeError("engine.add_universal_fact: "
"illegal args type, %s" % type(args))
args = tuple(args)
return self.get_kb(kb_name, fact_base.fact_base) \
.add_universal_fact(fact_name, args)
def add_case_specific_fact(self, kb_name, fact_name, args):
r'''Case specific facts are deleted by engine.reset.
'''
if isinstance(args, types.StringTypes):
raise TypeError("engine.add_case_specific_fact: "
"illegal args type, %s" % type(args))
args = tuple(args)
return self.get_kb(kb_name, fact_base.fact_base) \
.add_case_specific_fact(fact_name, args)
def assert_(self, kb_name, entity_name, args):
if isinstance(args, types.StringTypes):
raise TypeError("engine.assert_: "
"illegal args type, %s" % type(args))
args = tuple(args)
return self.get_kb(kb_name, fact_base.fact_base) \
.assert_(entity_name, args)
def activate(self, *rb_names):
r'''Activate rule bases.
This runs all forward-chaining rules in the activated rule bases, so
add your facts before doing this!
'''
for rb_name in rb_names: self.get_rb(rb_name).activate()
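    # Typical forward-chaining flow implied by the warning above (a sketch;
    # 'family', 'son_of' and 'fc_example' are illustrative names):
    #
    #     my_engine.reset()
    #     my_engine.assert_('family', 'son_of', ('bruce', 'thomas'))
    #     my_engine.activate('fc_example')   # fc rules now see the fact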
def lookup(self, kb_name, entity_name, pat_context, patterns):
return self.get_kb(kb_name).lookup(pat_context, pat_context,
entity_name, patterns)
def prove_goal(self, goal_str, **args):
r'''Proves goal_str with logic variables set to args.
This returns a context manager that you use in a with statement:
Ugly setup to use the family_relations example. You can ignore
this... :-(
>>> source_dir = os.path.dirname(os.path.dirname(__file__))
>>> family_relations_dir = \
... os.path.join(source_dir, 'examples/family_relations')
>>> sys.path.insert(0, family_relations_dir)
>>> from pyke import knowledge_engine
>>> my_engine = knowledge_engine.engine(family_relations_dir)
>>> my_engine.activate('bc_example')
OK, here's the example!
>>> with my_engine.prove_goal(
... 'family.how_related($person1, $person2, $how_related)',
... person1='bruce') as it:
... for vars, plan in it:
... print "bruce is related to", vars['person2'], "as", \
... vars['how_related']
vars is a dictionary of all of the logic variables in the goal
(without the '$') and their values. The plan is a callable python
function.
If you only want the first answer, see engine.prove_1_goal.
'''
return goal.compile(goal_str).prove(self, **args)
def prove_1_goal(self, goal_str, **args):
r'''Proves goal_str with logic variables set to args.
Returns the vars and plan for the first solution found. Raises
knowledge_engine.CanNotProve if no solutions are found.
Ugly setup to use the family_relations example. You can ignore
this... :-(
>>> source_dir = os.path.dirname(os.path.dirname(__file__))
>>> family_relations_dir = \
... os.path.join(source_dir, 'examples/family_relations')
>>> sys.path.insert(0, family_relations_dir)
>>> from pyke import knowledge_engine
>>> my_engine = knowledge_engine.engine(family_relations_dir)
>>> my_engine.activate('bc_example')
OK, here's the example!
>>> vars, plan = \
... my_engine.prove_1_goal(
... 'bc_example.how_related($person1, $person2, $how_related)',
... person1='bruce',
... person2='m_thomas')
>>> print "bruce is related to m_thomas as", vars['how_related']
bruce is related to m_thomas as ('father', 'son')
If you want more than one answer, see engine.prove_goal.
'''
return goal.compile(goal_str).prove_1(self, **args)
def prove(self, kb_name, entity_name, pat_context, patterns):
r'''Deprecated. Use engine.prove_goal.
'''
return self.get_kb(kb_name).prove(pat_context, pat_context,
entity_name, patterns)
def prove_n(self, kb_name, entity_name, fixed_args = (), num_returns = 0):
'''Returns a context manager for a generator of:
a tuple of len == num_returns, and a plan (or None).
Deprecated. Use engine.prove_goal.
'''
if isinstance(fixed_args, types.StringTypes):
raise TypeError("engine.prove_n: fixed_args must not be a string, "
"did you forget a , (%(arg)s) => (%(arg)s,)?" %
{'arg': repr(fixed_args)})
def gen():
context = contexts.simple_context()
vars = self._Variables[:num_returns]
try:
with self.prove(kb_name, entity_name, context,
tuple(pattern.pattern_literal(arg)
for arg in fixed_args) + vars) \
as it:
for plan in it:
final = {}
ans = tuple(context.lookup_data(var.name, final = final)
for var in vars)
if plan: plan = plan.create_plan(final)
yield ans, plan
finally:
context.done()
return contextlib.closing(gen())
def prove_1(self, kb_name, entity_name, fixed_args = (), num_returns = 0):
r'''Returns a tuple of len == num_returns, and a plan (or None).
Deprecated. Use engine.prove_1_goal.
'''
try:
# All we need is the first one!
with self.prove_n(kb_name, entity_name, fixed_args, num_returns) \
as it:
return iter(it).next()
except StopIteration:
raise CanNotProve("Can not prove %s.%s%s" %
(kb_name, entity_name,
condensedPrint.cprint(
fixed_args + self._Variables[:num_returns])))
def print_stats(self, f = sys.stdout):
for kb \
in sorted(self.knowledge_bases.itervalues(), key=lambda kb: kb.name):
kb.print_stats(f)
def trace(self, rb_name, rule_name):
self.get_rb(rb_name).trace(rule_name)
def untrace(self, rb_name, rule_name):
self.get_rb(rb_name).untrace(rule_name)
Compiled_suffix = None
def _get_target_pkg(target_name):
global Compiled_suffix
if debug: print >> sys.stderr, "_get_target_pkg", target_name
module = target_pkg.import_(target_name)
path = module.__file__
if debug: print >> sys.stderr, "_get_target_pkg __file__ is", path
do_reload = False
if path.endswith('.py'):
if Compiled_suffix is None:
# We don't know whether this compiles to .pyc or .pyo yet, so do a
# reload just to be sure...
do_reload = True
else:
source_path = path
path = path[:-3] + Compiled_suffix
else:
assert path.endswith(('.pyc', '.pyo')), \
'unknown file extension: %r' % (path,)
Compiled_suffix = path[-4:]
source_path = path[:-1]
if not do_reload:
if debug:
print >> sys.stderr, "source path is", source_path
if os.path.exists(source_path):
print >> sys.stderr, "source path exists"
print >> sys.stderr, "source path mtime", \
os.path.getmtime(source_path)
else:
print >> sys.stderr, "source path does not exist"
print >> sys.stderr, "compiled path is", path
if os.path.exists(path):
print >> sys.stderr, "compiled path exists"
print >> sys.stderr, "compiled path mtime", \
os.path.getmtime(path)
else:
print >> sys.stderr, "compiled path does not exist"
if not os.path.exists(path) or \
os.path.exists(source_path) and \
os.path.getmtime(source_path) > os.path.getmtime(path):
do_reload = True
if do_reload:
if debug:
print >> sys.stderr, "_get_target_pkg doing reload for", target_name
module = reload(module)
suffix = module.__file__[-4:]
if suffix in ('.pyc', '.pyo'):
Compiled_suffix = suffix
if getattr(module, 'target_pkg_version', None) != pyke.target_pkg_version:
if debug:
print >> sys.stderr, "_get_target_pkg doing invalid version for", \
target_name
return None
return getattr(module, 'get_target_pkg')()
def _pythonify_path(path):
r'''Returns path_to_package, package_name, remainder_path, zip_file_flag.
If zip_file_flag is set, remainder_path is ''.
'''
path = os.path.normpath(os.path.abspath(path))
if path.endswith(('.py', '.pyw', '.pyc', '.pyo')):
path = os.path.dirname(path)
package_name = ''
remainder_path = ''
remainder_package_name = ''
ans = '', '', path, False
while path:
if in_sys_path(path):
if len(remainder_path) < len(ans[2]) or \
len(remainder_path) == len(ans[2]) and \
len(package_name) > len(ans[1]):
if os.path.isdir(path):
ans = path, package_name, remainder_path, False
else:
ans = path, remainder_package_name, '', True
parent_path, dir = os.path.split(path)
if parent_path == '' or parent_path == path:
break
if _is_package_dir(path):
if package_name:
package_name = dir + '.' + package_name
else:
package_name = dir
else:
package_path = os.path.join(*package_name.split('.'))
package_name = ''
if remainder_path:
remainder_path = os.path.join(dir, package_path, remainder_path)
else:
remainder_path = os.path.join(dir, package_path)
if remainder_package_name:
remainder_package_name = dir + '.' + remainder_package_name
else:
remainder_package_name = dir
path = parent_path
return ans
def _is_package_dir(path):
if not os.path.isdir(path): return False
return os.path.exists(os.path.join(path, '__init__.py')) or \
os.path.exists(os.path.join(path, '__init__.pyw')) or \
os.path.exists(os.path.join(path, '__init__.pyc')) or \
os.path.exists(os.path.join(path, '__init__.pyo'))
def in_sys_path(path):
r'''Assumes path is a normalized abspath.
'''
return path in Sys_path
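# Hedged illustration of _pythonify_path (paths invented for the example):
# with /site-packages on sys.path and __init__.py files making pkg and
# pkg/sub packages,
#
#     _pythonify_path('/site-packages/pkg/sub')
#     # -> ('/site-packages', 'pkg.sub', '', False)
#
# i.e. the sys.path prefix, the dotted package beneath it, any non-package
# remainder path, and a zip-file flag.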
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/knowledge_engine.py",
"copies": "1",
"size": "22320",
"license": "mit",
"hash": -4063833356434544600,
"line_mean": 40.4081632653,
"line_max": 80,
"alpha_frac": 0.5420941798,
"autogenerated": false,
"ratio": 4.1156186612576064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0069556757168919466,
"num_lines": 539
} |
from __future__ import with_statement
import sys
import types
import os, os.path
import re
import contextlib
if sys.version_info[0] < 3:
import itertools
class chain(object):
old_chain = itertools.chain
def __new__(cls, *args):
return cls.old_chain(*args)
@staticmethod
def from_iterable(i):
for iterable in i:
for x in iterable: yield x
itertools.chain = chain
import pyke
from pyke import contexts
debug = False
Sys_path = tuple(os.getcwd() if p == ''
else os.path.normpath(os.path.abspath(p))
for p in sys.path)
class CanNotProve(StandardError):
pass
class engine(object):
_Variables = tuple(contexts.variable('ans_%d' % i) for i in range(100))
def __init__(self, *search_paths, **kws):
r'''All search_paths are relative to reference_path.
Each search_path may be:
path -- a path relative to reference_path to search for
source files, placing the compiled knowledge bases
in '.compiled_krb'.
module -- the module's __file__ is taken as the path.
(None|path|module, target_package)
-- use target_package rather than '.compiled_krb'.
This is a package name in Python dotted name
notation relative to path. Use None to use the
compiled knowledge bases in the target_package
without scanning for source files.
kws can be: load_fc, load_bc, load_fb and load_qb. They are all
boolean valued and default to True.
'''
# import this stuff here to avoid import cycles...
global condensedPrint, pattern, fact_base, goal, rule_base, special, \
target_pkg
from pyke import (condensedPrint, pattern, fact_base, goal, rule_base,
special, target_pkg)
for keyword in kws.iterkeys():
if keyword not in ('load_fc', 'load_bc', 'load_fb', 'load_qb'):
raise TypeError("engine.__init__() got an unexpected keyword "
"argument %r" %
keyword)
self.knowledge_bases = {}
self.rule_bases = {}
special.create_for(self)
if len(search_paths) == 1 and isinstance(search_paths[0], tuple) and \
search_paths[0][0] == '*direct*' and \
isinstance(search_paths[0][1], types.ModuleType):
# secret hook for the compiler to initialize itself (so the
# compiled python module can be in an egg).
search_paths[0][1].populate(self)
else:
target_pkgs = {} # {target_package_name: target_pkg}
for path in search_paths:
self._create_target_pkg(path, target_pkgs)
for target_package in target_pkgs.itervalues():
if debug:
print >>sys.stderr, "target_package:", target_package
target_package.compile(self)
target_package.write()
target_package.load(self, **kws)
for kb in self.knowledge_bases.itervalues(): kb.init2()
for rb in self.rule_bases.itervalues(): rb.init2()
def _create_target_pkg(self, path, target_pkgs):
# Does target_pkg.add_source_package.
if debug: print >> sys.stderr, "engine._create_target_pkg:", path
# First, figure out source_package_name, source_package_dir
# and target_package_name:
target_package_name = '.compiled_krb' # default
if isinstance(path, (tuple, list)):
path, target_package_name = path
if isinstance(path, types.ModuleType):
path = path.__file__
if not isinstance(path, (types.StringTypes, types.NoneType)):
raise ValueError("illegal path argument: string expected, got " + \
str(type(path)))
if debug:
print >> sys.stderr, "_create_target_pkg path:", \
repr(path)
print >> sys.stderr, "_create_target_pkg target_package_name:", \
repr(target_package_name)
# Handle the case where there are no source files (for a distributed
# app that wants to hide its knowledge bases):
if path is None:
assert target_package_name[0] != '.', \
"engine: relative target, %s, illegal " \
"with no source package" % \
target_package_name
if target_package_name not in target_pkgs:
# This import must succeed!
tp = _get_target_pkg(target_package_name +
'.compiled_pyke_files')
if tp is None:
raise AssertionError("%s: compiled with different version "
"of Pyke" %
target_package_name)
tp.reset(check_sources=False)
target_pkgs[target_package_name] = tp
return
path = os.path.normpath(os.path.abspath(path))
path_to_package, source_package_name, remainder_path, zip_file_flag = \
_pythonify_path(path)
if debug:
print >> sys.stderr, "_create_target_pkg path to " \
"_pythonify_path:", \
repr(path)
print >> sys.stderr, " path_to_package:", repr(path_to_package)
print >> sys.stderr, " source_package_name:", \
repr(source_package_name)
print >> sys.stderr, " remainder_path:", repr(remainder_path)
print >> sys.stderr, " zip_file_flag:", zip_file_flag
target_filename = None
# Convert relative target_package_name (if specified) to absolute form:
if target_package_name[0] == '.':
num_dots = \
len(target_package_name) - len(target_package_name.lstrip('.'))
if debug:
print >> sys.stderr, "_create_target_pkg num_dots:", num_dots
if num_dots == 1:
base_package = source_package_name
else:
base_package = \
'.'.join(source_package_name.split('.')[:-(num_dots - 1)])
if base_package:
target_package_name = \
base_package + '.' + target_package_name[num_dots:]
else:
target_package_name = target_package_name[num_dots:]
target_filename = \
os.path.join(path_to_package,
os.path.join(*target_package_name.split('.')),
'compiled_pyke_files.py')
if debug:
print >> sys.stderr, "_create_target_pkg " \
"absolute target_package_name:", \
target_package_name
if target_package_name in target_pkgs:
tp = target_pkgs[target_package_name]
else:
target_name = target_package_name + '.compiled_pyke_files'
if debug:
print >> sys.stderr, "_create_target_pkg target_name:", \
target_name
tp = None
try:
# See if compiled_pyke_files already exists.
tp = _get_target_pkg(target_name)
except ImportError:
pass
if tp is None:
if debug:
print >> sys.stderr, "_create_target_pkg: no target module"
tp = target_pkg.target_pkg(target_name, target_filename)
tp.reset()
target_pkgs[target_package_name] = tp
source_package_dir = \
os.path.join(path_to_package,
os.path.join(*source_package_name.split('.')))
if not os.path.isdir(source_package_dir):
source_package_dir = os.path.dirname(source_package_dir)
remainder_path = os.path.dirname(remainder_path)
tp.add_source_package(source_package_name, remainder_path,
source_package_dir)
def get_ask_module(self):
if not hasattr(self, 'ask_module'):
from pyke import ask_tty
self.ask_module = ask_tty
return self.ask_module
def reset(self):
r'''Erases all case-specific facts and deactivates all rule bases.
'''
for rb in self.rule_bases.itervalues(): rb.reset()
for kb in self.knowledge_bases.itervalues(): kb.reset()
def get_kb(self, kb_name, _new_class = None):
ans = self.knowledge_bases.get(kb_name)
if ans is None:
if _new_class: ans = _new_class(self, kb_name)
else: raise KeyError("knowledge_base %s not found" % kb_name)
return ans
def get_rb(self, rb_name):
ans = self.rule_bases.get(rb_name)
if ans is None: raise KeyError("rule_base %s not found" % rb_name)
return ans
def get_create(self, rb_name, parent = None, exclude_list = ()):
ans = self.rule_bases.get(rb_name)
if ans is None:
ans = rule_base.rule_base(self, rb_name, parent, exclude_list)
elif ans.parent != parent or ans.exclude_set != frozenset(exclude_list):
raise AssertionError("duplicate rule_base: %s" % rb_name)
return ans
def get_ke(self, kb_name, entity_name):
return self.get_kb(kb_name).get_entity_list(entity_name)
def add_universal_fact(self, kb_name, fact_name, args):
r'''Universal facts are not deleted by engine.reset.
'''
if isinstance(args, types.StringTypes):
raise TypeError("engine.add_universal_fact: "
"illegal args type, %s" % type(args))
args = tuple(args)
return self.get_kb(kb_name, fact_base.fact_base) \
.add_universal_fact(fact_name, args)
def add_case_specific_fact(self, kb_name, fact_name, args):
r'''Case specific facts are deleted by engine.reset.
'''
if isinstance(args, types.StringTypes):
raise TypeError("engine.add_case_specific_fact: "
"illegal args type, %s" % type(args))
args = tuple(args)
return self.get_kb(kb_name, fact_base.fact_base) \
.add_case_specific_fact(fact_name, args)
def assert_(self, kb_name, entity_name, args):
if isinstance(args, types.StringTypes):
raise TypeError("engine.assert_: "
"illegal args type, %s" % type(args))
args = tuple(args)
return self.get_kb(kb_name, fact_base.fact_base) \
.assert_(entity_name, args)
def activate(self, *rb_names):
r'''Activate rule bases.
This runs all forward-chaining rules in the activated rule bases, so
add your facts before doing this!
'''
for rb_name in rb_names: self.get_rb(rb_name).activate()
def lookup(self, kb_name, entity_name, pat_context, patterns):
return self.get_kb(kb_name).lookup(pat_context, pat_context,
entity_name, patterns)
def prove_goal(self, goal_str, **args):
r'''Proves goal_str with logic variables set to args.
This returns a context manager that you use in a with statement:
Ugly setup to use the family_relations example. You can ignore
this... :-(
>>> source_dir = os.path.dirname(os.path.dirname(__file__))
>>> family_relations_dir = \
... os.path.join(source_dir, 'examples/family_relations')
>>> sys.path.insert(0, family_relations_dir)
>>> from pyke import knowledge_engine
>>> my_engine = knowledge_engine.engine(family_relations_dir)
>>> my_engine.activate('bc_example')
OK, here's the example!
>>> with my_engine.prove_goal(
... 'family.how_related($person1, $person2, $how_related)',
... person1='bruce') as it:
... for vars, plan in it:
... print "bruce is related to", vars['person2'], "as", \
... vars['how_related']
vars is a dictionary of all of the logic variables in the goal
(without the '$') and their values. The plan is a callable python
function.
If you only want the first answer, see engine.prove_1_goal.
'''
return goal.compile(goal_str).prove(self, **args)
def prove_1_goal(self, goal_str, **args):
r'''Proves goal_str with logic variables set to args.
Returns the vars and plan for the first solution found. Raises
knowledge_engine.CanNotProve if no solutions are found.
Ugly setup to use the family_relations example. You can ignore
this... :-(
>>> source_dir = os.path.dirname(os.path.dirname(__file__))
>>> family_relations_dir = \
... os.path.join(source_dir, 'examples/family_relations')
>>> sys.path.insert(0, family_relations_dir)
>>> from pyke import knowledge_engine
>>> my_engine = knowledge_engine.engine(family_relations_dir)
>>> my_engine.activate('bc_example')
OK, here's the example!
>>> vars, plan = \
... my_engine.prove_1_goal(
... 'bc_example.how_related($person1, $person2, $how_related)',
... person1='bruce',
... person2='m_thomas')
>>> print "bruce is related to m_thomas as", vars['how_related']
bruce is related to m_thomas as ('father', 'son')
If you want more than one answer, see engine.prove_goal.
'''
return goal.compile(goal_str).prove_1(self, **args)
def prove(self, kb_name, entity_name, pat_context, patterns):
r'''Deprecated. Use engine.prove_goal.
'''
return self.get_kb(kb_name).prove(pat_context, pat_context,
entity_name, patterns)
def prove_n(self, kb_name, entity_name, fixed_args = (), num_returns = 0):
'''Returns a context manager for a generator of:
a tuple of len == num_returns, and a plan (or None).
Deprecated. Use engine.prove_goal.
'''
if isinstance(fixed_args, types.StringTypes):
raise TypeError("engine.prove_n: fixed_args must not be a string, "
"did you forget a , (%(arg)s) => (%(arg)s,)?" %
{'arg': repr(fixed_args)})
def gen():
context = contexts.simple_context()
vars = self._Variables[:num_returns]
try:
with self.prove(kb_name, entity_name, context,
tuple(pattern.pattern_literal(arg)
for arg in fixed_args) + vars) \
as it:
for plan in it:
final = {}
ans = tuple(context.lookup_data(var.name, final = final)
for var in vars)
if plan: plan = plan.create_plan(final)
yield ans, plan
finally:
context.done()
return contextlib.closing(gen())
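    # Sketch of this deprecated interface (hypothetical kb and goal names):
    # the generator yields (ans_tuple, plan) pairs, where ans_tuple holds
    # the values bound to the trailing num_returns pattern variables:
    #
    #   with engine.prove_n('family', 'son_of', ('bruce',), 1) as it:
    #       for (father,), plan in it:
    #           print "bruce is the son of", father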
def prove_1(self, kb_name, entity_name, fixed_args = (), num_returns = 0):
r'''Returns a tuple of len == num_returns, and a plan (or None).
Deprecated. Use engine.prove_1_goal.
'''
try:
# All we need is the first one!
with self.prove_n(kb_name, entity_name, fixed_args, num_returns) \
as it:
return iter(it).next()
except StopIteration:
raise CanNotProve("Can not prove %s.%s%s" %
(kb_name, entity_name,
condensedPrint.cprint(
fixed_args + self._Variables[:num_returns])))
def print_stats(self, f = sys.stdout):
for kb \
in sorted(self.knowledge_bases.itervalues(), key=lambda kb: kb.name):
kb.print_stats(f)
def trace(self, rb_name, rule_name):
self.get_rb(rb_name).trace(rule_name)
def untrace(self, rb_name, rule_name):
self.get_rb(rb_name).untrace(rule_name)
Compiled_suffix = None
def _get_target_pkg(target_name):
global Compiled_suffix
if debug: print >> sys.stderr, "_get_target_pkg", target_name
module = target_pkg.import_(target_name)
path = module.__file__
if debug: print >> sys.stderr, "_get_target_pkg __file__ is", path
do_reload = False
if path.endswith('.py'):
if Compiled_suffix is None:
# We don't know whether this compiles to .pyc or .pyo yet, so do a
# reload just to be sure...
do_reload = True
else:
source_path = path
path = path[:-3] + Compiled_suffix
else:
assert path.endswith(('.pyc', '.pyo')), \
'unknown file extension: %r' % (path,)
Compiled_suffix = path[-4:]
source_path = path[:-1]
if not do_reload:
if debug:
print >> sys.stderr, "source path is", source_path
if os.path.exists(source_path):
print >> sys.stderr, "source path exists"
print >> sys.stderr, "source path mtime", \
os.path.getmtime(source_path)
else:
print >> sys.stderr, "source path does not exist"
print >> sys.stderr, "compiled path is", path
if os.path.exists(path):
print >> sys.stderr, "compiled path exists"
print >> sys.stderr, "compiled path mtime", \
os.path.getmtime(path)
else:
print >> sys.stderr, "compiled path does not exist"
if not os.path.exists(path) or \
os.path.exists(source_path) and \
os.path.getmtime(source_path) > os.path.getmtime(path):
do_reload = True
if do_reload:
if debug:
print >> sys.stderr, "_get_target_pkg doing reload for", target_name
module = reload(module)
suffix = module.__file__[-4:]
if suffix in ('.pyc', '.pyo'):
Compiled_suffix = suffix
if getattr(module, 'target_pkg_version', None) != pyke.target_pkg_version:
if debug:
print >> sys.stderr, "_get_target_pkg doing invalid version for", \
target_name
return None
return getattr(module, 'get_target_pkg')()
def _pythonify_path(path):
r'''Returns path_to_package, package_name, remainder_path, zip_file_flag.
If zip_file_flag is set, remainder_path is ''.
'''
path = os.path.normpath(os.path.abspath(path))
if path.endswith(('.py', '.pyw', '.pyc', '.pyo')):
path = os.path.dirname(path)
package_name = ''
remainder_path = ''
remainder_package_name = ''
ans = '', '', path, False
while path:
if in_sys_path(path):
if len(remainder_path) < len(ans[2]) or \
len(remainder_path) == len(ans[2]) and \
len(package_name) > len(ans[1]):
if os.path.isdir(path):
ans = path, package_name, remainder_path, False
else:
ans = path, remainder_package_name, '', True
parent_path, dir = os.path.split(path)
if parent_path == '' or parent_path == path:
break
if _is_package_dir(path):
if package_name:
package_name = dir + '.' + package_name
else:
package_name = dir
else:
package_path = os.path.join(*package_name.split('.'))
package_name = ''
if remainder_path:
remainder_path = os.path.join(dir, package_path, remainder_path)
else:
remainder_path = os.path.join(dir, package_path)
if remainder_package_name:
remainder_package_name = dir + '.' + remainder_package_name
else:
remainder_package_name = dir
path = parent_path
return ans
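# Illustrative result (hypothetical paths; assumes /usr/lib/python is on
# sys.path, that examples/ and family/ are package dirs with __init__.py
# files, and that no higher directory is on sys.path):
#
#   _pythonify_path('/usr/lib/python/examples/family/run.py')
#     => ('/usr/lib/python', 'examples.family', '', False)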
def _is_package_dir(path):
if not os.path.isdir(path): return False
return os.path.exists(os.path.join(path, '__init__.py')) or \
os.path.exists(os.path.join(path, '__init__.pyw')) or \
os.path.exists(os.path.join(path, '__init__.pyc')) or \
os.path.exists(os.path.join(path, '__init__.pyo'))
def in_sys_path(path):
r'''Assumes path is a normalized abspath.
'''
return path in Sys_path
| {
"repo_name": "e-loue/pyke",
"path": "pyke/knowledge_engine.py",
"copies": "1",
"size": "22312",
"license": "mit",
"hash": 3638767033191368000,
"line_mean": 40.3933209647,
"line_max": 80,
"alpha_frac": 0.5421989153,
"autogenerated": false,
"ratio": 4.117940199335548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0069556757168919466,
"num_lines": 539
} |
import functools
from pyke import fc_rule, immutable_dict
class bc_rule(fc_rule.rule):
''' This represents a single backward-chaining rule. Most of its
behavior is inherited.
'''
def __init__(self, name, rule_base, goal_name, bc_fn, plan_fn,
goal_arg_patterns, plan_vars, patterns):
super(bc_rule, self).__init__(name, rule_base, patterns)
self.goal_name = goal_name
self.orig_bc_fn = bc_fn
self.bc_fn = bc_fn
self.plan_fn = plan_fn
self.goal_arg_pats = goal_arg_patterns
self.plan_vars = plan_vars
rule_base.add_bc_rule(self)
def goal_arg_patterns(self):
return self.goal_arg_pats
def make_plan(self, context, final):
return functools.partial(self.plan_fn,
immutable_dict.immutable_dict(
(var_name, context.lookup_data(var_name, final=final))
for var_name in self.plan_vars))
def trace(self):
self.bc_fn = self.surrogate
def surrogate(self, rule, arg_patterns, arg_context):
print "%s.%s%s" % (rule.rule_base.root_name, rule.name,
tuple(arg.as_data(arg_context, True)
for arg in arg_patterns))
for prototype_plan in self.orig_bc_fn(rule, arg_patterns, arg_context):
print "%s.%s succeeded with %s" % \
(rule.rule_base.root_name, rule.name,
tuple(arg.as_data(arg_context, True)
for arg in arg_patterns))
yield prototype_plan
print "%s.%s failed" % (rule.rule_base.root_name, rule.name)
def untrace(self):
self.bc_fn = self.orig_bc_fn
| {
"repo_name": "e-loue/pyke",
"path": "pyke/bc_rule.py",
"copies": "2",
"size": "2859",
"license": "mit",
"hash": 232469461288321700,
"line_mean": 41.0294117647,
"line_max": 79,
"alpha_frac": 0.6469559132,
"autogenerated": false,
"ratio": 3.8517520215633425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5498707934763343,
"avg_score": null,
"num_lines": null
} |
import itertools
from pyke import knowledge_base
class StopProof(Exception): pass
class stopIteratorContext(object):
def __init__(self, rule_base, iterator_context):
self.rule_base = rule_base
self.context = iterator_context
def __enter__(self):
return stopIterator(self.rule_base, self.context.__enter__())
def __exit__(self, type, value, tb):
self.context.__exit__(type, value, tb)
class stopIterator(object):
def __init__(self, rule_base, iterator):
self.rule_base = rule_base
self.iterator = iter(iterator)
def __iter__(self): return self
def next(self):
if self.iterator:
try:
return self.iterator.next()
except StopProof:
self.iterator = None
self.rule_base.num_bc_rule_failures += 1
raise StopIteration
class chain_context(object):
def __init__(self, outer_it):
self.outer_it = outer_iterable(outer_it)
def __enter__(self):
return itertools.chain.from_iterable(self.outer_it)
def __exit__(self, type, value, tb): self.outer_it.close()
class outer_iterable(object):
def __init__(self, outer_it):
self.outer_it = iter(outer_it)
self.inner_it = None
def __iter__(self): return self
def close(self):
if hasattr(self.inner_it, '__exit__'):
self.inner_it.__exit__(None, None, None)
elif hasattr(self.inner_it, 'close'): self.inner_it.close()
if hasattr(self.outer_it, 'close'): self.outer_it.close()
def next(self):
ans = self.outer_it.next()
if hasattr(ans, '__enter__'):
self.inner_it = ans
return ans.__enter__()
ans = iter(ans)
self.inner_it = ans
return ans
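# chain_context flattens an iterable of context managers (or plain
# iterables) into one generator, closing each inner one as it is consumed.
# A minimal self-contained sketch of that behavior:
#
#   import contextlib
#   def numbers(n):
#       yield n
#   with chain_context(contextlib.closing(numbers(i)) for i in (1, 2)) as it:
#       print list(it) # prints [1, 2]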
class rule_base(knowledge_base.knowledge_base):
def __init__(self, engine, name, parent = None, exclude_list = ()):
super(rule_base, self).__init__(engine, name, rule_list, False)
if name in engine.rule_bases:
raise AssertionError("rule_base %s already exists" % name)
if name in engine.knowledge_bases:
raise AssertionError("name clash between rule_base '%s' and "
"fact_base '%s'" % (name, name))
engine.rule_bases[name] = self
self.fc_rules = []
self.parent = parent
self.exclude_set = frozenset(exclude_list)
self.rules = {} # {name: rule}
def add_fc_rule(self, fc_rule):
if fc_rule.name in self.rules:
raise AssertionError("%s rule_base: duplicate rule name: %s" %
(self.name, fc_rule.name))
self.rules[fc_rule.name] = fc_rule
self.fc_rules.append(fc_rule)
def add_bc_rule(self, bc_rule):
if bc_rule.name in self.rules:
raise AssertionError("%s rule_base: duplicate rule name: %s" %
(self.name, bc_rule.name))
self.rules[bc_rule.name] = bc_rule
self.get_entity_list(bc_rule.goal_name).add_bc_rule(bc_rule)
def init2(self):
if not self.initialized:
self.initialized = True
if self.parent:
parent = self.engine.rule_bases.get(self.parent)
if parent is None:
raise KeyError("rule_base %s: parent %s not found" % \
(self.name, self.parent))
self.parent = parent
self.parent.init2()
self.root_name = self.parent.root_name
else:
self.root_name = self.name
self.reset()
def derived_from(self, rb):
parent = self.parent
while parent:
if parent == rb: return True
parent = parent.parent
return False
def register_fc_rules(self, stop_at_rb):
rb = self
while rb is not stop_at_rb:
for fc_rule in rb.fc_rules: fc_rule.register_rule()
if not rb.parent: break
rb = rb.parent
def run_fc_rules(self, stop_at_rb):
rb = self
while rb is not stop_at_rb:
for fc_rule in rb.fc_rules: fc_rule.run()
if not rb.parent: break
rb = rb.parent
def activate(self):
current_rb = self.engine.knowledge_bases.get(self.root_name)
if current_rb:
assert self.derived_from(current_rb), \
"%s.activate(): not derived from current rule_base, %s" % \
(self.name, current_rb.name)
self.engine.knowledge_bases[self.root_name] = self
self.register_fc_rules(current_rb)
self.run_fc_rules(current_rb)
def reset(self):
if self.root_name in self.engine.knowledge_bases:
del self.engine.knowledge_bases[self.root_name]
for fc_rule in self.fc_rules: fc_rule.reset()
self.num_fc_rules_triggered = 0
self.num_fc_rules_rerun = 0
self.num_prove_calls = 0
self.num_bc_rules_matched = 0
self.num_bc_rule_successes = 0
self.num_bc_rule_failures = 0
def gen_rule_lists_for(self, goal_name):
rule_base = self
while True:
rl = rule_base.entity_lists.get(goal_name)
if rl: yield rl
if rule_base.parent and goal_name not in rule_base.exclude_set:
rule_base = rule_base.parent
else:
break
def prove(self, bindings, pat_context, goal_name, patterns):
self.num_prove_calls += 1
return stopIteratorContext(self,
chain_context(
rl.prove(bindings, pat_context, patterns)
for rl in self.gen_rule_lists_for(goal_name)))
def print_stats(self, f):
f.write("%s: %d fc_rules, %d triggered, %d rerun\n" %
(self.name, len(self.fc_rules), self.num_fc_rules_triggered,
self.num_fc_rules_rerun))
num_bc_rules = sum(rule_list.num_bc_rules()
for rule_list in self.entity_lists.itervalues())
f.write("%s: %d bc_rules, %d goals, %d rules matched\n" %
(self.name, num_bc_rules, self.num_prove_calls,
self.num_bc_rules_matched))
f.write("%s %d successes, %d failures\n" %
(' ' * len(self.name), self.num_bc_rule_successes,
self.num_bc_rule_failures))
if self.parent: self.parent.print_stats(f)
def trace(self, rule_name):
for rule_list in self.entity_lists.itervalues():
if rule_list.trace(rule_name): return
raise KeyError("trace: rule %s not found" % rule_name)
def untrace(self, rule_name):
for rule_list in self.entity_lists.itervalues():
if rule_list.untrace(rule_name): return
raise KeyError("untrace: rule %s not found" % rule_name)
class rule_list(knowledge_base.knowledge_entity_list):
def __init__(self, name):
self.name = name
self.bc_rules = []
def add_bc_rule(self, bc_rule):
self.bc_rules.append(bc_rule)
def prove(self, bindings, pat_context, patterns):
""" Returns a context manager for a generator that binds patterns to
successively proven goals, yielding the plan (or None, if no plan)
for each successful match. Undoes bindings upon continuation,
so that no bindings remain at StopIteration.
"""
return chain_context(
bc_rule.bc_fn(bc_rule, patterns, pat_context)
for bc_rule in self.bc_rules)
def num_bc_rules(self):
return len(self.bc_rules)
def trace(self, rule_name):
for bc_rule in self.bc_rules:
if bc_rule.name == rule_name:
bc_rule.trace()
return True
return False
def untrace(self, rule_name):
for bc_rule in self.bc_rules:
if bc_rule.name == rule_name:
bc_rule.untrace()
return True
return False
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/rule_base.py",
"copies": "2",
"size": "9221",
"license": "mit",
"hash": 1563472559245670400,
"line_mean": 36.3279352227,
"line_max": 79,
"alpha_frac": 0.5853579176,
"autogenerated": false,
"ratio": 3.789560213727908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011875286033163419,
"num_lines": 247
} |
import pyke
def fc_head(rb_name):
return (
"# %s_fc.py" % rb_name,
"",
"from __future__ import with_statement",
"from pyke import contexts, pattern, fc_rule, knowledge_base",
"",
"pyke_version = %r" % pyke.version,
"compiler_version = %r" % pyke.compiler_version,
)
def bc_head(rb_name):
return (
"# %s_bc.py" % rb_name,
"",
"from __future__ import with_statement",
"import itertools",
"from pyke import contexts, pattern, bc_rule",
"",
"pyke_version = %r" % pyke.version,
"compiler_version = %r" % pyke.compiler_version,
)
def plan_head(rb_name):
return (
"# %s_plans.py" % rb_name,
"",
"pyke_version = %r" % pyke.version,
"compiler_version = %r" % pyke.compiler_version,
)
def goal(rb_name, rule_name, goal_info, pred_plan_lines, python_lines):
# returns plan_lines, goal_fn_head, goal_fn_tail, goal_decl_lines
goal, goal_name, pattern_args, taking, start_lineno, end_lineno = goal_info
assert goal == 'goal'
goal_fn_head = (
"",
"def %s(rule, arg_patterns, arg_context):" % rule_name,
("INDENT", 2),
"engine = rule.rule_base.engine",
"patterns = rule.goal_arg_patterns()",
"if len(arg_patterns) == len(patterns):",
("INDENT", 2),
"context = contexts.bc_context(rule)",
"try:",
("INDENT", 2),
("STARTING_LINENO", start_lineno),
"if all(itertools.imap(lambda pat, arg:",
("INDENT", 2),
("INDENT", 20),
("INDENT", 2),
"pat.match_pattern(context, context,",
("INDENT", 18),
"arg, arg_context),",
"POPINDENT",
"POPINDENT",
"patterns,",
"arg_patterns)):",
("ENDING_LINENO", end_lineno),
"POPINDENT",
"rule.rule_base.num_bc_rules_matched += 1",
)
goal_fn_tail = (
"POPINDENT",
"POPINDENT",
"finally:",
("INDENT", 2),
"context.done()",
"POPINDENT",
"POPINDENT",
"POPINDENT",
)
if not taking and not pred_plan_lines and not python_lines:
plan_fn_name = "None"
plan_lines = ()
else:
plan_fn_name = "%s_plans.%s" % (rb_name, rule_name)
def_start = "def %s" % rule_name
taking = [line.strip() for line in taking if line.strip()]
if not taking:
def_head = def_start + '(context):'
else:
if taking[0][0] != '(' or taking[-1][-1] != ')':
from pyke.krb_compiler import scanner
end = scanner.lexer.lexpos
taking_start = scanner.lexer.lexdata.rfind('taking', 0, end)
if taking_start < 0:
raise SyntaxError("'taking' clause: missing parenthesis",
scanner.syntaxerror_params())
taking_start += len('taking')
            while taking_start < len(scanner.lexer.lexdata) and \
                  scanner.lexer.lexdata[taking_start].isspace():
taking_start += 1
lineno = scanner.lexer.lineno - \
scanner.lexer.lexdata.count('\n', taking_start,
scanner.lexer.lexpos)
raise SyntaxError("'taking' clause: missing parenthesis",
scanner.syntaxerror_params(taking_start,
lineno))
taking[0] = def_start + "(context, " + taking[0][1:]
taking[-1] += ':'
if len(taking) == 1:
def_head = taking[0]
else:
def_head = (taking[0],
('INDENT', 4),
tuple(taking[1:]),
"POPINDENT",
)
plan_lines = ("",
def_head,
('INDENT', 2),
pred_plan_lines,
python_lines,
"POPINDENT",
)
goal_decl_lines = (
"",
"bc_rule.bc_rule(%r, This_rule_base, %r," % (rule_name, goal_name),
("INDENT", 16),
"%s, %s," % (rule_name, plan_fn_name),
) + list_format(pattern_args, "(", "),")
return plan_lines, goal_fn_head, goal_fn_tail, goal_decl_lines
def add_start(l, start):
'''
>>> add_start(('a', 'b', 'c'), '^')
(0, ['^a', 'b', 'c'])
>>> add_start(('POPINDENT', ('INDENT', 2), ((('b',), 'c'),),), '^')
(2, ['POPINDENT', ('INDENT', 2), ((('^b',), 'c'),)])
>>> add_start((('POPINDENT', ('INDENT', 2)), ((('b',), 'c'),),), '^')
(1, [('POPINDENT', ('INDENT', 2)), ((('^b',), 'c'),)])
>>> add_start(('POPINDENT', ('INDENT', 2)), '^')
(0, ['^', 'POPINDENT', ('INDENT', 2)])
'''
ans = list(l)
for first, x in enumerate(ans):
if x != 'POPINDENT' and \
not (isinstance(x, (tuple, list)) and x[0] == 'INDENT'):
if not isinstance(x, (tuple, list)):
ans[first] = start + ans[first]
return first, ans
f, x2 = add_start(x, start)
if len(x) == len(x2):
ans[first] = tuple(x2)
return first, ans
first = 0
ans.insert(first, start)
return first, ans
def add_end(l, end):
'''
>>> add_end(('a', 'b', 'c'), '^')
(2, ['a', 'b', 'c^'])
>>> add_end(((((('b',), 'c'),),), 'POPINDENT', ('INDENT', 2)), '^')
(0, [(((('b',), 'c^'),),), 'POPINDENT', ('INDENT', 2)])
>>> add_end((((('b',), 'c'),), ('POPINDENT', ('INDENT', 2))), '^')
(0, [((('b',), 'c^'),), ('POPINDENT', ('INDENT', 2))])
>>> add_end(('POPINDENT', ('INDENT', 2)), '^')
(2, ['POPINDENT', ('INDENT', 2), '^'])
'''
ans = list(l)
for last in range(len(ans) - 1, -1, -1):
x = ans[last]
if x != 'POPINDENT' and \
not (isinstance(x, (tuple, list)) and x[0] == 'INDENT'):
if not isinstance(x, (tuple, list)):
ans[last] += end
return last, ans
e, x2 = add_end(x, end)
if len(x) == len(x2):
ans[last] = tuple(x2)
return last, ans
last = len(ans)
ans.insert(last, end)
return last, ans
def add_brackets(l, start = '(', end = ')'):
'''
>>> add_brackets(('a', 'b', 'c'))
('(a', ('INDENT', 1), 'b', 'c)', 'POPINDENT')
>>> add_brackets(('(a', ('INDENT', 1), 'b', 'c)', 'POPINDENT'))
('((a', ('INDENT', 1), ('INDENT', 1), 'b', 'c))', 'POPINDENT', 'POPINDENT')
'''
if not l: return start + end
first, ans = add_start(l, start)
last, ans = add_end(ans, end)
if last > first:
ans.insert(last + 1, "POPINDENT")
ans.insert(first + 1, ("INDENT", 1))
return tuple(ans)
def list_format(l, start, end, separator = ','):
ans = [element + separator for element in l]
if not ans: return (start + end,)
ans[0] = start + ans[0]
ans[-1] += end
if len(ans) > 1:
ans.insert(1, ("INDENT", 1))
ans.append("POPINDENT")
return tuple(ans)
def merge_pattern(pattern, pattern_list):
# returns pat_num, new_pattern_list
if pattern in pattern_list:
return list(pattern_list).index(pattern), pattern_list
return len(pattern_list), pattern_list + (pattern,)
def merge_patterns(patterns, pattern_list):
# returns pat_nums, new_pattern_list
pat_nums = []
for pat in patterns:
pat_num, pattern_list = merge_pattern(pat, pattern_list)
pat_nums.append(pat_num)
return tuple(pat_nums), pattern_list
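# Doctest-style sketch, using plain strings in place of compiled pattern
# objects (any hashable values behave the same way here):
#
#   >>> merge_pattern('$x', ())
#   (0, ('$x',))
#   >>> merge_pattern('$x', ('$y', '$x'))
#   (1, ('$y', '$x'))
#   >>> merge_patterns(('$a', '$y'), ('$y',))
#   ((1, 0), ('$y', '$a'))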
def syntax_error(msg, lineno, pos):
    # imported here (as in goal() above) to avoid a circular import:
    from pyke.krb_compiler import scanner
    raise SyntaxError(msg, scanner.syntaxerror_params(pos, lineno))
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/krb_compiler/helpers.py",
"copies": "2",
"size": "9054",
"license": "mit",
"hash": 5565170328007970000,
"line_mean": 36.1024590164,
"line_max": 83,
"alpha_frac": 0.5040318127,
"autogenerated": false,
"ratio": 3.6255506607929515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5129582473492952,
"avg_score": null,
"num_lines": null
} |
import subprocess
import contextlib
from pyke import knowledge_base, rule_base
# claim_goal, fact, prove_all, gather_all
class special_knowledge_base(knowledge_base.knowledge_base):
def __init__(self, engine):
super(special_knowledge_base, self).__init__(engine, 'special')
def add_fn(self, fn):
if fn.name in self.entity_lists:
raise KeyError("%s.%s already exists" % (self.name, fn.name))
self.entity_lists[fn.name] = fn
def print_stats(self, f):
pass
class special_fn(knowledge_base.knowledge_entity_list):
def __init__(self, special_base, name):
super(special_fn, self).__init__(name)
special_base.add_fn(self)
def lookup(self, bindings, pat_context, patterns):
raise AssertionError("special.%s may not be used in forward chaining "
"rules" % self.name)
def prove(self, bindings, pat_context, patterns):
raise AssertionError("special.%s may not be used in backward chaining "
"rules" % self.name)
class special_both(special_fn):
def prove(self, bindings, pat_context, patterns):
return self.lookup(bindings, pat_context, patterns)
class claim_goal(special_fn):
r'''
>>> class stub(object):
... def add_fn(self, fn): pass
>>> cg = claim_goal(stub())
>>> mgr = cg.prove(None, None, None)
>>> gen = iter(mgr.__enter__())
>>> gen.next()
>>> gen.next()
Traceback (most recent call last):
...
StopProof
>>> mgr.__exit__(None, None, None)
>>> cg.lookup(None, None, None)
Traceback (most recent call last):
...
AssertionError: special.claim_goal may not be used in forward chaining rules
'''
def __init__(self, special_base):
super(claim_goal, self).__init__(special_base, 'claim_goal')
def prove(self, bindings, pat_context, patterns):
def gen():
yield
raise rule_base.StopProof
return contextlib.closing(gen())
def run_cmd(pat_context, cmd_pat, cwd_pat=None, stdin_pat=None):
r'''
>>> from pyke import pattern
>>> run_cmd(None, pattern.pattern_literal(('true',)))
(0, '', '')
>>> run_cmd(None, pattern.pattern_literal(('false',)))
(1, '', '')
>>> ret, out, err = run_cmd(None, pattern.pattern_literal(('pwd',)))
>>> ret
0
>>> err
''
>>> import os
>>> cwd = os.getcwd() + '\n'
>>> out == cwd
True
>>> run_cmd(None, pattern.pattern_literal(('pwd',)),
... pattern.pattern_literal('/home/bruce'))
(0, '/home/bruce\n', '')
'''
stdin = None if stdin_pat is None \
else stdin_pat.as_data(pat_context)
process = subprocess.Popen(cmd_pat.as_data(pat_context),
bufsize=-1,
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd= None if cwd_pat is None
else cwd_pat.as_data(pat_context))
out, err = process.communicate(stdin)
return process.returncode, out, err
class check_command(special_both):
r'''
>>> from pyke import pattern, contexts
>>> class stub(object):
... def add_fn(self, fn): pass
>>> cc = check_command(stub())
>>> ctxt = contexts.simple_context()
>>> mgr = cc.lookup(ctxt, ctxt, (pattern.pattern_literal(('true',)),))
>>> gen = iter(mgr.__enter__())
>>> gen.next()
>>> ctxt.dump()
>>> gen.next()
Traceback (most recent call last):
...
StopIteration
>>> ctxt.dump()
>>> mgr.__exit__(None, None, None)
>>> mgr = cc.lookup(ctxt, ctxt, (pattern.pattern_literal(('false',)),))
>>> gen = iter(mgr.__enter__())
>>> gen.next()
Traceback (most recent call last):
...
StopIteration
>>> ctxt.dump()
>>> mgr.__exit__(None, None, None)
>>> mgr = cc.prove(ctxt, ctxt, (pattern.pattern_literal(('true',)),))
>>> gen = iter(mgr.__enter__())
>>> gen.next()
>>> ctxt.dump()
>>> gen.next()
Traceback (most recent call last):
...
StopIteration
>>> ctxt.dump()
>>> mgr.__exit__(None, None, None)
'''
def __init__(self, special_base):
super(check_command, self).__init__(special_base, 'check_command')
def lookup(self, bindings, pat_context, patterns):
if len(patterns) < 1: return knowledge_base.Gen_empty
retcode, out, err = run_cmd(pat_context, patterns[0],
patterns[1] if len(patterns) > 1
else None,
patterns[2] if len(patterns) > 2
else None)
if retcode: return knowledge_base.Gen_empty
return knowledge_base.Gen_once
class command(special_both):
r'''
>>> from pyke import pattern, contexts
>>> class stub(object):
... def add_fn(self, fn): pass
>>> c = command(stub())
>>> ctxt = contexts.simple_context()
>>> mgr = c.lookup(ctxt, ctxt,
... (contexts.variable('ans'),
... pattern.pattern_literal(('echo', 'hi'))))
>>> gen = iter(mgr.__enter__())
>>> gen.next()
>>> ctxt.dump()
ans: ('hi',)
>>> gen.next()
Traceback (most recent call last):
...
StopIteration
>>> ctxt.dump()
>>> mgr.__exit__(None, None, None)
>>> mgr = c.lookup(ctxt, ctxt,
... (contexts.variable('ans'),
... pattern.pattern_literal(('cat',)),
... pattern.pattern_literal(None),
... pattern.pattern_literal('line1\nline2\nline3\n')))
>>> gen = iter(mgr.__enter__())
>>> gen.next()
>>> ctxt.dump()
ans: ('line1', 'line2', 'line3')
>>> gen.next()
Traceback (most recent call last):
...
StopIteration
>>> ctxt.dump()
>>> mgr.__exit__(None, None, None)
'''
def __init__(self, special_base):
super(command, self).__init__(special_base, 'command')
def lookup(self, bindings, pat_context, patterns):
if len(patterns) < 2: return knowledge_base.Gen_empty
retcode, out, err = run_cmd(pat_context, patterns[1],
patterns[2] if len(patterns) > 2
else None,
patterns[3] if len(patterns) > 3
else None)
if retcode != 0:
raise subprocess.CalledProcessError(
retcode,
' '.join(patterns[1].as_data(pat_context)))
def gen():
mark = bindings.mark(True)
try:
outlines = tuple(out.rstrip('\n').split('\n'))
if patterns[0].match_data(bindings, pat_context, outlines):
bindings.end_save_all_undo()
yield
else:
bindings.end_save_all_undo()
finally:
bindings.undo_to_mark(mark)
return contextlib.closing(gen())
class general_command(special_both):
r'''
>>> from pyke import pattern, contexts
>>> class stub(object):
... def add_fn(self, fn): pass
>>> gc = general_command(stub())
>>> ctxt = contexts.simple_context()
>>> ctxt.dump()
>>> mgr = gc.lookup(ctxt, ctxt,
... (contexts.variable('ans'),
... pattern.pattern_literal(('echo', 'hi'))))
>>> gen = iter(mgr.__enter__())
>>> gen.next()
>>> ctxt.dump()
ans: (0, 'hi\n', '')
>>> gen.next()
Traceback (most recent call last):
...
StopIteration
>>> ctxt.dump()
>>> mgr.__exit__(None, None, None)
'''
def __init__(self, special_base):
super(general_command, self).__init__(special_base, 'general_command')
def lookup(self, bindings, pat_context, patterns):
if len(patterns) < 2: return knowledge_base.Gen_empty
ans = run_cmd(pat_context, patterns[1],
patterns[2] if len(patterns) > 2 else None,
patterns[3] if len(patterns) > 3 else None)
def gen():
mark = bindings.mark(True)
try:
if patterns[0].match_data(bindings, pat_context, ans):
bindings.end_save_all_undo()
yield
else:
bindings.end_save_all_undo()
finally:
bindings.undo_to_mark(mark)
return contextlib.closing(gen())
def create_for(engine):
special_base = special_knowledge_base(engine)
claim_goal(special_base)
check_command(special_base)
command(special_base)
general_command(special_base)
| {
"repo_name": "e-loue/pyke",
"path": "pyke/special.py",
"copies": "2",
"size": "10605",
"license": "mit",
"hash": 1066498832495014700,
"line_mean": 36.4699646643,
"line_max": 84,
"alpha_frac": 0.5203696718,
"autogenerated": false,
"ratio": 4.23482428115016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.575519395295016,
"avg_score": null,
"num_lines": null
} |
import sys
import types
import re
def cprint(obj, maxlen = 80, maxdepth = 4, maxlines = 20):
items = cprint2(obj, maxdepth)
#sys.stderr.write("cprint items: %s\n" % str(items))
return format(items, maxlen, maxlen, maxlines)[0]
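# Doctest-style sketch of the public entry point (output widths assume the
# default maxlen of 80 unless overridden):
#
#   >>> cprint((1, 'hello there', (2, 3)))
#   "(1, 'hello there', (2, 3))"
#   >>> cprint('a very long string indeed', 10)
#   "'a ver...'"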
def format_len(x):
"""
>>> format_len('abc')
3
>>> format_len(('(', ('(', 'def', ')'), 'yz', ')'))
11
"""
if not isinstance(x, (list, tuple)): return len(x)
if len(x) > 3: sep_len = 2 * (len(x) - 3)
else: sep_len = 0
return sum(map(format_len, x)) + sep_len
def format(x, lenleft, maxlen, maxlines, indent = 0):
r"""
>>> format('"hello mom this is a long str"', 7, 80, 9)
('"he..."', 0)
>>> format(('(', 'a', 'b', 'c', ')'), 80, 80, 9)
('(a, b, c)', 0)
>>> format(('(', 'a', 'b', 'c', ')'), 8, 80, 9)
('(a,\n b,\n c)', 2)
"""
if not isinstance(x, (list, tuple)):
if len(x) <= lenleft: return x, 0
if isinstance(x, types.StringTypes) and x[-1] in "'\"":
if lenleft >= 5: return x[:lenleft-4] + '...' + x[-1], 0
else:
if lenleft >= 4: return x[:lenleft-3] + '...', 0
return '&', 0
if len(x) == 0: return '', 0
if format_len(x) <= lenleft:
return x[0] + \
', '.join(format(y, lenleft, maxlen, maxlines)[0]
for y in x[1:-1]) + \
x[-1], 0
indent += 2
ans = x[0]
lines_taken = 0
if len(x) > 2:
first, taken = \
format(x[1], lenleft - len(ans), maxlen, maxlines, indent + 2)
ans += first
lines_taken += taken
for y in x[2:-1]:
if lines_taken >= maxlines:
ans += ', ...'
break
line, taken = \
format(y, maxlen - indent, maxlen, maxlines - lines_taken,
indent)
ans += ',\n' + indent * ' ' + line
lines_taken += taken + 1
return ans + x[-1], lines_taken
def cprint2(obj, maxdepth):
if isinstance(obj, types.TupleType):
return printSeq('(', ')', obj, maxdepth)
if isinstance(obj, types.ListType):
return printSeq('[', ']', obj, maxdepth)
if isinstance(obj, types.DictType):
return printDict(obj, maxdepth)
if isinstance(obj, types.StringTypes):
return printStr(obj)
try:
return str(obj)
except StandardError, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
import traceback
if isinstance(obj, types.InstanceType): obj_type = obj.__class__
else: obj_type = type(obj)
return "While trying to cprint a %s, got: %s" % \
(obj_type,
traceback.format_exception_only(exc_type, exc_value))
str_chk = re.compile('[a-zA-Z_][a-zA-Z0-9_]*$')
def printStr(str):
"""
>>> printStr('hello_34_A')
'hello_34_A'
>>> printStr('hello 34_A')
"'hello 34_A'"
"""
if str_chk.match(str): return str
return repr(str)
def printSeq(startChar, endChar, seq, maxdepth):
"""
>>> printSeq('(', ')', (1, 2, 3), 4)
['(', '1', '2', '3', ')']
>>> printSeq('(', ')', (), 4)
['(', ')']
"""
if maxdepth < 1: return '&'
maxdepth -= 1
return [startChar] + [cprint2(x, maxdepth) for x in seq] + [endChar]
def item(key, value, maxdepth, separator):
"""
>>> item('hello', 'bob', 3, '=')
'hello=bob'
>>> item(('hello', 'there'), 'bob', 3, '=')
['(', 'hello', 'there', ')=bob']
>>> item('hello', ('extra', 'bob'), 3, '=')
['hello=(', 'extra', 'bob', ')']
>>> item(('hello', 'there'), ('extra', 'bob'), 3, '=')
['(', 'hello', 'there', ')=(', 'extra', 'bob', ')']
"""
keyans = cprint2(key, maxdepth)
valans = cprint2(value, maxdepth)
if isinstance(keyans, list):
keyans[-1] += separator
if isinstance(valans, list):
keyans[-1] += valans[0]
keyans.extend(valans[1:])
else:
keyans[-1] += valans
return keyans
if isinstance(valans, list):
valans[0] = keyans + separator + valans[0]
return valans
return keyans + separator + valans
def printDict(dict, maxdepth,
startChar = '{', endChar = '}', separator = ': '):
"""
>>> printDict({1:2, 3:4, 5:(6,7)}, 5)
['{', '1: 2', '3: 4', ['5: (', '6', '7', ')'], '}']
>>> printDict({}, 5)
['{', '}']
"""
if maxdepth < 1: return '&'
maxdepth -= 1
keys = dict.keys()
keys.sort()
return [startChar] + \
[item(key, dict[key], maxdepth, separator) for key in keys] + \
[endChar]
| {
"repo_name": "e-loue/pyke",
"path": "pyke/condensedPrint.py",
"copies": "2",
"size": "5884",
"license": "mit",
"hash": -7602048279995621000,
"line_mean": 33.6058823529,
"line_max": 79,
"alpha_frac": 0.5267720551,
"autogenerated": false,
"ratio": 3.4383401519579193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4965112207057919,
"avg_score": null,
"num_lines": null
} |
import types
import itertools
class pattern(object):
def __ne__(self, b): return not (self == b)
def simple_match_pattern(self, bindings, my_context, pattern_b, b_context):
return self.match_pattern(bindings, my_context, pattern_b, b_context)
def lookup(self, context, allow_variable_in_ans = False):
return self
class pattern_literal(pattern):
def __init__(self, literal):
self.literal = literal
def __hash__(self): return hash(self.literal)
def __eq__(self, b):
if isinstance(b, pattern_literal): return self.literal == b.literal
return self.literal == b
def match_data(self, bindings, my_context, data):
return self.literal == data
def match_pattern(self, bindings, my_context, pattern_b, b_context):
if isinstance(pattern_b, pattern_literal):
return self.literal == pattern_b.literal
return pattern_b.match_data(bindings, b_context, self.literal)
def as_data(self, my_context, allow_vars = False, final = None):
return self.literal
def is_data(self, my_context):
return True
class pattern_tuple(pattern):
def __init__(self, elements, rest_var = None):
self.elements = tuple(elements)
self.rest_var = rest_var
def __hash__(self):
return hash(self.elements) ^ hash(self.rest_var)
def __eq__(self, b):
return isinstance(b, pattern_tuple) and \
self.elements == b.elements and self.rest_var == b.rest_var
def match_data(self, bindings, my_context, data):
if isinstance(data, types.StringTypes): return False
try:
data = tuple(data)
except TypeError:
return False
if len(self.elements) > len(data) or \
self.rest_var is None and len(self.elements) < len(data):
return False
for x, y in itertools.izip(self.elements, data):
if not x.match_data(bindings, my_context, y): return False
if self.rest_var is not None:
return self.rest_var.match_data(bindings, my_context,
tuple(data[len(self.elements):]))
return True
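    # Matching sketch, run in this module's namespace (pyke.contexts
    # supplies the bindings object and the variables):
    #
    #   >>> from pyke import contexts
    #   >>> ctxt = contexts.simple_context()
    #   >>> pat = pattern_tuple((pattern_literal(1), contexts.variable('x')),
    #   ...                     contexts.variable('rest'))
    #   >>> pat.match_data(ctxt, ctxt, (1, 2, 3, 4))
    #   True
    #   >>> ctxt.lookup_data('x'), ctxt.lookup_data('rest')
    #   (2, (3, 4))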
def simple_match_pattern(self, bindings, my_context, pattern_b, b_context):
return self, my_context
def match_pattern(self, bindings, my_context, pattern_b, b_context):
simple_ans = pattern_b.simple_match_pattern(bindings, b_context,
self, my_context)
if isinstance(simple_ans, bool): return simple_ans
pattern_b, b_context = simple_ans
if not isinstance(pattern_b, pattern):
return self.match_data(bindings, my_context, pattern_b)
assert isinstance(pattern_b, pattern_tuple), "Internal logic error"
my_len = len(self.elements)
b_len = len(pattern_b.elements)
if pattern_b.rest_var is None and my_len > b_len or \
self.rest_var is None and my_len < b_len:
return False
for x, y in itertools.izip(self.elements, pattern_b.elements):
if not x.match_pattern(bindings, my_context, y, b_context):
return False
if my_len <= b_len and self.rest_var is not None:
# This is where the two rest_vars are bound together if my_len ==
# b_len.
tail_val, tail_context = pattern_b._tail(my_len, b_context)
if tail_context is None:
if not self.rest_var.match_data(bindings, my_context, tail_val):
return False
else:
if not self.rest_var.match_pattern(bindings, my_context,
tail_val, tail_context):
return False
elif pattern_b.rest_var is not None:
tail_val, tail_context = self._tail(b_len, my_context)
if tail_context is None:
if not pattern_b.rest_var.match_data(bindings, b_context,
tail_val):
return False
else:
if not pattern_b.rest_var.match_pattern(bindings, b_context,
tail_val, tail_context):
return False
return True
def as_data(self, my_context, allow_vars = False, final = None):
ans = tuple(x.as_data(my_context, allow_vars, final)
for x in self.elements)
if self.rest_var is None:
return ans
rest = my_context.lookup_data(self.rest_var.name, allow_vars, final)
if isinstance(rest, tuple): return ans + rest
return ans + ('*' + rest,)
def _tail(self, n, my_context):
""" Return a copy of myself with the first n elements removed.
"""
if n == len(self.elements):
if self.rest_var is None: return (), None
return self.rest_var, my_context
rest_elements = self.elements[n:]
if self.rest_var is None and \
all(isinstance(x, pattern_literal) for x in rest_elements):
return tuple(x.literal for x in rest_elements), None
return pattern_tuple(self.elements[n:], self.rest_var), my_context
def is_data(self, my_context):
arg_test = all(arg_pat.is_data(my_context) for arg_pat in self.elements)
if not arg_test or self.rest_var is None: return arg_test
return self.rest_var.is_data(my_context)
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/pattern.py",
"copies": "2",
"size": "6638",
"license": "mit",
"hash": -4000698853090909000,
"line_mean": 41.5448717949,
"line_max": 80,
"alpha_frac": 0.6102154588,
"autogenerated": false,
"ratio": 3.983793517406963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5594008976206962,
"avg_score": null,
"num_lines": null
} |
"""
A context is used to store all of a rule's pattern variable bindings for a
specific rule invocation during inferencing (both forward-chaining and
backward-chaining rules).
>>> from pyke import pattern
>>> c = simple_context()
>>> var = variable('foo')
>>> var
$foo
>>> var.name
'foo'
>>> c.bind(var.name, c, 123)
True
>>> c.lookup(var)
(123, None)
>>> c.lookup_data(var.name)
123
This is a shallow binding scheme which must "unbind" variables when a
rule is done. Each time a rule is invoked, a new context object is
created to store the bindings for the variables in that rule. When the
rule is done, this context is abandoned; so the variables bound there
do not need to be individually unbound. But bindings done to
variables in other contexts _do_ need to be individually unbound.
These bindings happen when a variable in the calling rule, which must
be bound in that rule's context, is matched to a non-variable pattern (i.e.,
pattern_literal or pattern_tuple) in the called rule's context.
For example, a caller, rule A, has its own context:
>>> A_context = simple_context()
and a pattern of $foo within a subgoal in its 'when' clause:
>>> A_pattern = variable('foo')
In proving this subgoal, rule B is being tried. A new context is
created for rule B:
>>> B_context = simple_context()
Rule B has a literal 123 in its 'proven' clause:
>>> B_pattern = pattern.pattern_literal(123)
Now B's 'proven' clause is pattern matched to A's subgoal, which
results in:
>>> B_pattern.match_pattern(B_context, B_context, A_pattern, A_context)
True
The pattern matches! But the $foo variable belongs to A, so it must be
bound in A's context, not B's.
>>> A_context.lookup(A_pattern)
(123, None)
>>> B_context.lookup(A_pattern)
Traceback (most recent call last):
...
KeyError: '$foo not bound'
This is done by using the current rule's context as a controlling context
for all the bindings that occur within that rule. If the binding is for
a variable in another context, the controlling context remembers the
binding in its undo_list. When the rule is finished, it executes a 'done'
method on its context which undoes all of the bindings in its undo_list.
>>> B_context.done()
>>> A_context.lookup(A_pattern)
Traceback (most recent call last):
...
KeyError: '$foo not bound'
Thus, in binding a variable to a value, there are three contexts
involved:
controlling_context.bind(variable, variable_context,
value [, value_context])
The value_context must be omitted if value is any python data, and
must be included if value is a pattern (including another variable).
Variables are bound to as just strings so that they can be accessed
easily from python code without needing the variable objects.
>>> A_context.bind('foo', A_context, 123)
True
>>> A_context.lookup_data('foo')
123
But to differentiate variables from string literals in the patterns,
variables are special objects. When a variable is bound as data (to
another variable), it is bound as a variable object (again, to
differentiate it from plain python strings).
>>> a_var = variable('a_var')
>>> a_var
$a_var
>>> b_var = variable('b_var')
>>> b_var
$b_var
>>> B_context.bind(b_var.name, B_context, a_var, A_context)
True
>>> B_context.lookup(b_var)
Traceback (most recent call last):
...
KeyError: '$a_var not bound'
>>> ans = B_context.lookup(b_var, True)
>>> ans # doctest: +ELLIPSIS
($a_var, <pyke.contexts.simple_context object at ...>)
>>> ans[1] is A_context
True
    The variable itself comes back as a variable object (not a plain
    python string):
>>> type(ans[0])
<class 'pyke.contexts.variable'>
The anonymous variables have names starting with '_'. Binding
requests on anonymous variables are silently ignored.
>>> anonymous('_ignored')
$_ignored
>>> A_context.bind('_bogus', A_context, 567)
False
>>> A_context.lookup_data('_bogus')
Traceback (most recent call last):
...
KeyError: '$_bogus not bound'
>>> A_context.lookup_data('_bogus', True)
'$_bogus'
"""
import sys
from pyke import pattern, unique
_Not_found = unique.unique('Not_found')
# Set to a sequence (or frozenset) of variable names to trace their bindings:
debug = ()
class simple_context(object):
def __init__(self):
self.bindings = {}
self.undo_list = []
self.save_all_undo_count = 0
def dump(self):
for var_name in sorted(self.bindings.iterkeys()):
print "%s: %s" % (var_name, repr(self.lookup_data(var_name, True)))
def bind(self, var_name, var_context, val, val_context = None):
""" val_context must be None iff val is not a pattern.
Returns True if a new binding was created.
"""
assert not isinstance(val, pattern.pattern) \
if val_context is None \
else isinstance(val, pattern.pattern)
if var_name[0] == '_': return False
if var_context is self:
assert var_name not in self.bindings
if val_context is not None:
val, val_context = val_context.lookup(val, True)
if val_context == var_context and isinstance(val, variable) and \
val.name == var_name:
# binding $x to $x; no binding necessary!
return False
if var_name in debug:
if val_context:
sys.stderr.write("binding %s in %s to %s in %s\n" %
(var_name, var_context, val, val_context))
else:
sys.stderr.write("binding %s in %s to %s\n" %
(var_name, var_context, val))
self.bindings[var_name] = (val, val_context)
if self.save_all_undo_count:
self.undo_list.append((var_name, self))
return True
ans = var_context.bind(var_name, var_context, val, val_context)
if ans: self.undo_list.append((var_name, var_context))
return ans
def is_bound(self, var):
val, where = var, self
while where is not None and isinstance(val, variable):
ans = where.bindings.get(val.name)
if ans is None: return False
val, where = ans
# where is None or not isinstance(val, variable)
return where is None or val.is_data(where)
def lookup_data(self, var_name, allow_vars = False, final = None):
""" Converts the answer into data only (without any patterns in it).
If there are unbound variables anywhere in the data, a KeyError is
generated.
"""
if final is not None:
val = final.get((var_name, self), _Not_found)
if val is not _Not_found: return val
binding = self.bindings.get(var_name)
if binding is None:
if allow_vars: return "$" + var_name
raise KeyError("$%s not bound" % var_name)
val, context = binding
if context is not None:
val = val.as_data(context, allow_vars, final)
if isinstance(val, bc_context): val = val.create_plan(final)
if final is not None: final[var_name, self] = val
return val
def lookup(self, var, allow_variable_in_ans = False):
""" Returns value, val_context.
Returns (var, self) if not bound and allow_variable_in_ans, else
raises KeyError.
"""
val, where = var, self
while where is not None and isinstance(val, variable):
ans = where.bindings.get(val.name)
if ans is None: break
val, where = ans
else:
# where is None or not isinstance(val, variable)
return val, where
# where is not None and isinstance(val, variable)
if allow_variable_in_ans: return val, where
raise KeyError("%s not bound" % str(val))
def mark(self, save_all_undo = False):
if save_all_undo: self.save_all_undo_count += 1
return len(self.undo_list)
def end_save_all_undo(self):
assert self.save_all_undo_count > 0
self.save_all_undo_count -= 1
def undo_to_mark(self, mark, *var_names_to_undo):
for var_name, var_context in self.undo_list[mark:]:
var_context._unbind(var_name)
del self.undo_list[mark:]
for var_name in var_names_to_undo:
self._unbind(var_name)
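    # Mark/undo sketch: with save_all_undo in force, even bindings made in
    # 'self' are recorded on the undo_list and can be rolled back:
    #
    #   >>> c = simple_context()
    #   >>> m = c.mark(True)
    #   >>> c.bind('x', c, 1)
    #   True
    #   >>> c.end_save_all_undo()
    #   >>> c.undo_to_mark(m)
    #   >>> c.lookup_data('x', True)
    #   '$x'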
def done(self):
""" Unbinds all variables bound through 'self's 'bind' method.
The assumption here is that 'self' is being abandoned, so we don't
need to worry about self.bindings.
"""
for var_name, var_context in self.undo_list:
var_context._unbind(var_name)
def _unbind(self, var_name):
del self.bindings[var_name]
class bc_context(simple_context):
def __init__(self, rule):
super(bc_context, self).__init__()
self.rule = rule
def name(self): return self.rule.name
def __repr__(self):
return "<bc_context for %s at 0x%x>" % (self.name(), id(self))
def create_plan(self, final = None):
if final is None: final = {}
return self.rule.make_plan(self, final)
class variable(pattern.pattern):
""" The code to force variables of the same name to be the same object is
probably not needed anymore...
"""
Variables = {}
def __new__(cls, name):
var = cls.Variables.get(name)
if var is None:
var = super(variable, cls).__new__(cls)
cls.Variables[name] = var
return var
def __init__(self, name):
self.name = name
def __repr__(self): return '$' + self.name
def lookup(self, my_context, allow_variable_in_ans = False):
return my_context.lookup(self, allow_variable_in_ans)
def match_data(self, bindings, my_context, data):
if self.name in debug:
sys.stderr.write("%s.match_data(%s, %s, %s)\n" %
(self, bindings, my_context, data))
var, var_context = my_context.lookup(self, True)
if isinstance(var, variable):
bindings.bind(var.name, var_context, data)
return True
if self.name in debug:
sys.stderr.write("%s.match_data: lookup got %s in %s\n" %
(self, var, var_context))
if var_context is None: return var == data
return var.match_data(bindings, var_context, data)
def simple_match_pattern(self, bindings, my_context, pattern_b, b_context):
var, var_context = my_context.lookup(self, True)
if isinstance(var, variable):
bindings.bind(var.name, var_context, pattern_b, b_context)
return True
if var_context is None:
return pattern_b.match_data(bindings, b_context, var)
return var.simple_match_pattern(bindings, var_context,
pattern_b, b_context)
def match_pattern(self, bindings, my_context, pattern_b, b_context):
var, var_context = my_context.lookup(self, True)
if isinstance(var, variable):
bindings.bind(var.name, var_context, pattern_b, b_context)
return True
if var_context is None:
return pattern_b.match_data(bindings, b_context, var)
return var.match_pattern(bindings, var_context, pattern_b, b_context)
def as_data(self, my_context, allow_vars = False, final = None):
return my_context.lookup_data(self.name, allow_vars, final)
def is_data(self, my_context):
return my_context.is_bound(self)
class anonymous(variable):
def __init__(self, name):
assert name[0] == '_', \
"anonymous variables must start with '_', not %s" % name
super(anonymous, self).__init__(name)
def lookup(self, my_context, allow_variable_in_ans = False):
if allow_variable_in_ans: return self, my_context
raise KeyError("$%s not bound" % self.name)
def match_data(self, bindings, my_context, data):
return True
def match_pattern(self, bindings, my_context, pattern_b, b_context):
return True
def as_data(self, my_context, allow_vars = False, final = None):
if allow_vars: return "$%s" % self.name
raise KeyError("$%s not bound" % self.name)
def is_data(self, my_context):
return False
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/contexts.py",
"copies": "2",
"size": "14332",
"license": "mit",
"hash": -5124680605850875000,
"line_mean": 36.9126984127,
"line_max": 80,
"alpha_frac": 0.6063777824,
"autogenerated": false,
"ratio": 3.9753120665742023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010244113502895594,
"num_lines": 378
} |
'''
A fact_base is one of the kinds of knowledge_bases (see also, rule_base
and special).
>>> from pyke import knowledge_engine
>>> engine = knowledge_engine.engine()
>>> fb = fact_base(engine, 'fb_name')
>>> fb
<fact_base fb_name>
>>> fb.dump_universal_facts()
>>> fb.dump_specific_facts()
A fact_base is nothing more than a list of facts. Each fact has a name
and a tuple of arguments. These arguments are python data (not
patterns).
Fact_bases support two kinds of facts: universal facts (universally
true) and case specific facts (only true in a specific situation).
>>> fb.add_universal_fact('some_universal_fact', ('a', 2))
>>> fb.add_case_specific_fact('some_specific_fact', ('b', ('hi', 32)))
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
some_specific_fact('b', ('hi', 32))
The 'reset' method deletes all case specific facts, but leaves the
universal facts.
>>> fb.reset()
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
Normally, universal facts are established once at program
initialization time and case specific facts are established both just
prior to each invocation of the expert system as well as by assertions
in forward chaining rules.
>>> fb.assert_('some_fact', ('a', 2, ('hi', 'mom')))
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
>>> fb.assert_('some_fact', ('a', 3, ('hi', 'mom')))
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
>>> fb.assert_('some_other_fact', ())
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
some_other_fact()
    Duplicate facts are not stored: asserting a fact that is already
    present is silently ignored.
>>> fb.assert_('some_fact', ('a', 2, ('hi', 'mom')))
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
some_other_fact()
'''
import itertools
import contextlib
from pyke import knowledge_base, contexts
class fact_base(knowledge_base.knowledge_base):
''' Not much to fact_bases. The real work is done in fact_list! '''
def __init__(self, engine, name, register = True):
super(fact_base, self).__init__(engine, name, fact_list, register)
def dump_universal_facts(self):
for fl_name in sorted(self.entity_lists.iterkeys()):
self.entity_lists[fl_name].dump_universal_facts()
def dump_specific_facts(self):
for fl_name in sorted(self.entity_lists.iterkeys()):
self.entity_lists[fl_name].dump_specific_facts()
def add_universal_fact(self, fact_name, args):
self.get_entity_list(fact_name).add_universal_fact(args)
def add_case_specific_fact(self, fact_name, args):
self.get_entity_list(fact_name).add_case_specific_fact(args)
def assert_(self, fact_name, args):
self.add_case_specific_fact(fact_name, args)
def get_stats(self):
num_fact_lists = num_universal = num_case_specific = 0
for fact_list in self.entity_lists.itervalues():
universal, case_specific = fact_list.get_stats()
num_universal += universal
num_case_specific += case_specific
num_fact_lists += 1
return num_fact_lists, num_universal, num_case_specific
def print_stats(self, f):
num_fact_lists, num_universal, num_case_specific = self.get_stats()
f.write("%s: %d fact names, %d universal facts, "
"%d case_specific facts\n" %
(self.name, num_fact_lists, num_universal, num_case_specific))
class fact_list(knowledge_base.knowledge_entity_list):
def __init__(self, name):
super(fact_list, self).__init__(name)
self.universal_facts = [] # [(arg...)...]
self.case_specific_facts = [] # [(arg...)...]
self.hashes = {} # (len, (index...)): (other_indices,
# {(arg...): [other_args_from_factn...]})
self.fc_rule_refs = [] # (fc_rule, foreach_index)
def reset(self):
self.case_specific_facts = []
self.hashes.clear()
self.fc_rule_refs = []
def dump_universal_facts(self):
for args in self.universal_facts:
print '%s%s' % (self.name, args)
def dump_specific_facts(self):
for args in self.case_specific_facts:
print '%s%s' % (self.name, args)
def add_fc_rule_ref(self, fc_rule, foreach_index):
self.fc_rule_refs.append((fc_rule, foreach_index))
def get_affected_fc_rules(self):
return (fc_rule for fc_rule, foreach_index in self.fc_rule_refs)
def lookup(self, bindings, pat_context, patterns):
""" Returns a context manager for a generator that binds patterns to
successive facts, yielding None for each successful match.
Undoes bindings upon continuation, so that no bindings remain at
StopIteration.
"""
indices = tuple(enum for enum in enumerate(patterns)
if enum[1].is_data(pat_context))
other_indices, other_arg_lists = \
self._get_hashed(len(patterns),
tuple(index[0] for index in indices),
tuple(index[1].as_data(pat_context)
for index in indices))
def gen():
if other_arg_lists:
for args in other_arg_lists:
mark = bindings.mark(True)
end_done = False
try:
if all(itertools.imap(
lambda i, arg:
patterns[i].match_data(bindings,
pat_context,
arg),
other_indices,
args)):
bindings.end_save_all_undo()
end_done = True
yield
finally:
if not end_done: bindings.end_save_all_undo()
bindings.undo_to_mark(mark)
return contextlib.closing(gen())
def _get_hashed(self, len, indices, args):
ans = self.hashes.get((len, indices))
if ans is None: ans = self._hash(len, indices)
other_indices, arg_map = ans
return other_indices, arg_map.get(args, ())
def _hash(self, length, indices):
args_hash = {}
new_entry = (tuple(i for i in range(length) if i not in indices),
args_hash)
self.hashes[length, indices] = new_entry
for args in itertools.chain(self.universal_facts,
self.case_specific_facts):
if len(args) == length:
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
args_hash.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
return new_entry
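    # Index sketch: the hash entry built for (length 2, argument 1 fixed)
    # buckets facts by their second argument, so lookups scan only the
    # matching bucket:
    #
    #   >>> fl = fact_list('f')
    #   >>> fl.add_universal_fact((1, 'a'))
    #   >>> fl.add_universal_fact((2, 'a'))
    #   >>> fl._get_hashed(2, (1,), ('a',))
    #   ((0,), [(1,), (2,)])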
def add_universal_fact(self, args):
assert args not in self.case_specific_facts, \
"add_universal_fact: fact already present as specific fact"
if args not in self.universal_facts:
self.universal_facts.append(args)
self.add_args(args)
def add_case_specific_fact(self, args):
if args not in self.universal_facts and \
args not in self.case_specific_facts:
self.case_specific_facts.append(args)
self.add_args(args)
for fc_rule, foreach_index in self.fc_rule_refs:
fc_rule.new_fact(args, foreach_index)
def add_args(self, args):
for (length, indices), (other_indices, arg_map) \
in self.hashes.iteritems():
if length == len(args):
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
arg_map.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
def get_stats(self):
return len(self.universal_facts), len(self.case_specific_facts)
| {
"repo_name": "e-loue/pyke",
"path": "pyke/fact_base.py",
"copies": "2",
"size": "10027",
"license": "mit",
"hash": 1010745363183702800,
"line_mean": 40.775,
"line_max": 79,
"alpha_frac": 0.5637342908,
"autogenerated": false,
"ratio": 4.047638272103351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011672854542151468,
"num_lines": 240
} |
'''
Forward chaining rules (fc_rule) are one of two types of rules in a
rule_base (the other being backward chaining rules -- bc_rule).
All forward chaining is done automatically as each rule_base is
activated. This is done in two steps:
1. All fc_rules are registered with the fact_lists referenced in
their 'foreach' clause by calling fc_rule.register_rule() on
each fc_rule (including the parent rule_base's fc_rules).
This will cause the fact_list to invoke fc_rule.new_fact each time
a new fact for that fact_list (by that name) is asserted (by the
same or another fc_rule).
2. The fc_rule.run() function is called on each fc_rule (including
the parent rule_base's fc_rules).
The forward chaining rule is compiled into a python function which does
the actual inferencing work for both the 'run' case and the 'new_fact'
case, depending on the arguments passed to it. Each fc_rule object
remembers its associated compiled python function.
The fc_rule object tracks the progress of the forward chaining for that
rule. If the rule has not been run yet, it ignores new_facts since it
will see the new fact when it is later run.
'''
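# A sketch of the two activation steps described above (hypothetical rule
# base name; activate() itself is defined in pyke/rule_base.py):
#
#   engine.get_rb('fc_example').activate()
#   # step 1: each fc_rule.register_rule() hooks into its foreach fact_lists
#   # step 2: each fc_rule.run() fires the compiled rule function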
from pyke import contexts, fact_base
import itertools
class rule(object):
''' Common to both fc_rules and bc_rules. '''
def __init__(self, name, rule_base, patterns):
self.name = name
self.rule_base = rule_base
self.patterns = patterns
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.name)
def pattern(self, pattern_index):
return self.patterns[pattern_index]
class fc_rule(rule):
def __init__(self, name, rule_base, rule_fn, foreach_facts, patterns):
super(fc_rule, self).__init__(name, rule_base, patterns)
rule_base.add_fc_rule(self)
self.rule_fn = rule_fn
self.foreach_facts = foreach_facts # (kb_name, fact_name, arg_pats,
# multi_match?)...
self.ran = False
def register_rule(self):
for i, (kb_name, fact_name, arg_patterns, multi_match) \
in enumerate(self.foreach_facts):
self.rule_base.engine.get_kb(kb_name, fact_base.fact_base) \
.add_fc_rule_ref(fact_name, self, i)
def reset(self):
self.ran = False
def run(self):
self.ran = True
self.rule_fn(self)
def new_fact(self, fact_args, n):
if self.ran:
arg_patterns = self.foreach_facts[n][2]
if len(fact_args) == len(arg_patterns):
context = contexts.simple_context()
if all(itertools.imap(lambda pat, arg:
pat.match_data(context, context, arg),
arg_patterns,
fact_args)):
self.rule_base.num_fc_rules_rerun += 1
if self.foreach_facts[n][3]:
self.rule_fn(self)
else:
self.rule_fn(self, context, n)
context.done()
def foreach_patterns(self, foreach_index):
return self.foreach_facts[foreach_index][2]
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/fc_rule.py",
"copies": "2",
"size": "4442",
"license": "mit",
"hash": -2048087142515364400,
"line_mean": 39.3727272727,
"line_max": 80,
"alpha_frac": 0.6309389777,
"autogenerated": false,
"ratio": 4.033605812897366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.022732251504181327,
"num_lines": 110
} |
'''
This example shows how people are related. The primary data (facts) that
are used to figure everything out are in family.kfb.
There are four independent rule bases that all do the same thing. The
fc_example rule base only uses forward-chaining rules. The bc_example
rule base only uses backward-chaining rules. The bc2_example rule base
also only uses backward-chaining rules, but with a few optimizations that
make it run 100 times faster than bc_example. And the example rule base
uses all three techniques: forward-chaining rules, backward-chaining rules
and plans (though it's a poor use of plans).
Once the pyke engine has been created, the rule bases loaded, and the
primary data established as universal facts, five functions can be used to
exercise the four rule bases: fc_test, bc_test, bc2_test, test and general.
'''
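# Hypothetical interactive session (function names as defined below):
#
#     >>> import driver
#     >>> driver.fc_test('bruce')          # forward-chaining only
#     >>> driver.bc2_test()                # optimized backward-chaining
#     >>> driver.general(person2='david')  # any argument may be left unbound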
from __future__ import with_statement
import contextlib
import sys
import time
from pyke import knowledge_engine, krb_traceback, goal
# Compile and load .krb files in same directory that I'm in (recursively).
engine = knowledge_engine.engine(__file__)
fc_goal = goal.compile('family.how_related($person1, $person2, $relationship)')
def fc_test(person1 = 'bruce'):
'''
This function runs the forward-chaining example (fc_example.krb).
'''
engine.reset() # Allows us to run tests multiple times.
start_time = time.time()
engine.activate('fc_example') # Runs all applicable forward-chaining rules.
fc_end_time = time.time()
fc_time = fc_end_time - start_time
print "doing proof"
with fc_goal.prove(engine, person1=person1) as gen:
for vars, plan in gen:
print "%s, %s are %s" % \
(person1, vars['person2'], vars['relationship'])
prove_time = time.time() - fc_end_time
print
print "done"
engine.print_stats()
print "fc time %.2f, %.0f asserts/sec" % \
(fc_time, engine.get_kb('family').get_stats()[2] / fc_time)
def bc_test(person1 = 'bruce'):
engine.reset() # Allows us to run tests multiple times.
start_time = time.time()
engine.activate('bc_example')
fc_end_time = time.time()
fc_time = fc_end_time - start_time
print "doing proof"
try:
with engine.prove_goal(
'bc_example.how_related($person1, $person2, $relationship)',
person1=person1) \
as gen:
for vars, plan in gen:
print "%s, %s are %s" % \
(person1, vars['person2'], vars['relationship'])
except StandardError:
# This converts stack frames of generated python functions back to the
# .krb file.
krb_traceback.print_exc()
sys.exit(1)
prove_time = time.time() - fc_end_time
print
print "done"
engine.print_stats()
print "bc time %.2f, %.0f goals/sec" % \
(prove_time, engine.get_kb('bc_example').num_prove_calls / prove_time)
def bc2_test(person1 = 'bruce'):
engine.reset() # Allows us to run tests multiple times.
start_time = time.time()
engine.activate('bc2_example')
fc_end_time = time.time()
fc_time = fc_end_time - start_time
print "doing proof"
try:
with engine.prove_goal(
'bc2_example.how_related($person1, $person2, $relationship)',
person1=person1) \
as gen:
for vars, plan in gen:
print "%s, %s are %s" % \
(person1, vars['person2'], vars['relationship'])
except StandardError:
# This converts stack frames of generated python functions back to the
# .krb file.
krb_traceback.print_exc()
sys.exit(1)
prove_time = time.time() - fc_end_time
print
print "done"
engine.print_stats()
print "bc time %.2f, %.0f goals/sec" % \
(prove_time,
engine.get_kb('bc2_example').num_prove_calls / prove_time)
def test(person1 = 'bruce'):
    engine.reset()      # Allows us to run tests multiple times.
    start_time = time.time()
    engine.activate('example')  # Also runs all applicable forward-chaining rules.
fc_end_time = time.time()
fc_time = fc_end_time - start_time
print "doing proof"
try:
# In this case, the relationship is returned when you run the plan.
with engine.prove_goal(
'example.how_related($person1, $person2)',
person1=person1) \
as gen:
for vars, plan in gen:
print "%s, %s are %s" % (person1, vars['person2'], plan())
except StandardError:
# This converts stack frames of generated python functions back to the
# .krb file.
krb_traceback.print_exc()
sys.exit(1)
prove_time = time.time() - fc_end_time
print
print "done"
engine.print_stats()
print "fc time %.2f, %.0f asserts/sec" % \
(fc_time, engine.get_kb('family').get_stats()[2] / fc_time)
print "bc time %.2f, %.0f goals/sec" % \
(prove_time, engine.get_kb('example').num_prove_calls / prove_time)
print "total time %.2f" % (fc_time + prove_time)
# Need some extra goodies for general()...
from pyke import contexts, pattern
def general(person1 = None, person2 = None, relationship = None):
engine.reset() # Allows us to run tests multiple times.
start_time = time.time()
engine.activate('bc2_example') # same rule base as bc2_test()
fc_end_time = time.time()
fc_time = fc_end_time - start_time
print "doing proof"
args = {}
if person1: args['person1'] = person1
if person2: args['person2'] = person2
if relationship: args['relationship'] = relationship
try:
with engine.prove_goal(
'bc2_example.how_related($person1, $person2, $relationship)',
**args
) as gen:
for vars, plan in gen:
print "%s, %s are %s" % (vars['person1'],
vars['person2'],
vars['relationship'])
except StandardError:
# This converts stack frames of generated python functions back to the
# .krb file.
krb_traceback.print_exc()
sys.exit(1)
prove_time = time.time() - fc_end_time
print
print "done"
engine.print_stats()
print "bc time %.2f, %.0f goals/sec" % \
(prove_time,
engine.get_kb('bc2_example').num_prove_calls / prove_time)
import types
def make_pattern(x):
if isinstance(x, types.StringTypes):
if x[0] == '$': return contexts.variable(x[1:])
return pattern.pattern_literal(x)
if isinstance(x, (tuple, list)):
return pattern.pattern_tuple(tuple(make_pattern(element)
for element in x))
return pattern.pattern_literal(x)
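# For example (illustrative):
#     make_pattern('$x')        -> contexts.variable('x')
#     make_pattern(('a', '$x')) -> pattern.pattern_tuple(
#                                      (pattern.pattern_literal('a'),
#                                       contexts.variable('x')))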
| {
"repo_name": "myaskevich/pyke",
"path": "examples/family_relations/driver.py",
"copies": "2",
"size": "8004",
"license": "mit",
"hash": -3869335701660608000,
"line_mean": 36.0509259259,
"line_max": 80,
"alpha_frac": 0.6230163689,
"autogenerated": false,
"ratio": 3.691420664206642,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5314437033106643,
"avg_score": null,
"num_lines": null
} |
""" See http://www.dabeaz.com/ply/ply.html for syntax of grammer definitions.
"""
from __future__ import with_statement
import itertools
import warnings
import os, os.path
from pyke.krb_compiler.ply import yacc
from pyke.krb_compiler import scanner
from pyke import pattern, contexts
tokens = scanner.tokens
goal_mode = False
def p_top(p):
''' top : file
| python_goal
'''
p[0] = p[1]
def p_goal(p):
''' python_goal : CHECK_TOK IDENTIFIER_TOK '.' IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK
'''
p[0] = (p[2], p[4], p[6], pattern_vars)
def p_file(p):
''' file : nl_opt parent_opt fc_rules bc_rules_opt
'''
p[0] = ('file', p[2], (tuple(p[3]), ()), p[4])
def p_file_fc_extras(p):
''' file : nl_opt parent_opt fc_rules fc_extras bc_rules_opt
'''
p[0] = ('file', p[2], (tuple(p[3]), p[4]), p[5])
def p_file_bc(p):
''' file : nl_opt parent_opt bc_rules_section
'''
p[0] = ('file', p[2], ((), ()), p[3])
# Uncomment this to generate an error in the grammar.
#def p_bogus(p):
# ''' file : nl_opt parent_opt bc_rules_opt
# '''
# p[0] = ('file', p[2], ((), ()), p[3])
def p_parent(p):
''' parent_opt : EXTENDING_TOK IDENTIFIER_TOK without_opt NL_TOK
'''
p[0] = ('parent', p[2], tuple(p[3]))
def p_second(p):
''' without_opt : WITHOUT_TOK without_names comma_opt
'''
p[0] = p[2]
def p_fourth(p):
''' when_opt : WHEN_TOK NL_TOK reset_plan_vars INDENT_TOK bc_premises DEINDENT_TOK
'''
p[0] = p[5]
def p_reset_plan_vars(p):
''' reset_plan_vars :
'''
global plan_var, plan_var_number
plan_var_number = 1
plan_var = 'plan#%d' % plan_var_number
p[0] = None
def p_inc_plan_vars(p):
''' inc_plan_vars :
'''
global plan_var, plan_var_number
plan_var_number += 1
plan_var = 'plan#%d' % plan_var_number
p[0] = None
def p_fifth(p):
''' bc_extras_opt : BC_EXTRAS_TOK NL_TOK start_extra_statements INDENT_TOK python_extras_code NL_TOK DEINDENT_TOK
fc_extras : FC_EXTRAS_TOK NL_TOK start_extra_statements INDENT_TOK python_extras_code NL_TOK DEINDENT_TOK
plan_extras_opt : PLAN_EXTRAS_TOK NL_TOK start_extra_statements INDENT_TOK python_extras_code NL_TOK DEINDENT_TOK
with_opt : WITH_TOK NL_TOK start_python_statements INDENT_TOK python_plan_code NL_TOK DEINDENT_TOK
'''
p[0] = p[5]
def p_none(p):
''' bc_require_opt :
comma_opt :
comma_opt : ','
colon_opt :
data : NONE_TOK
fc_require_opt :
nl_opt :
nl_opt : NL_TOK
parent_opt :
plan_spec : NL_TOK
rest_opt : comma_opt
'''
p[0] = None
def p_colon_deprecation(p):
''' colon_opt : ':'
'''
warnings.warn_explicit("use of ':' deprecated after rule names",
DeprecationWarning,
scanner.lexer.filename, p.lineno(1))
p[0] = None
def p_fc_rule(p):
''' fc_rule : IDENTIFIER_TOK colon_opt NL_TOK INDENT_TOK foreach_opt ASSERT_TOK NL_TOK INDENT_TOK assertions DEINDENT_TOK DEINDENT_TOK
'''
p[0] = ('fc_rule', p[1], p[5], tuple(p[9]))
def p_foreach(p):
''' foreach_opt : FOREACH_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK
'''
p[0] = tuple(p[4])
def p_fc_premise(p):
''' fc_premise : IDENTIFIER_TOK '.' IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK NL_TOK
'''
p[0] = ('fc_premise', p[1], p[3], tuple(p[5]), p.lineno(1), p.lineno(6))
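# Matches a premise of the form kb_name.fact_name(pattern, ...), e.g.
# (illustrative): family.son_of($son, $father)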
def p_fc_first_1(p):
''' fc_premise : FIRST_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK
'''
p[0] = ('fc_first', tuple(p[4]), p.lineno(1))
def p_fc_first_n(p):
''' fc_premise : FIRST_TOK fc_premise
'''
p[0] = ('fc_first', (p[2],), p.lineno(1))
def p_fc_notany(p):
''' fc_premise : NOTANY_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK
'''
p[0] = ('fc_notany', tuple(p[4]), p.lineno(1))
def p_fc_forall(p):
''' fc_premise : FORALL_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK fc_require_opt
'''
p[0] = ('fc_forall', tuple(p[4]), p[6], p.lineno(1), p.linespan(6)[1])
def p_fc_require_opt(p):
''' fc_require_opt : REQUIRE_TOK NL_TOK INDENT_TOK fc_premises DEINDENT_TOK
'''
p[0] = tuple(p[4])
def p_python_eq(p):
''' python_premise : pattern start_python_code '=' python_rule_code NL_TOK
'''
p[0] = ('python_eq', p[1], p[4], p.linespan(1)[0], p.linespan(4)[1])
def p_python_in(p):
''' python_premise : pattern start_python_code IN_TOK python_rule_code NL_TOK
'''
p[0] = ('python_in', p[1], p[4], p.linespan(1)[0], p.linespan(4)[1])
def p_python_check(p):
''' python_premise : start_python_code CHECK_TOK python_rule_code NL_TOK
'''
p[0] = ('python_check', p[3], p.lineno(2), p.linespan(3)[1])
def p_python_block_n(p):
''' python_premise : check_nl PYTHON_TOK NL_TOK start_python_assertion INDENT_TOK python_rule_code NL_TOK DEINDENT_TOK
'''
p[0] = ('python_block', p[6], p.lineno(2), p.linespan(6)[1])
def p_python_block_1(p):
''' python_premise : check_nl PYTHON_TOK start_python_code NOT_NL_TOK python_rule_code NL_TOK
'''
p[0] = ('python_block', p[5], p.lineno(2), p.linespan(5)[1])
def p_assertion(p):
''' assertion : IDENTIFIER_TOK '.' IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK NL_TOK
'''
p[0] = ('assert', p[1], p[3], tuple(p[5]), p.lineno(1), p.lineno(6))
def p_python_assertion_n(p):
''' assertion : check_nl PYTHON_TOK NL_TOK start_python_assertion INDENT_TOK python_rule_code NL_TOK DEINDENT_TOK
'''
p[0] = ('python_assertion', p[6], p.lineno(2), p.linespan(6)[1])
def p_python_assertion_1(p):
''' assertion : check_nl PYTHON_TOK start_python_code NOT_NL_TOK python_rule_code NL_TOK
'''
p[0] = ('python_assertion', p[5], p.lineno(2), p.linespan(5)[1])
def p_check_nl(p):
''' check_nl :
'''
scanner.lexer.begin('checknl')
p[0] = None
def p_bc_rule(p):
''' bc_rule : IDENTIFIER_TOK colon_opt NL_TOK INDENT_TOK USE_TOK goal when_opt with_opt DEINDENT_TOK
'''
p[0] = ('bc_rule', p[1], p[6], tuple(p[7]), tuple(p[8][0]), tuple(p[8][1]))
def p_empty_bc_rules_opt(p):
''' bc_rules_opt :
'''
p[0] = ((), (), ())
def p_bc_rules_section(p):
''' bc_rules_section : bc_rules bc_extras_opt plan_extras_opt
'''
p[0] = (tuple(p[1]), p[2], p[3])
def p_goal_no_taking(p):
''' goal : IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK NL_TOK
'''
p[0] = ('goal', p[1], tuple(p[3]), (), p.lineno(1), p.lineno(4))
def p_goal_taking(p):
''' goal : IDENTIFIER_TOK LP_TOK patterns_opt RP_TOK taking
'''
p[0] = ('goal', p[1], tuple(p[3]), p[5], p.lineno(1), p.lineno(4))
def p_name_sym(p):
''' name : IDENTIFIER_TOK
'''
p[0] = repr(p[1])
def p_name_pat_var(p):
''' name : PATTERN_VAR_TOK
'''
p[0] = "context.lookup_data(%s)" % p[1]
def p_bc_premise1(p):
''' bc_premise : name LP_TOK patterns_opt RP_TOK plan_spec
'''
p[0] = ('bc_premise', False, None, p[1], tuple(p[3]), p[5],
p.linespan(1)[0], p.lineno(4))
def p_bc_premise2(p):
''' bc_premise : '!' name LP_TOK patterns_opt RP_TOK plan_spec
'''
p[0] = ('bc_premise', True, None, p[2], tuple(p[4]), p[6],
p.lineno(1), p.lineno(5))
def p_bc_premise3(p):
''' bc_premise : name '.' name LP_TOK patterns_opt RP_TOK plan_spec
'''
p[0] = ('bc_premise', False, p[1], p[3], tuple(p[5]), p[7],
p.linespan(1)[0], p.lineno(6))
def p_bc_premise4(p):
''' bc_premise : '!' name '.' name LP_TOK patterns_opt RP_TOK plan_spec
'''
p[0] = ('bc_premise', True, p[2], p[4], tuple(p[6]), p[8],
p.lineno(1), p.lineno(7))
def p_bc_first_1f(p):
''' bc_premise : FIRST_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK
'''
p[0] = ('bc_first', False, tuple(p[4]), p.lineno(1))
def p_bc_first_nf(p):
''' bc_premise : FIRST_TOK bc_premise
'''
p[0] = ('bc_first', False, (p[2],), p.lineno(1))
def p_bc_first_1t(p):
''' bc_premise : '!' FIRST_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK
'''
    p[0] = ('bc_first', True, tuple(p[5]), p.lineno(1))
def p_bc_first_nt(p):
''' bc_premise : '!' FIRST_TOK bc_premise
'''
    p[0] = ('bc_first', True, (p[3],), p.lineno(1))
def p_bc_notany(p):
''' bc_premise : NOTANY_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK
'''
p[0] = ('bc_notany', tuple(p[4]), p.lineno(1))
def p_bc_forall(p):
''' bc_premise : FORALL_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK bc_require_opt
'''
p[0] = ('bc_forall', tuple(p[4]), p[6], p.lineno(1), p.linespan(6)[1])
def p_bc_require_opt(p):
''' bc_require_opt : REQUIRE_TOK NL_TOK INDENT_TOK bc_premises DEINDENT_TOK
'''
p[0] = tuple(p[4])
def p_as(p):
''' plan_spec : AS_TOK PATTERN_VAR_TOK NL_TOK
'''
p[0] = ('as', p[2][1:-1])
def p_step_code(p):
''' plan_spec : STEP_TOK NUMBER_TOK NL_TOK start_python_plan_call INDENT_TOK python_plan_code NL_TOK DEINDENT_TOK
'''
p[0] = ('plan_spec', p[2], plan_var, p[6][0], p[6][1],
p.lineno(1), p.lexpos(1))
def p_code(p):
''' plan_spec : NL_TOK start_python_plan_call INDENT_TOK python_plan_code NL_TOK DEINDENT_TOK
'''
p[0] = ('plan_spec', None, plan_var, p[4][0], p[4][1], p[4][2], p[4][3])
def p_start_python_code(p):
''' start_python_code :
'''
scanner.start_code(var_format = "context.lookup_data('%s')")
p[0] = None
def p_start_python_plan_call(p):
''' start_python_plan_call :
'''
scanner.start_code(plan_name = plan_var, multiline = True)
p[0] = None
def p_start_python_statements(p):
''' start_python_statements :
'''
scanner.start_code(multiline = True)
p[0] = None
def p_start_extra_statements(p):
''' start_extra_statements :
'''
scanner.start_code(multiline = True, var_format = None)
p[0] = None
def p_start_python_assertion(p):
''' start_python_assertion :
'''
scanner.start_code(multiline = True,
var_format = "context.lookup_data('%s')")
p[0] = None
def p_python_rule_code(p):
''' python_rule_code : CODE_TOK
'''
p[0] = p[1]
def p_python_plan_code(p):
''' python_plan_code : CODE_TOK
'''
p[0] = p[1]
def p_python_extras_code(p):
''' python_extras_code : CODE_TOK
'''
p[0] = p[1][0]
def p_pattern_var(p):
''' variable : PATTERN_VAR_TOK
'''
global pattern_vars
if goal_mode:
pattern_vars.append(p[1])
p[0] = contexts.variable(p[1])
else:
p[0] = "contexts.variable(%s)" % p[1]
def p_anonymous_var(p):
''' variable : ANONYMOUS_VAR_TOK
'''
if goal_mode:
p[0] = contexts.anonymous(p[1])
else:
p[0] = "contexts.anonymous(%s)" % p[1]
def p_first(p):
''' bc_premise : python_premise
bc_rules_opt : bc_rules_section
data : NUMBER_TOK
fc_premise : python_premise
pattern : pattern_proper
pattern_proper : variable
patterns_opt : patterns comma_opt
'''
p[0] = p[1]
def p_last(p):
''' rest_opt : ',' '*' variable
'''
p[0] = p[len(p)-1]
def p_data_string(p):
''' data : STRING_TOK
'''
if goal_mode:
if p[1].startswith("'''") or p[1].startswith('"""'):
p[0] = scanner.unescape(p[1][3:-3])
else:
p[0] = scanner.unescape(p[1][1:-1])
else:
p[0] = p[1]
def p_taking(p):
''' taking : start_python_code TAKING_TOK python_rule_code NL_TOK
'''
p[0] = p[3][0]
def p_taking2(p):
''' taking : NL_TOK INDENT_TOK start_python_code TAKING_TOK python_rule_code NL_TOK DEINDENT_TOK
'''
p[0] = p[5][0]
def p_quoted_last(p):
''' data : IDENTIFIER_TOK
'''
if goal_mode:
p[0] = p[len(p)-1]
else:
p[0] = "'" + p[len(p)-1] + "'"
def p_false(p):
''' data : FALSE_TOK
'''
p[0] = False
def p_true(p):
''' data : TRUE_TOK
'''
p[0] = True
def p_start_list(p):
''' assertions : assertion
bc_premises : bc_premise
bc_rules : bc_rule
data_list : data
fc_premises : fc_premise
fc_rules : fc_rule
patterns : pattern
patterns_proper : pattern_proper
without_names : IDENTIFIER_TOK
'''
p[0] = [p[1]]
def p_empty_tuple(p):
''' bc_extras_opt :
data : LP_TOK RP_TOK
foreach_opt :
patterns_opt :
plan_extras_opt :
when_opt :
without_opt :
'''
p[0] = ()
def p_double_empty_tuple(p):
''' with_opt :
'''
p[0] = (), ()
def p_append_list(p):
''' assertions : assertions assertion
bc_premises : bc_premises inc_plan_vars bc_premise
bc_rules : bc_rules bc_rule
data_list : data_list ',' data
fc_premises : fc_premises fc_premise
fc_rules : fc_rules fc_rule
patterns : patterns ',' pattern
patterns_proper : patterns_proper ',' pattern
without_names : without_names ',' IDENTIFIER_TOK
'''
p[1].append(p[len(p)-1])
p[0] = p[1]
def p_pattern(p):
''' pattern : data '''
if goal_mode:
p[0] = pattern.pattern_literal(p[1])
else:
p[0] = "pattern.pattern_literal(%s)" % str(p[1])
def p_pattern_tuple1(p):
''' pattern_proper : LP_TOK '*' variable RP_TOK '''
if goal_mode:
p[0] = pattern.pattern_tuple((), p[3])
else:
p[0] = "pattern.pattern_tuple((), %s)" % p[3]
def p_pattern_tuple2(p):
''' pattern_proper : LP_TOK data_list ',' '*' variable RP_TOK '''
if goal_mode:
p[0] = pattern.pattern_tuple(
tuple(pattern.pattern_literal(x) for x in p[2]),
p[5])
else:
p[0] = "pattern.pattern_tuple((%s), %s)" % \
(' '.join("pattern.pattern_literal(%s)," % str(x)
for x in p[2]),
p[5])
def p_pattern_tuple3(p):
''' pattern_proper : LP_TOK data_list ',' patterns_proper rest_opt RP_TOK '''
if goal_mode:
p[0] = pattern.pattern_tuple(
tuple(itertools.chain(
(pattern.pattern_literal(x) for x in p[2]),
p[4])),
p[5])
else:
p[0] = "pattern.pattern_tuple((%s), %s)" % \
(' '.join(itertools.chain(
("pattern.pattern_literal(%s)," % str(x)
for x in p[2]),
(str(x) + ',' for x in p[4]))),
p[5])
def p_pattern_tuple4(p):
''' pattern_proper : LP_TOK patterns_proper rest_opt RP_TOK '''
if goal_mode:
p[0] = pattern.pattern_tuple(p[2], p[3])
else:
p[0] = "pattern.pattern_tuple((%s), %s)" % \
(' '.join(str(x) + ',' for x in p[2]),
p[3])
def p_tuple(p):
''' data : LP_TOK data_list comma_opt RP_TOK '''
if goal_mode:
p[0] = tuple(p[2])
else:
p[0] = '(' + ' '.join(str(x) + ',' for x in p[2]) + ')'
def p_error(t):
if t is None:
raise SyntaxError("invalid syntax", scanner.syntaxerror_params())
else:
raise SyntaxError("invalid syntax",
scanner.syntaxerror_params(t.lexpos, t.lineno))
parser = None
def init(this_module, check_tables = False, debug = 0):
global parser
if parser is None:
outputdir = os.path.dirname(this_module.__file__)
if debug:
parser = yacc.yacc(module=this_module, write_tables=0,
debug=debug, debugfile='krbparser.yacc.out',
outputdir=outputdir)
else:
if check_tables:
krbparser_mtime = os.path.getmtime(this_module.__file__)
tables_name = os.path.join(outputdir, 'krbparser_tables.py')
try:
ok = os.path.getmtime(tables_name) >= krbparser_mtime
except OSError:
ok = False
if not ok:
#print "regenerating krbparser_tables"
try: os.remove(tables_name)
except OSError: pass
try: os.remove(tables_name + 'c')
except OSError: pass
try: os.remove(tables_name + 'o')
except OSError: pass
parser = yacc.yacc(module=this_module, debug=0,
optimize=1, write_tables=1,
tabmodule='pyke.krb_compiler.krbparser_tables',
outputdir=outputdir)
# Use the first line for normal use, the second for testing changes in the
# grammar (the first line does not report grammar errors!).
def parse(this_module, filename, check_tables = False, debug = 0):
#def parse(this_module, filename, check_tables = True, debug = 1):
global goal_mode
init(this_module, check_tables, debug)
with open(filename) as f:
scanner.init(scanner, debug, check_tables)
scanner.lexer.lineno = 1
scanner.lexer.filename = filename
scanner.kfb_mode = False
scanner.goal_mode = False
goal_mode = False
#parser.restart()
return parser.parse(f.read() + '\n', lexer=scanner.lexer, tracking=True,
debug=debug)
def parse_goal(this_module, s, check_tables = False, debug = 0):
global goal_mode, pattern_vars
init(this_module, check_tables, debug)
scanner.init(scanner, debug, check_tables)
scanner.lexer.lineno = 1
scanner.lexer.filename = s
scanner.kfb_mode = False
scanner.goal_mode = True
goal_mode = True
pattern_vars = []
#parser.restart()
return parser.parse('check ' + s, lexer=scanner.lexer, tracking=True,
debug=debug)
def run(this_module, filename='TEST/krbparse_test.krb'):
r""" Used for testing.
>>> import os, os.path
>>> from pyke.krb_compiler import krbparser
>>> run(krbparser, os.path.join(os.path.dirname(__file__),
... 'TEST/krbparse_test.krb'))
('file',
None,
((('fc_rule',
'name1',
(('fc_premise',
'a',
'b',
("pattern.pattern_literal('x')", "contexts.variable('b')"),
7,
7),),
(('assert', 'c', 'd', ("contexts.variable('b')",), 9, 9),)),),
()),
((('bc_rule',
'name2',
('goal',
'x',
('pattern.pattern_literal(1)',
"contexts.variable('c')",
"pattern.pattern_literal((1, 'b',))",
"pattern.pattern_tuple((pattern.pattern_literal(1), pattern.pattern_literal('b'), contexts.variable('c'),), None)"),
(' (a, b, c)',),
12,
12),
(('bc_premise',
False,
"'a'",
"'b'",
("pattern.pattern_literal('x')", "contexts.variable('c')"),
('plan_spec',
None,
'plan#1',
('line1', "line2 (context['d']) end2"),
('d',),
15,
219),
14,
14),
('bc_premise',
False,
None,
"'x'",
('pattern.pattern_literal(1)',
'pattern.pattern_literal(2)',
'pattern.pattern_literal(3)'),
('plan_spec',
None,
'plan#2',
("some (context['plan'])(stuff) \\",
' and more stuff fail here'),
('plan',),
18,
280),
17,
17)),
("a (context['plan']) b",),
('plan',)),
('bc_rule',
'name3',
('goal',
'x',
('pattern.pattern_literal(1)',
"contexts.variable('c')",
"pattern.pattern_literal((1, 'b',))",
"pattern.pattern_tuple((pattern.pattern_literal(1), pattern.pattern_literal('b'), contexts.variable('c'),), None)"),
(),
24,
24),
(('bc_premise',
False,
"'a'",
"'b'",
("pattern.pattern_literal('x')", "contexts.variable('c')"),
('plan_spec',
None,
'plan#1',
('line1', "line2 (context['d']) end2"),
('d',),
27,
452),
26,
26),
('bc_premise',
False,
None,
"'x'",
('pattern.pattern_literal(1)',
'pattern.pattern_literal(2)',
'pattern.pattern_literal(3)'),
('as', 'foo_fn'),
29,
29)),
(),
())),
(),
()))
"""
import pprint
pprint.pprint(parse(this_module, filename, True))
| {
"repo_name": "e-loue/pyke",
"path": "pyke/krb_compiler/krbparser.py",
"copies": "2",
"size": "22230",
"license": "mit",
"hash": 2056975809660439800,
"line_mean": 29.703038674,
"line_max": 138,
"alpha_frac": 0.5273291646,
"autogenerated": false,
"ratio": 3.0136930585683297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45410222231683295,
"avg_score": null,
"num_lines": null
} |
""" See http://www.dabeaz.com/ply/ply.html for syntax of grammer definitions.
"""
from __future__ import with_statement
import string
import unicodedata     # needed by unescape() for \N{...} escapes
import os, os.path
from pyke.krb_compiler.ply import lex
debug=0
kfb_mode = False
goal_mode = False
states = (
('indent', 'exclusive'),
('code', 'exclusive'),
('checknl', 'exclusive'),
)
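# Lexer states beyond INITIAL: 'indent' measures leading whitespace after a
# newline to emit INDENT_TOK/DEINDENT_TOK, 'code' swallows embedded python
# until it deindents (yielding a single CODE_TOK), and 'checknl' merely
# reports whether a newline comes next (NL_TOK vs NOT_NL_TOK).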
kfb_keywords = frozenset((
'False',
'None',
'True',
))
keywords = frozenset((
'as',
'assert',
'bc_extras',
'check',
'extending',
'False',
'fc_extras',
'first',
'forall',
'foreach',
'in',
'None',
'notany',
'plan_extras',
'python',
'require',
'step',
'taking',
'True',
'use',
'when',
'with',
'without',
))
base_kfb_tokens = (
# 'DATE_TOK', # FIX: Add the definition for this!
'IDENTIFIER_TOK',
# 'LB_TOK',
# 'LC_TOK',
'LP_TOK',
'NL_TOK',
'NUMBER_TOK',
# 'RB_TOK',
# 'RC_TOK',
'RP_TOK',
'STRING_TOK',
)
base_krb_tokens = base_kfb_tokens + (
'ANONYMOUS_VAR_TOK',
'CODE_TOK',
'DEINDENT_TOK',
'INDENT_TOK',
'NOT_NL_TOK',
'PATTERN_VAR_TOK',
)
kfb_tokens = tuple(x.upper() + '_TOK' for x in kfb_keywords) + base_kfb_tokens
tokens = tuple(x.upper() + '_TOK' for x in keywords) + base_krb_tokens
literals = '*:,!.=' # FIX: delete ':'
t_ignore = ' \t'
t_ignore_comment = r'\#.*'
def t_continuation(t):
r'\\(\r)?\n'
t.lexer.lineno += 1
def t_NL_TOK(t):
# newline, followed by any number of empty or comment only lines
r'(\r)?\n([ \t]*(\#.*)?(\r)?\n)*'
t.lexer.lineno += t.value.count('\n')
if kfb_mode: return t
if nesting_level == 0:
t.lexer.begin('indent')
        t.lexer.skip(-1) # put the final '\n' back for t_indent_sp!
return t
indent_levels = []
# to prevent getting a warning...
t_indent_ignore = ''
def t_indent_sp(t):
# ply doesn't like re's that can be empty, so we'll include the prior
# newline char in the re and then skip over it when we count the indent
    # level.  The t_NL_TOK function does a skip(-1) to retain the final '\n'
# for t_indent_sp.
r'\n[ \t]*'
indent = count_indent(t.value[1:])[0]
current_indent = indent_levels[-1] if indent_levels else 0
if debug:
print "t_indent_sp: t.value", repr(t.value), "indent", indent, \
"current_indent", current_indent, \
"indent_levels", indent_levels, \
"t.lexpos", t.lexpos, \
"t.lexer.lexpos", t.lexer.lexpos, \
"t.lexer.lexdata[]", repr(t.lexer.lexdata[t.lexpos])
if indent > current_indent:
t.type = 'INDENT_TOK'
indent_levels.append(indent)
t.lexer.begin('INITIAL')
if debug: print "INDENT_TOK: indent_levels", indent_levels
return t
if indent < current_indent:
if indent > 0 and indent not in indent_levels:
raise SyntaxError(
"deindent doesn't match any previous indent level",
syntaxerror_params(t.lexpos))
t.type = 'DEINDENT_TOK'
del indent_levels[-1]
if indent < (indent_levels[-1] if indent_levels else 0):
if debug: print " -- pushing indent back"
t.lexer.skip(-len(t.value))
else:
if debug: print " -- doing begin('INITIAL')"
t.lexer.begin('INITIAL')
if debug: print "DEINDENT_TOK: indent_levels", indent_levels
return t
# else indent == current_indent
t.lexer.begin('INITIAL')
if debug: print "no indent: indent_levels", indent_levels
t_checknl_ignore = ' \t'
def t_checknl_nl(t):
# optional comment followed by newline
r'(\#.*)?(\r)?\n'
t.lexer.lineno += 1
t.lexer.begin('indent')
    t.lexer.skip(-1) # put the final '\n' back for t_indent_sp!
t.type = 'NL_TOK'
return t
def t_checknl_other(t):
# something other than newline
r'[^\#\r\n]'
t.lexer.skip(-1) # put the final char back!
t.type = 'NOT_NL_TOK'
return t
def start_code(plan_name = None, multiline = False,
var_format = "(context['%s'])"):
global current_line, code, current_plan_name, code__level
global pattern_var_format, plan_vars_needed, code_nesting_level
global code_lineno, code_lexpos
global code_indent_level
pattern_var_format = var_format
plan_vars_needed = []
current_line = ''
code = []
if multiline: code_indent_level = indent_levels[-1]
else: code_indent_level = 1000000000
current_plan_name = plan_name
code_nesting_level = 0
code_lineno = code_lexpos = None
lexer.begin('code')
def mark(t):
global code_lineno, code_lexpos
if code_lineno is None:
code_lineno = t.lexer.lineno
code_lexpos = t.lexpos
# to prevent getting a warning...
t_code_ignore = ''
def t_code_string(t):
r"'''([^\\]|\\.)*?'''|" \
r'"""([^\\]|\\.)*?"""|' \
r"'([^'\\\n\r]|\\.|\\(\r)?\n)*?'|" \
r'"([^"\\\n\r]|\\.|\\(\r)?\n)*?"'
global current_line
current_line += t.value
mark(t)
if debug: print "scanner saw string:", t.value
t.lexer.lineno += t.value.count('\n')
def t_code_comment(t):
r'[ \t\f\r]*\#.*'
global current_line
if debug: print "scanner saw comment:", t.value
#current_line += t.value
def t_code_plan(t):
r'\$\$'
global current_line
mark(t)
if debug:
print "scanner saw '$$', current_plan_name is", current_plan_name
if not current_plan_name:
raise SyntaxError("'$$' only allowed in plan_specs within the "
"'when' clause",
syntaxerror_params(t.lexpos))
current_line += pattern_var_format % current_plan_name
plan_vars_needed.append(current_plan_name)
def t_code_pattern_var(t):
r'\$[a-zA-Z_][a-zA-Z0-9_]*\b'
global current_line
mark(t)
if not pattern_var_format:
raise SyntaxError("$<name> only allowed in backward chaining rules",
syntaxerror_params(t.lexpos))
current_line += pattern_var_format % t.value[1:]
plan_vars_needed.append(t.value[1:])
if debug: print "scanner saw pattern_var:", t.value
def t_code_continuation(t):
r'\\(\r)?\n'
global current_line
t.lexer.lineno += 1
current_line += '\\'
code.append(current_line)
current_line = ''
if debug: print "scanner saw continuation:", t.value
def t_code_open(t):
r'[{([]'
global current_line, code_nesting_level
mark(t)
code_nesting_level += 1
current_line += t.value
def t_code_close(t):
r'[]})]'
global current_line, code_nesting_level
mark(t)
if code_nesting_level <= 0:
raise SyntaxError("unmatched %s" % repr(t.value),
syntaxerror_params(t.lexpos))
code_nesting_level -= 1
current_line += t.value
def t_code_symbol(t):
r'''[0-9a-zA-Z_]+'''
global current_line
mark(t)
current_line += t.value
if debug: print "scanner saw symbol:", t.value
def t_code_space(t):
r'''[ \t]+'''
global current_line
current_line += t.value
if debug: print "scanner saw space chars:", t.value
def t_code_other(t):
r'''[^][(){}$\\'"\r\n0-9a-zA-Z_ \t]+'''
global current_line
mark(t)
current_line += t.value
if debug: print "scanner saw other chars:", t.value
def t_code_NL_TOK(t):
r'(\r)?\n([ \t]*(\#.*)?(\r)?\n)*[ \t]*'
global current_line
if current_line:
code.append(current_line)
current_line = ''
indent = count_indent(t.value[t.value.rindex('\n') + 1:])[0]
if debug: print "scanner saw nl:", t.value, "new indent is", indent
if indent < code_indent_level and code_nesting_level == 0:
t.lexer.skip(-len(t.value))
t.type = 'CODE_TOK'
t.value = tuple(code), tuple(plan_vars_needed), code_lineno, code_lexpos
if debug: print "scanner begin('INITIAL')"
t.lexer.begin('INITIAL')
return t
t.lexer.lineno += t.value.count('\n')
current_line = ' ' * (indent - code_indent_level)
# strings:
def t_tsqstring(t):
r"[uU]?[rR]?'''([^\\]|\\.)*?'''"
#t.value = unescape(t.value[3:-3])
t.type = 'STRING_TOK'
t.lexer.lineno += t.value.count('\n')
return t
def t_tdqstring(t):
r'[uU]?[rR]?"""([^\\]|\\.)*?"""'
#t.value = unescape(t.value[3:-3])
t.type = 'STRING_TOK'
t.lexer.lineno += t.value.count('\n')
return t
def t_sqstring(t):
r"[uU]?[rR]?'([^'\\\n\r]|\\.|\\(\r)?\n)*?'"
#t.value = unescape(t.value[1:-1])
t.lexer.lineno += t.value.count('\n')
t.type = 'STRING_TOK'
return t
def t_dqstring(t):
r'[uU]?[rR]?"([^"\\\n\r]|\\.|\\(\r)?\n)*?"'
#t.value = unescape(t.value[1:-1])
t.type = 'STRING_TOK'
t.lexer.lineno += t.value.count('\n')
return t
# end strings
def t_ANONYMOUS_VAR_TOK(t):
r'\$_([a-zA-Z_][a-zA-Z0-9_]*)?'
if kfb_mode: t_ANY_error(t)
if goal_mode:
t.value = t.value[1:]
else:
t.value = "'" + t.value[1:] + "'"
return t
def t_PATTERN_VAR_TOK(t):
r'\$[a-zA-Z][a-zA-Z0-9_]*'
if kfb_mode: t_ANY_error(t)
if goal_mode:
t.value = t.value[1:]
else:
t.value = "'" + t.value[1:] + "'"
return t
def t_IDENTIFIER_TOK(t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
if kfb_mode and t.value in kfb_keywords or \
not kfb_mode and t.value in keywords:
t.type = t.value.upper() + '_TOK'
return t
# numbers:
def t_float(t):
r'[-+]?([0-9]+(\.[0-9]*([eE][-+]?[0-9]+)?|[eE][-+]?[0-9]+)|\.[0-9]+([eE][-+]?[0-9]+)?)'
t.value = float(t.value)
t.type = 'NUMBER_TOK'
return t
def t_hexint(t):
r'[-+]?0[xX][0-9a-fA-F]+'
t.value = int(t.value, 16)
t.type = 'NUMBER_TOK'
return t
def t_octalint(t):
r'[-+]?0[0-7]*'
t.value = int(t.value, 8)
t.type = 'NUMBER_TOK'
return t
def t_int(t):
r'[-+]?[1-9][0-9]*'
t.value = int(t.value)
t.type = 'NUMBER_TOK'
return t
# end numbers
nesting_level = 0
def t_LB_TOK(t):
r'\['
global nesting_level
nesting_level += 1
#return t
def t_LC_TOK(t):
r'\{'
global nesting_level
nesting_level += 1
#return t
def t_LP_TOK(t):
r'\('
global nesting_level
nesting_level += 1
return t
def t_RB_TOK(t):
r'\]'
global nesting_level
assert nesting_level > 0
nesting_level -= 1
#return t
def t_RC_TOK(t):
r'\}'
global nesting_level
assert nesting_level > 0
nesting_level -= 1
#return t
def t_RP_TOK(t):
r'\)'
global nesting_level
assert nesting_level > 0
nesting_level -= 1
return t
def t_ANY_error(t):
raise SyntaxError("illegal character %s" % repr(t.value[0]),
syntaxerror_params(t.lexpos))
# helper functions:
def count_indent(s, count_all=False):
r'''
>>> count_indent('')
(0, 0)
>>> count_indent(' ')
(3, 3)
>>> count_indent(' stuff')
(3, 3)
>>> count_indent('\t')
(8, 1)
>>> count_indent('\t ')
(9, 2)
>>> count_indent('\t\t')
(16, 2)
>>> count_indent(' \t')
(8, 4)
>>> count_indent(' \t')
(8, 8)
>>> count_indent(' \t')
(16, 9)
>>> count_indent(' a\t', True)
(8, 3)
>>> count_indent(' a ', True)
(3, 3)
'''
indent = 0
chars = 0
for c in s:
if c == '\t': indent = (indent + 8) & ~7
elif c == ' ' or count_all: indent += 1
else: break
chars += 1
return indent, chars
escapes = {
'a': '\a',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
'v': '\v',
'\\': '\\',
'\'': '\'',
'\"': '\"',
}
def unescape(s):
start = 0
ans = []
i = s.find('\\', start)
while i >= 0:
ans.append(s[start:i])
e = escapes.get(s[i+1])
if e: # single char escape code
ans.append(e)
start = i + 2
elif s[i+1] == '\n': # ignore \ at end of line
start = i + 2
elif s[i+1] == '\r': # ignore \ at end of line
if s[i+2] == '\n': start = i + 3
else: start = i + 2
elif s[i+1:i+3] == 'N{':
end = s.index('}', i + 3)
ans.append(unicodedata.lookup(s[i+3:end]))
start = end + 1
elif s[i+1] == 'u':
ans.append(unichr(int(s[i+2:i+6], 16)))
start = i + 6
elif s[i+1] == 'U':
ans.append(unichr(int(s[i+2:i+10], 16)))
start = i + 10
        elif s[i+1] in string.octdigits:
            # octal digits start right after the backslash (unlike \xhh,
            # where the digits start two characters in)
            if i+2 >= len(s) or s[i+2] not in string.octdigits:
                ans.append(unichr(int(s[i+1:i+2], 8)))
                start = i + 2
            elif i+3 >= len(s) or s[i+3] not in string.octdigits:
                ans.append(unichr(int(s[i+1:i+3], 8)))
                start = i + 3
            else:
                ans.append(unichr(int(s[i+1:i+4], 8)))
                start = i + 4
        elif s[i+1] == 'x':
            if i+3 >= len(s) or s[i+3] not in string.hexdigits:
                ans.append(unichr(int(s[i+2:i+3], 16)))
                start = i + 3
            else:
                ans.append(unichr(int(s[i+2:i+4], 16)))
                start = i + 4
else:
ans.append(s[i])
start = i + 1
i = s.find('\\', start)
ans.append(s[start:])
return ''.join(ans)
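# Illustrative behaviour:
#     unescape(r'a\tb')   == 'a\tb'    (tab)
#     unescape(r'\x41')   == 'A'
#     unescape('a\\\nb')  == 'ab'      (backslash-newline is dropped)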
class token_iterator(object):
''' This is only used for testing the scanner.
'''
def __init__(self, input):
lexer.lineno = 1
lexer.input(input)
def __iter__(self): return self
def next(self):
t = lex.token()
if t: return t
raise StopIteration
def tokenize(s):
r'''
>>> from pyke.krb_compiler import scanner
>>> init(scanner, 0, True)
>>> tokenize("# This is a comment\n# line 2 of comment\n\n"
... "# comment after blank line\n")
LexToken(NL_TOK,'\n# line 2 of comment\n\n# comment after blank line\n',1,19)
>>> tokenize('name1\n forall foreach\n \nname2')
LexToken(IDENTIFIER_TOK,'name1',1,0)
LexToken(NL_TOK,'\n',1,5)
LexToken(INDENT_TOK,'\n ',2,5)
LexToken(FORALL_TOK,'forall',2,10)
LexToken(FOREACH_TOK,'foreach',2,19)
LexToken(NL_TOK,'\n \n',2,26)
LexToken(DEINDENT_TOK,'\n',4,38)
LexToken(IDENTIFIER_TOK,'name2',4,39)
'''
for t in token_iterator(s):
print t
def tokenize_file(filename = 'TEST/scan_test'):
r""" Used for testing.
>>> from pyke.krb_compiler import scanner
>>> init(scanner, 0, True)
>>> import os, os.path
>>> tokenize_file(os.path.join(os.path.dirname(__file__),
... 'TEST/scan_test'))
LexToken(NL_TOK,'\n# line 2 of comment\n\n# comment after blank line\n',1,19)
LexToken(IDENTIFIER_TOK,'name1',5,68)
LexToken(:,':',5,73)
LexToken(NL_TOK,'\n',5,74)
LexToken(INDENT_TOK,'\n ',6,74)
LexToken(FOREACH_TOK,'foreach',6,79)
LexToken(NL_TOK,'\n',6,86)
LexToken(INDENT_TOK,'\n\t',7,86)
LexToken(LP_TOK,'(',7,88)
LexToken(NUMBER_TOK,100,7,89)
LexToken(NUMBER_TOK,64,7,93)
LexToken(ANONYMOUS_VAR_TOK,"'_'",7,98)
LexToken(PATTERN_VAR_TOK,"'foo'",7,101)
LexToken(NUMBER_TOK,256,8,118)
LexToken(NUMBER_TOK,0,8,124)
LexToken(RP_TOK,')',8,125)
LexToken(NL_TOK,'\n',8,126)
LexToken(NUMBER_TOK,3.1400000000000001,9,129)
LexToken(NUMBER_TOK,0.98999999999999999,9,134)
LexToken(NUMBER_TOK,3.0,10,143)
LexToken(NUMBER_TOK,0.29999999999999999,10,146)
LexToken(NUMBER_TOK,3000000.0,10,149)
LexToken(NUMBER_TOK,3.0000000000000001e-06,10,153)
LexToken(NL_TOK,'\n',10,158)
LexToken(DEINDENT_TOK,'\n ',11,158)
LexToken(ASSERT_TOK,'assert',11,163)
LexToken(NL_TOK,'\n',11,169)
LexToken(INDENT_TOK,'\n\t',12,169)
LexToken(STRING_TOK,"'this is a string'",12,172)
LexToken(STRING_TOK,'"so is this"',12,191)
LexToken(STRING_TOK,"'''\n\tand this \\t too'''",12,204)
LexToken(STRING_TOK,"'should be\\\n able to do this too'",13,229)
LexToken(TRUE_TOK,'True',15,278)
LexToken(NL_TOK,'\n',15,283)
LexToken(!,'!',16,292)
LexToken(IDENTIFIER_TOK,'can',16,293)
LexToken(IDENTIFIER_TOK,'I',17,311)
LexToken(IDENTIFIER_TOK,'do',17,313)
LexToken(IDENTIFIER_TOK,'this',17,316)
LexToken(NL_TOK,'\n',17,320)
LexToken(IDENTIFIER_TOK,'too',18,329)
LexToken(NL_TOK,'\n',18,332)
LexToken(DEINDENT_TOK,'\n',19,332)
LexToken(DEINDENT_TOK,'\n',19,332)
"""
with open(filename) as f:
tokenize(f.read())
def syntaxerror_params(pos = None, lineno = None):
'''
    Returns (filename, lineno, column, line) for use as the second
argument to SyntaxError exceptions.
'''
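    # Typical use elsewhere in this module:
    #   raise SyntaxError("unmatched ...", syntaxerror_params(t.lexpos))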
if pos is None: pos = lexer.lexpos
if pos > len(lexer.lexdata): pos = len(lexer.lexdata)
end = pos
if lineno is None: lineno = lexer.lineno
while end > 0 and (end >= len(lexer.lexdata) or
lexer.lexdata[end] in '\r\n'):
end -= 1
start = end
if debug: print "pos", pos, "lineno", lineno, "end", end
start = max(lexer.lexdata.rfind('\r', 0, end),
lexer.lexdata.rfind('\n', 0, end)) + 1
column = pos - start + 1
end1 = lexer.lexdata.find('\r', end)
end2 = lexer.lexdata.find('\n', end)
if end1 < 0:
if end2 < 0: end = len(lexer.lexdata)
else: end = end2
elif end2 < 0: end = end1
else: end = min(end1, end2)
if goal_mode and start == 0 and lexer.lexdata.startswith('check ', start):
start += 6
column -= 6
if debug: print "start", start, "column", column, "end", end
return (lexer.filename, lineno, column, lexer.lexdata[start:end])
lexer = None
def init(this_module, debug_param, check_tables = False, kfb = False):
global indent_levels, nesting_level, kfb_mode, lexer, debug
indent_levels = []
nesting_level = 0
kfb_mode = kfb
debug = debug_param
if lexer is None:
if debug_param:
lexer = lex.lex(module=this_module, debug=1)
else:
if check_tables:
scanner_mtime = os.path.getmtime(this_module.__file__)
tables_name = \
os.path.join(os.path.dirname(this_module.__file__),
'scanner_tables.py')
try:
ok = os.path.getmtime(tables_name) >= scanner_mtime
except OSError:
ok = False
if not ok:
#print "regenerating scanner_tables"
try: os.remove(tables_name)
except OSError: pass
try: os.remove(tables_name + 'c')
except OSError: pass
try: os.remove(tables_name + 'o')
except OSError: pass
lexer = lex.lex(module=this_module, optimize=1,
lextab='pyke.krb_compiler.scanner_tables',
outputdir=os.path.dirname(this_module.__file__))
| {
"repo_name": "e-loue/pyke",
"path": "pyke/krb_compiler/scanner.py",
"copies": "2",
"size": "20547",
"license": "mit",
"hash": -3832207072218947600,
"line_mean": 28.7768115942,
"line_max": 91,
"alpha_frac": 0.540299815,
"autogenerated": false,
"ratio": 3.0780524344569287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9568812018539857,
"avg_score": 0.009908046183414357,
"num_lines": 690
} |
from __future__ import with_statement
from string import Template
import re
import os.path
from pyke import question_base
from pyke import user_question
from pyke import qa_helpers
from pyke.krb_compiler import scanner
class kqb_parser(object):
blank_line = re.compile(ur'(\s*#)|(\s*$)', re.UNICODE)
tokenizer = re.compile(ur''' [ \t\f\r\v]* (?: \#.* )? (?:
(["']) (?P<str> (?: \\. | .)*? ) \1 | # this must be first!
[[] (?P<prompt> (?: \\. | .)*? ) []] |
[$] (?P<param> [a-zA-Z_] [a-zA-Z_0-9]* ) |
/ (?P<regexp1> (?: \\. | .)*? ) / |
/// (?P<regexp2> (?: \\. | \n | .)*? ) /// | # FIX: this won't work!
(?P<const> True | False | None ) |
(?P<id> [a-zA-Z_] [a-zA-Z_0-9]* ) |
(?P<number> (?: \d+ (?: \.\d* )? |
\.\d+ )
(?: [eE][-+]?\d+ )? ) |
(?P<lparen> [(] ) |
(?P<rparen> [)] ) |
(?P<comma> , ) |
(?P<bar> [|] ) |
(?P<bang> ! ) |
(?P<equal> = ) |
(?P<hyphen> - ) |
(?P<colon> : )
) [ \t\f\r\v]* (?: \#.* )? ''', re.UNICODE | re.VERBOSE)
pushed_token = None
def __init__(self, f):
# f needs readline() and name.
self.f = f
self.lineno = 0
self.line = ''
self.column = 0
self.eof = False
def readline(self):
r'''
>>> from StringIO import StringIO
>>> p = kqb_parser(StringIO("""
... line 1
... # this should be ignored
... line 2
...
... line 3
... """))
>>> p.readline()
>>> p.indent, p.line, p.lineno, p.column
(0, 'line 1', 2, 0)
>>> p.readline()
>>> p.indent, p.line, p.lineno, p.column
(1, ' line 2', 4, 1)
>>> p.readline()
>>> p.indent, p.line, p.lineno, p.column
(2, ' line 3', 6, 2)
>>> p.eof
False
>>> p.readline()
>>> p.eof
True
'''
while True:
line = self.f.readline()
if line == '':
self.eof = True
break
self.lineno += 1
line = line.rstrip('\n')
if not self.blank_line.match(line):
self.indent, self.column = scanner.count_indent(line)
self.line = line
break
def SyntaxError(self, msg, last_token=True):
if last_token:
raise SyntaxError(msg,
(self.f.name, self.last_lineno,
self.last_column + 1, self.last_line))
raise SyntaxError(msg,
(self.f.name, self.lineno, self.column + 1,
self.line))
def push_token(self):
#print "push_token:", self.last_token # FIX
self.pushed_token = self.last_token
self.pushed_indent = self.indent
self.pushed_column = self.column
self.indent = self.last_indent
self.column = self.last_column
def get_token(self, check_token=None):
r'''
>>> from StringIO import StringIO
>>> f = StringIO(r"""
... line 1=2.5: ( /\s* /, $foo) # comment
... ,|!-True "hi\n" [mom]
... """)
>>> f.name = 'StringIO'
>>> p = kqb_parser(f)
>>> p.get_token()
('id', 'line')
>>> p.get_token()
('number', 1)
>>> p.get_token()
('equal', None)
>>> p.get_token()
('number', 2.5)
>>> p.get_token()
('colon', None)
>>> p.get_token()
('lparen', None)
>>> p.get_token()
('regexp', '\\s* ')
>>> p.get_token()
('comma', None)
>>> p.get_token()
('param', 'foo')
>>> p.get_token()
('rparen', None)
>>> p.get_token()
('comma', None)
>>> p.get_token()
('bar', None)
>>> p.get_token()
('bang', None)
>>> p.get_token()
('hyphen', None)
>>> p.get_token()
('const', True)
>>> p.get_token()
('str', 'hi\n')
>>> p.get_token()
('prompt', 'mom')
>>> p.get_token()
(None, None)
'''
if self.pushed_token:
ans = self.pushed_token
self.indent = self.pushed_indent
self.column = self.pushed_column
self.pushed_token = self.pushed_column = self.pushed_indent = None
if check_token and check_token != ans[0]:
self.SyntaxError("expected %s, got %s" % (check_token, ans[0]))
#print "get_token: returning pushed_token", ans # FIX
return ans
if self.column < len(self.line): self.skip_spaces()
if self.column >= len(self.line):
self.readline()
if self.eof:
self.last_token = None, None
#print "get_token: returning EOF" # FIX
return self.last_token
self.last_line = self.line
self.last_lineno = self.lineno
self.last_column = self.column
self.last_indent = self.indent
match = self.tokenizer.match(self.line, self.column)
if not match: self.SyntaxError("Scanner error: no legal token")
token = match.lastgroup
chars = match.group(token)
end = match.end()
indent, ignore = scanner.count_indent(self.line[self.column:end], True)
self.indent += indent
self.column = end
if token == 'str' or token == 'prompt':
value = scanner.unescape(chars)
elif token == 'const':
value = eval(chars)
elif token == 'number':
try:
value = int(chars)
except ValueError:
value = float(chars)
elif token == 'param' or token == 'id':
value = chars
elif token == 'regexp1' or token == 'regexp2':
# FIX
token = 'regexp'
value = chars
self.lineno += chars.count('\n')
last_nl = chars.rfind('\n')
if last_nl >= 0:
self.column = len(chars) - last_nl + 4
else:
value = None
if check_token and check_token != token:
self.SyntaxError("expected %s, got %s" % (check_token, token))
self.last_token = str(token), value
#print "get_token: returning", self.last_token # FIX
return self.last_token
def get_block_string(self, stop=None, hanging=False, ending_newlines=False):
r'''
>>> from StringIO import StringIO
>>> f = StringIO(r"""
... line 1 # comment
... more stuff
... last line
... blah hanging line 1
...
... line 2
... indented
... last line
... ! the end !
... """)
>>> f.name = 'StringIO'
>>> p = kqb_parser(f)
>>> p.get_block_string()
u'line 1 # comment\n more stuff\nlast line'
>>> p.column = 4
>>> p.indent = 4
>>> p.get_block_string('!', True)
u'hanging line 1\n\nline 2\n indented\nlast line'
>>> f = StringIO(r"""
... ! line 1 # comment
... more stuff
... last line
... blah
... """)
>>> f.name = 'StringIO'
>>> p = kqb_parser(f)
>>> p.readline()
>>> p.get_token('bang')
('bang', None)
>>> p.get_block_string(hanging=True)
u'line 1 # comment\n more stuff\nlast line'
'''
if hanging:
indent, more_chars = \
scanner.count_indent(self.line[self.column:])
self.indent += indent
self.column += more_chars
else:
self.readline()
indent = self.indent
if self.eof: self.SyntaxError("expected block string, got EOF", False)
rest_line = self.line[self.column:]
if self.blank_line.match(rest_line):
ans = []
else:
ans = [rest_line]
while not self.eof:
last_lineno = self.lineno
self.readline()
if ending_newlines:
for i in range(self.lineno - last_lineno - 1): ans.append('')
if self.eof or self.indent < indent or \
stop and self.line[self.column:].startswith(stop):
break
if not ending_newlines:
for i in range(self.lineno - last_lineno - 1): ans.append('')
ans.append(' ' * (self.indent - indent) + self.line[self.column:])
if not ans: self.SyntaxError("expected block string", False)
return u'\n'.join(scanner.unescape(str) for str in ans)
def parse_simple_match(self):
token, value = self.get_token()
if token == 'str' or token == 'id' or token == 'number' or \
token == 'const':
next_token, next_value = self.get_token()
if next_token == 'prompt' and token == 'str':
final_token, final_value = self.get_token('regexp')
return qa_helpers.regexp(final_value, value, next_value)
if next_token == 'regexp' and token == 'str':
return qa_helpers.regexp(next_value, value)
if next_token == 'equal':
return qa_helpers.qmap(self.parse_simple_match(), value)
if next_token == 'hyphen' and token == 'number':
final_token, final_value = self.get_token()
if final_token == 'number':
return slice(value, final_value)
self.push_token()
return slice(value, None)
self.push_token()
return value
if token == 'prompt':
next_token, next_value = self.get_token('regexp')
return qa_helpers.regexp(next_value, prompt=value)
if token == 'lparen':
ans = self.parse_match()
self.get_token('rparen')
return ans
if token == 'regexp':
return qa_helpers.regexp(value)
if token == 'hyphen':
next_token, next_value = self.get_token('number')
return slice(None, next_value)
self.SyntaxError("expected match, got %s" % token)
def parse_match(self):
r'''
>>> from StringIO import StringIO
>>> def do(str):
... ans = StringIO(str)
... ans.name = 'StringIO'
... return kqb_parser(ans).parse_match()
>>> do(r'/reg\exp/ bob')
<regexp /reg\exp/>
>>> do(r'"msg"/reg\exp/ bob')
<regexp 'msg'/reg\exp/>
>>> do(r'[prompt]/reg\exp/ bob')
<regexp [prompt]/reg\exp/>
>>> do(r'"msg"[prompt]/reg\exp/ bob')
<regexp 'msg'[prompt]/reg\exp/>
>>> do(r"44 = id")
<qmap 44 = 'id'>
>>> do(r"-5 bob")
slice(None, 5, None)
>>> do(r"0-5 bob")
slice(0, 5, None)
>>> do(r"0- bob")
slice(0, None, None)
>>> do(r"/regexp/|44|3-5 bob")
(<regexp /regexp/>, 44, slice(3, 5, None))
>>> do(r"44 id")
44
'''
ans = [self.parse_simple_match()]
token, value = self.get_token()
while token == 'bar':
ans.append(self.parse_simple_match())
token, value = self.get_token()
self.push_token()
if len(ans) == 1: return ans[0]
return tuple(ans)
def get_value(self):
token, value = self.get_token()
if token not in ('const', 'number', 'id', 'str'):
self.SyntaxError("expected value, got %s" % token)
return value
def skip_spaces(self, pre_increment=0):
if pre_increment:
indent, chars = \
scanner.count_indent(self.line[self.column:
self.column+pre_increment], True)
self.indent += indent
self.column += chars
indent, chars = scanner.count_indent(self.line[self.column:])
self.indent += indent
self.column += chars
def parse_alternatives(self):
r'''
>>> from StringIO import StringIO
>>> def do(str):
... ans = StringIO(str)
... ans.name = 'StringIO'
... p = kqb_parser(ans)
... alt, review = p.parse_alternatives()
... for tag, msg in alt:
... print '%s: %s' % (repr(tag), repr(msg.template))
... for key, msg in sorted(review, key=lambda x: repr(x[0])):
... print repr(key), '!', repr(msg.template)
>>> do(r"""
... 1: hi mom
... how are you?
... ! Just reward!
... bob: yep this is bob
... ! =1
... 44: nope, this is just 44
... ! = bob
... next
... """)
1: u'hi mom\nhow are you?'
'bob': u'yep this is bob'
44: u'nope, this is just 44'
(1, 'bob', 44) ! u'Just reward!'
'''
if self.column >= len(self.line):
self.readline()
if self.eof or self.indent == 0:
self.SyntaxError("no alternatives", False)
indent = self.indent
review = {}
ans = []
while not self.eof and self.indent == indent:
tag = self.get_value()
if tag in review: self.SyntaxError("duplicate tag: %s" % tag)
self.get_token('colon')
ans.append((tag, Template(self.get_block_string(stop='!',
hanging=True))))
if self.line[self.column] == '!':
self.skip_spaces(1)
if self.line[self.column] == '=':
self.indent += 1
self.column += 1
old_value = self.get_value()
while not isinstance(review[old_value], tuple):
old_value = review[old_value]
review[old_value][0].append(tag)
review[tag] = old_value
self.readline()
else:
review[tag] = \
[tag], Template(self.get_block_string(hanging=True))
if not self.eof and self.indent > indent:
self.SyntaxError("unexpected indent", False)
return tuple(ans), \
tuple((value[0][0] if len(value[0]) == 1
else tuple(value[0]),
value[1])
for value in review.itervalues()
if isinstance(value, tuple)) \
if review \
else None
def parse_review(self):
r'''
>>> from StringIO import StringIO
>>> def do(str):
... ans = StringIO(str)
... ans.name = 'StringIO'
... p = kqb_parser(ans)
... review = p.parse_review()
... for key, msg in sorted(review, key=lambda x: repr(x[0])):
... print repr(key), '!', repr(msg.template)
>>> do(r"""
... 1 ! hi mom
... how are you?
... ! Just reward!
... bob! yep this is bob
... 3-5! nope, this is just 44
... next
... """)
'bob' ! u'yep this is bob'
1 ! u'hi mom\nhow are you?\n! Just reward!'
slice(3, 5, None) ! u'nope, this is just 44'
'''
if self.column >= len(self.line):
self.readline()
if self.eof or self.indent == 0:
#print "parse_review: None" # FIX
return None
#print "parse_review: self.indent", self.indent, \
# "self.column", self.column # FIX
indent = self.indent
review = []
while not self.eof and self.indent == indent:
match = self.parse_match()
self.get_token('bang')
review.append(
(match, Template(self.get_block_string(hanging=True))))
if not self.eof and self.indent > indent:
self.SyntaxError("unexpected indent", False)
#print "parse_review:", tuple(review) # FIX
return tuple(review)
def parse_questions(self):
r''' question_base.question generator.
>>> from StringIO import StringIO
>>> def do(str):
... ans = StringIO(str)
... ans.name = 'StringIO'
... p = kqb_parser(ans)
... for q in p.parse_questions():
... print q
>>> do(r"""
... question1($ans)
... This is the question?
... ---
... $ans = yn
...
... question2($ans)
... This is the second question?
... ---
... $ans = select_1
... 1: first
... 2: second
... 3: third
...
... """)
<question question1($ans): $ans = <yn: u'This is the question?'>>
<question question2($ans): $ans = <select_1(1: 2: 3:): u'This is the second question?'>>
'''
self.readline()
while not self.eof:
if self.indent > 0: self.SyntaxError("unexpected indent", False)
token, name = self.get_token('id')
self.get_token('lparen')
params = []
token, param = self.get_token()
if token != 'rparen':
while True:
if token != 'param':
self.SyntaxError("expected $param, got %s" % token)
params.append(param)
token, ignore = self.get_token()
if token == 'rparen': break
if token != 'comma':
self.SyntaxError("expected comma or rparen, got %s" %
token)
token, param = self.get_token()
format = self.get_block_string(stop='---', ending_newlines=True)
self.readline() # ---
token, answer_param = self.get_token('param')
self.get_token('equal')
token, cls = self.get_token('id')
user_q = getattr(user_question, cls)(format)
user_q.parse(self)
yield question_base.question(name, tuple(params), answer_param,
user_q)
if self.column >= len(self.line): self.readline()
def parse_kqb(filename):
name = os.path.basename(filename)[:-4]
with open(filename, 'rU') as f:
base = question_base.question_base(name)
parser = kqb_parser(f)
for question in parser.parse_questions():
base.add_question(question)
return base
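# Typical use (illustrative): parse_kqb('movies.kqb') returns a
# question_base.question_base holding one question per entry in the file.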
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/krb_compiler/kqb_parser.py",
"copies": "2",
"size": "20830",
"license": "mit",
"hash": -612870806219799600,
"line_mean": 37.3591160221,
"line_max": 100,
"alpha_frac": 0.4574871573,
"autogenerated": false,
"ratio": 4.048396501457726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005745356452893099,
"num_lines": 543
} |
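# This module mirrors the stdlib traceback API (print_exc, print_exception,
# format_exception, ...) but maps frames from compiled *_fc.py / *_bc.py rule
# modules back to their .krb source lines via each module's Krb_lineno_map.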
import linecache
import traceback
import os.path
import sys
def print_tb(traceback, limit=None, file=None):
if file is None: file = sys.stderr
for line in format_list(extract_tb(traceback, limit)): file.write(line)
def print_exception(type, value, traceback, limit=None, file=None):
if file is None: file = sys.stderr
if traceback:
file.write('Traceback (most recent call last):\n')
print_tb(traceback, limit, file)
lines = format_exception_only(type, value)
file.write(lines[0])
for line in lines[1:]: file.write(' ' + line)
def print_exc(limit=None, file=None):
type, value, traceback = sys.exc_info()
print_exception(type, value, traceback, limit, file)
def format_exc(limit=None):
type, value, traceback = sys.exc_info()
return format_exception(type, value, traceback, limit)
def print_last(limit=None, file=None):
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file)
def print_stack(f=None, limit=None, file=None):
if file is None: file = sys.stderr
for line in format_list(extract_stack(f, limit)): file.write(line)
def extract_tb(tb, limit=None):
ans = convert_tb(traceback.extract_tb(tb))
if limit is not None and len(ans) > limit:
return ans[len(ans) - limit:]
return ans
def extract_stack(f=None, limit=None):
ans = convert_tb(traceback.extract_stack(f))
if limit is not None and len(ans) > limit:
return ans[len(ans) - limit:]
return ans
format_list = traceback.format_list
format_exception_only = traceback.format_exception_only
def format_exception(type, value, traceback, limit=None):
ans = []
if traceback:
ans.append('Traceback (most recent call last):\n')
ans.extend(format_list(extract_tb(traceback, limit)))
lines = format_exception_only(type, value)
ans.append(lines[0])
for line in lines[1:]: ans.append(' ' + line)
return ''.join(ans)
def format_tb(tb, limit=None):
return format_list(extract_tb(tb, limit))
def format_stack(f=None, limit=None):
return format_list(extract_stack(f, limit))
def convert_lineno(module, lineno):
for (py_start, py_end), (krb_start, krb_end) in module.Krb_lineno_map:
if py_start <= lineno <= py_end: return krb_start
def convert_tb(extracted_tb):
'''
extracted_tb is list of (filename, lineno, functionname, line)
'''
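    # Illustrative transformation (paths and line numbers hypothetical):
    #   ('.../family_fc.py', 234, 'rule_fn', '<generated code>')
    # becomes, via the generated module's Krb_lineno_map,
    #   ('.../family.krb', 12, 'rule_fn', '<matching .krb line>')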
ans = []
batch = []
for info in extracted_tb:
filename, lineno, functionname, line = info
if filename.endswith('_fc.py') or filename.endswith('_bc.py'):
pathname = filename[:-3]
while True:
module_name = pathname.replace(os.path.sep, '.')
if module_name in sys.modules:
module = sys.modules[module_name]
if hasattr(module, 'Krb_filename'):
krb_lineno = convert_lineno(module, lineno)
if krb_lineno is not None:
if not ans: ans = batch
batch = []
krb_filename = \
os.path.normpath(
os.path.join(os.path.dirname(module.__file__),
module.Krb_filename))
linecache.checkcache(krb_filename)
line = linecache.getline(krb_filename, krb_lineno)
if line: line = line.strip()
else: line = None
ans.append((krb_filename, krb_lineno, functionname,
line))
info = None
else:
ans.extend(batch)
ans.append(info)
batch = []
info = None
break
sep_index = pathname.find(os.path.sep)
if sep_index < 0: break
pathname = pathname[sep_index + 1:]
if info: batch.append(info)
return ans + batch
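# Hedged usage sketch (added for illustration): this module mirrors the stdlib
# traceback API, so the usual pattern is to catch an exception raised during
# rule execution and report the .krb-level traceback instead of the raw
# compiled-module frames.  'engine' and 'my_rules' below are assumptions.
#
#   from pyke import krb_traceback
#   try:
#       engine.activate('my_rules')
#   except:
#       krb_traceback.print_exc()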
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/krb_traceback.py",
"copies": "2",
"size": "5308",
"license": "mit",
"hash": -1245906059322055700,
"line_mean": 38.3111111111,
"line_max": 80,
"alpha_frac": 0.6009044658,
"autogenerated": false,
"ratio": 4.026555386949924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0221038237416987,
"num_lines": 135
} |
import os.path
import wsgi_app
import wsgiref.simple_server
class RequestHandlerNoLogging(wsgiref.simple_server.WSGIRequestHandler):
def log_request(self, code='-', size='-'): pass
def init(trace_sql = False, db_engine = 'sqlite3'):
if db_engine.lower() == 'sqlite3':
import sqlite3 as db
import sqlgen.load_sqlite3_schema as load_schema
db_connection = \
db.connect(os.path.join(os.path.dirname(load_schema.__file__),
'sqlite3.db'))
elif db_engine.lower() == 'mysql':
import MySQLdb as db
import sqlgen.load_mysql_schema as load_schema
db_connection = db.connect(user="movie_user", passwd="user_pw",
db="movie_db")
else:
raise ValueError("simple_server.init: unrecognized db_engine: " +
db_engine)
load_schema.load_schema(wsgi_app.init(db_connection, trace_sql), db,
db_connection)
def run(port = 8080, logging = True, trace_sql = False, db_engine = 'sqlite3'):
init(trace_sql, db_engine)
server_address = ('', port)
httpd = wsgiref.simple_server.WSGIServer(
server_address,
wsgiref.simple_server.WSGIRequestHandler
if logging
else RequestHandlerNoLogging)
httpd.set_app(wsgi_app.wsgi_app)
print "Server running..."
httpd.serve_forever()
if __name__ == "__main__":
run()
| {
"repo_name": "e-loue/pyke",
"path": "examples/web_framework/simple_server.py",
"copies": "2",
"size": "2609",
"license": "mit",
"hash": 9189302604904880000,
"line_mean": 41.064516129,
"line_max": 79,
"alpha_frac": 0.6660276074,
"autogenerated": false,
"ratio": 4.0941915227629515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5760219130162952,
"avg_score": null,
"num_lines": null
} |
import sys
import os, os.path
from pyke import knowledge_engine, krb_traceback
import sqlgen
# Possibly interesting values:
# CONTENT_LENGTH:
# CONTENT_TYPE: application/x-www-form-urlencoded
# PATH_INFO: /hello/mom/and/dad.html
# QUERY_STRING: this=value&that=too
# REMOTE_ADDR: 127.0.0.1
# REQUEST_METHOD: GET
# SCRIPT_NAME:
# wsgi.errors: <file>
# wsgi.file_wrapper: <file>
# wsgi.multiprocess: False
# wsgi.multithread: True
# wsgi.run_once: False
class trace_cursor(object):
def __init__(self, cursor):
self.cursor = cursor
def execute(self, command, parameters=None):
sys.stderr.write("\ncursor.execute got:\n")
sys.stderr.write(command + '\n')
if parameters: sys.stderr.write("with: %s\n" % str(parameters))
return self.cursor.execute(command, parameters)
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def init(db_connection, trace_sql=False):
global Engine, Db_connection, Db_cursor
Engine = knowledge_engine.engine(sqlgen, __file__)
Db_connection = db_connection
Db_cursor = db_connection.cursor()
if trace_sql: Db_cursor = trace_cursor(Db_cursor)
return Engine
Debug = 0
Web_framework_dir = os.path.dirname(__file__)
def gen_plan(environ, starting_tables, template_name):
Engine.reset()
def add_fact(fb_name, env_var):
fact_name = env_var.split('.')[-1].lower()
value = environ.get(env_var)
if value is not None and value != '':
if Debug:
print "asserting %s.%s(%s) from %s" % \
(fb_name, fact_name, value, env_var)
Engine.add_case_specific_fact(fb_name, fact_name, (value,))
elif Debug:
print "skipping %s.%s: got %s from %s" % \
(fb_name, fact_name, value, env_var)
return value
def add_http(env_var):
fact_name = env_var[5:].lower()
value = environ.get(env_var)
if value is not None and value != '':
if Debug:
print "asserting header.%s(%s) from %s" % \
(fact_name, value, env_var)
Engine.add_case_specific_fact("header", fact_name, (value,))
elif Debug:
print "skipping header.%s: got %s from %s" % \
(fact_name, value, env_var)
return value
add_fact("header", "CONTENT_TYPE")
add_fact("request", "REQUEST_METHOD")
add_fact("request", "PATH_INFO")
add_fact("request", "SCRIPT_NAME")
add_fact("request", "QUERY_STRING")
add_fact("request", "REMOTE_ADDR")
add_fact("wsgi", "wsgi.multiprocess")
add_fact("wsgi", "wsgi.multithread")
add_fact("wsgi", "wsgi.run_once")
for env_var in environ.iterkeys():
if env_var.startswith('HTTP_'): add_http(env_var)
if Debug > 1:
for key, value in environ.iteritems():
print "environ: %s = %s" % (key, value)
length = int(environ.get("CONTENT_LENGTH") or '0')
if length:
request_file = environ['wsgi.input']
Engine.add_case_specific_fact("request", "body",
(request_file.read(length),))
Engine.activate('database', 'web')
try:
no_vars, plan = \
Engine.prove_1_goal('web.process($starting_tables, $template_name)',
starting_tables=starting_tables,
template_name=template_name)
except:
traceback = krb_traceback.format_exc(100)
return None, traceback
return plan, None
Plans_cache = {}
def wsgi_app(environ, start_response):
global Plans_cache
# Parse the path:
components = environ["PATH_INFO"].lstrip('/').split('/')
template_name = os.path.join(Web_framework_dir, components[-1])
starting_tables = []
starting_keys = {}
for i in range(0, len(components) - 1, 2):
starting_tables.append(components[i])
starting_keys[components[i]] = int(components[i + 1])
# Convert to tuple so that it can be used as a dict key and sort so
# different orders compare equal.
starting_tables = tuple(sorted(starting_tables))
template_mtime = os.stat(template_name).st_mtime
mtime, plan = Plans_cache.get((starting_tables, template_name),
(None, None))
if mtime is None or mtime < template_mtime:
print "get_plan(..., %s, %s)" % (starting_tables, template_name)
plan, traceback = gen_plan(environ, starting_tables, template_name)
if plan is None:
Db_connection.rollback()
start_response('500 Server Error', [('Content-Type', 'text/plain')])
return traceback
Plans_cache[starting_tables, template_name] = template_mtime, plan
status, headers, document = plan(Db_connection, Db_cursor, starting_keys)
start_response(status, headers)
return document
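# Worked example (added; values are hypothetical): a request for
#   /movie/1/movie.html
# yields components ['movie', '1', 'movie.html'], so starting_tables becomes
# ('movie',), starting_keys becomes {'movie': 1}, and template_name points at
# movie.html in this directory.  Cached plans are reused until the template
# file's mtime changes.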
| {
"repo_name": "e-loue/pyke",
"path": "examples/web_framework/wsgi_app.py",
"copies": "2",
"size": "6076",
"license": "mit",
"hash": -363633158272844200,
"line_mean": 36.5,
"line_max": 80,
"alpha_frac": 0.6274897119,
"autogenerated": false,
"ratio": 3.743068391866913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010945595267591975,
"num_lines": 162
} |
import sys
import os, os.path
import signal
import functools
import wsgiref.simple_server
import wsgi_app
def kill(pids, signum, frame):
sys.stderr.write("preforked_server(%d) caught SIGINT\n" % os.getpid())
sys.stderr.write("preforked_server(%d) self.pids is %s\n" %
(os.getpid(), str(pids)))
for pid in pids: os.kill(pid, signal.SIGTERM)
sys.exit(1)
class RequestHandlerNoLogging(wsgiref.simple_server.WSGIRequestHandler):
def log_request(self, code='-', size='-'): pass
class server(wsgiref.simple_server.WSGIServer):
def __init__(self, server_address, rq_handler_class, num_processes,
trace_sql, db_engine):
self.num_processes = num_processes
self.trace_sql = trace_sql
self.db_engine = db_engine
wsgiref.simple_server.WSGIServer.__init__(self, server_address,
rq_handler_class)
def init_wsgi(self):
if self.db_engine.lower() == 'sqlite3':
import sqlite3 as db
import sqlgen.load_sqlite3_schema as load_schema
db_connection = \
db.connect(os.path.join(os.path.dirname(load_schema.__file__),
'sqlite3.db'))
elif self.db_engine.lower() == 'mysql':
import MySQLdb as db
import sqlgen.load_mysql_schema as load_schema
db_connection = db.connect(user="movie_user", passwd="user_pw",
db="movie_db")
else:
raise ValueError("prefork_server.init_wsgi: "
"unrecognized db_engine: " +
self.db_engine)
load_schema.load_schema(wsgi_app.init(db_connection, self.trace_sql),
db, db_connection)
def name(self): return "prefork_server(%d)" % self.num_processes
def server_activate(self):
wsgiref.simple_server.WSGIServer.server_activate(self)
pids = []
for i in xrange(self.num_processes - 1):
pid = os.fork()
if pid == 0:
self.init_wsgi()
break
pids.append(pid)
else:
# only run by parent process
self.init_wsgi()
signal.signal(signal.SIGINT, functools.partial(kill, pids))
def run(num_processes = 2, port = 8080, logging = False, trace_sql = False,
db_engine = 'sqlite3'):
server_address = ('', port)
httpd = server(server_address,
wsgiref.simple_server.WSGIRequestHandler
if logging
else RequestHandlerNoLogging,
num_processes, trace_sql, db_engine)
httpd.set_app(wsgi_app.wsgi_app)
print "Server running..."
httpd.serve_forever()
if __name__ == "__main__":
run()
| {
"repo_name": "e-loue/pyke",
"path": "examples/web_framework/preforked_server.py",
"copies": "2",
"size": "3995",
"license": "mit",
"hash": -6296789764230844000,
"line_mean": 41.0421052632,
"line_max": 79,
"alpha_frac": 0.6169253881,
"autogenerated": false,
"ratio": 4.088024564994882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5704949953094882,
"avg_score": null,
"num_lines": null
} |
class immutable_dict(dict):
''' >>> im = immutable_dict((('a', 1), ('b', 2)))
>>> len(im)
2
>>> im['a']
1
>>> im['b']
2
>>> tuple(sorted(im.keys()))
('a', 'b')
>>> tuple(sorted(im.values()))
(1, 2)
>>> 'a' in im
True
>>> 'c' in im
False
>>> del im['a']
Traceback (most recent call last):
...
TypeError: del (a) not allowed on plan context
>>> im['a'] = 3
Traceback (most recent call last):
...
TypeError: not allowed to change pattern variables (a) in plan
>>> im.clear()
Traceback (most recent call last):
...
TypeError: clear not allowed on plan context
>>> im.pop('a')
Traceback (most recent call last):
...
TypeError: pop (a) not allowed on plan context
>>> im.popitem()
Traceback (most recent call last):
...
TypeError: popitem not allowed on plan context
>>> im.setdefault('a', [])
Traceback (most recent call last):
...
TypeError: setdefault (a) not allowed on plan context
>>> im.update({'c': 3})
Traceback (most recent call last):
...
TypeError: update not allowed on plan context
'''
def __delitem__(self, key):
raise TypeError("del (%s) not allowed on plan context" % key)
def __setitem__(self, key, value):
raise TypeError("not allowed to change pattern variables (%s) in plan" %
key)
def clear(self):
raise TypeError("clear not allowed on plan context")
def pop(self, key, default = None):
raise TypeError("pop (%s) not allowed on plan context" % key)
def popitem(self):
raise TypeError("popitem not allowed on plan context")
def setdefault(self, key, default = None):
raise TypeError("setdefault (%s) not allowed on plan context" % key)
def update(self, dict2 = None, **kwargs):
raise TypeError("update not allowed on plan context")
| {
"repo_name": "e-loue/pyke",
"path": "pyke/immutable_dict.py",
"copies": "2",
"size": "3263",
"license": "mit",
"hash": -1543296057474327300,
"line_mean": 34.8461538462,
"line_max": 80,
"alpha_frac": 0.6063764562,
"autogenerated": false,
"ratio": 4.39622641509434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027604543901182815,
"num_lines": 91
} |
from __future__ import with_statement
import os.path
import contextlib
import sqlite3 as db
from pyke import test
import load_sqlite3_schema
Sqlgen_dir = os.path.dirname(load_sqlite3_schema.__file__)
Sqlite3_db = os.path.join(Sqlgen_dir, "sqlite3.db")
class cursor(object):
    rowcount = 1 # This is only checked for unique queries...
def __init__(self, width):
self.width = width
def execute(self, str, parameters=None):
print "execute got:"
print str
if parameters: print "with:", parameters
def fetchone(self, base = 44):
return (base,) * self.width
def fetchall(self):
return tuple(self.fetchone(i) for i in range(1, 5))
def init():
test.init()
with contextlib.closing(db.connect(Sqlite3_db)) as conn:
load_sqlite3_schema.load_schema(test.Engine, db, conn)
def run_plan(globals, locals):
plan = locals['plan']
args = locals['args']
starting_keys = dict(zip(args[0], range(1, len(args[0]) + 1)))
print "executing the plan with debug database cursor"
ans = plan(cursor(len(args[1])), starting_keys)
print "plan returned:", ans
while True:
print
data_values = raw_input("%s: " % str(args[0])).split()
if not data_values: break
starting_keys = dict(zip(args[0], data_values))
print "executing the plan with real database cursor"
with contextlib.closing(db.connect(Sqlite3_db)) as conn:
with contextlib.closing(conn.cursor()) as cur:
ans = plan(cur, starting_keys)
print "plan returned:", ans
def run():
if not test.Did_init: init()
test.run('database', fn_to_run_plan = run_plan)
def doc_test():
import doctest
import sys
sys.exit(doctest.testmod()[0])
if __name__ == "__main__":
doc_test()
| {
"repo_name": "e-loue/pyke",
"path": "examples/sqlgen/driver_sqlite3.py",
"copies": "2",
"size": "2942",
"license": "mit",
"hash": 3226999718417864000,
"line_mean": 34.8658536585,
"line_max": 79,
"alpha_frac": 0.6841210473,
"autogenerated": false,
"ratio": 3.785070785070785,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5469191832370786,
"avg_score": null,
"num_lines": null
} |
from __future__ import with_statement
import types
from pyke import knowledge_engine, krb_traceback, pattern, contexts
def parse(str):
str = str.strip()
if str[0] == '(': return parse_tuple(str[1:])
if str[0] in "0123456789.-+": return parse_number(str)
if str[0] in "\"'": return parse_string(str)
if str[0].isalpha() or str[0] in "_$*": return parse_identifier(str)
else: return parse_symbol(str)
def parse_number(str):
'''
>>> parse_number('123abc')
(123, 'abc')
>>> parse_number('123e17abc')
(1.23e+19, 'abc')
>>> parse_number('-123abc')
(-123, 'abc')
>>> parse_number('-1.23e-7abc')
(-1.23e-07, 'abc')
'''
for i in range(1, len(str)):
if str[i] not in "0123456789.-+e": break
return eval(str[:i]), str[i:]
def parse_string(str):
r'''
>>> parse_string("'hello' mom")
('hello', ' mom')
>>> parse_string(r'"hello\" mom"')
('hello" mom', '')
'''
quote = str[0]
end = str.index(quote, 1)
while str[end - 1] == '\\':
end = str.index(quote, end + 1)
return eval(str[:end + 1]), str[end + 1:]
def parse_identifier(str):
'''
>>> parse_identifier("abc, def")
('abc', ', def')
>>> parse_identifier("$abc, def")
('$abc', ', def')
>>> parse_identifier("*$abc, def")
('*$abc', ', def')
>>> parse_identifier("*")
('*', '')
>>> parse_identifier("True, 0")
(True, ', 0')
>>> parse_identifier("False, 0")
(False, ', 0')
>>> parse_identifier("None, 0")
(None, ', 0')
'''
if len(str) == 1: return str, ''
start = 2 if str.startswith('*$') else 1
for i in range(start, len(str)):
if not str[i].isalnum() and str[i] != '_': break
if str[0] == '*' and (i < 3 or str[1] != '$'): return parse_symbol(str)
if str[0] == '$' and i < 2: return parse_symbol(str)
if str[:i] == 'None': return None, str[i:]
if str[:i] == 'True': return True, str[i:]
if str[:i] == 'False': return False, str[i:]
return str[:i], str[i:]
def parse_symbol(str):
'''
>>> parse_symbol(", def")
(',', ' def')
>>> parse_symbol("$+, def")
('$+', ', def')
>>> parse_symbol("+)")
('+', ')')
>>> parse_symbol("+]")
('+', ']')
'''
if len(str) == 1: return str, ''
for i in range(1, len(str)):
if str[i].isspace() or str[i].isalnum() or str[i] in "\"'()[]{},;_`":
break
return str[:i], str[i:]
def parse_tuple(str):
'''
>>> parse_tuple("))")
((), ')')
>>> parse_tuple("a, b), c)")
(('a', 'b'), ', c)')
>>> parse_tuple("a, (b), c)")
(('a', ('b',), 'c'), '')
'''
ans = []
str = str.lstrip()
while str[0] != ')':
element, str = parse(str)
ans.append(element)
str = str.lstrip()
if str[0] == ',': str = str[1:].lstrip()
return tuple(ans), str[1:]
def is_pattern(data):
'''
>>> is_pattern('abc')
False
>>> is_pattern(123)
False
>>> is_pattern(())
False
>>> is_pattern((1,2,3))
False
>>> is_pattern((1,2,'*$_'))
True
'''
if isinstance(data, tuple):
if data and is_rest_var(data[-1]): return True
return any(is_pattern(element) for element in data)
if isinstance(data, types.StringTypes) and len(data) > 1 and \
data[0] == '$' and (data[1].isalpha() or data[1] == '_'):
return True
return False
def is_rest_var(data):
r'''
>>> is_rest_var('foo')
False
>>> is_rest_var('$foo')
False
>>> is_rest_var('*foo')
False
>>> is_rest_var('$*foo')
False
>>> is_rest_var('*$foo')
True
>>> is_rest_var('*$')
False
>>> is_rest_var('*')
False
>>> is_rest_var('')
False
'''
return isinstance(data, types.StringTypes) and len(data) > 2 and \
data.startswith('*$') and (data[2].isalpha() or data[2] == '_')
def as_pattern(data):
if isinstance(data, tuple) and is_pattern(data):
if is_rest_var(data[-1]):
name = data[-1][2:]
if name[0] == '_':
rest_var = contexts.anonymous(name)
else:
rest_var = contexts.variable(name)
return pattern.pattern_tuple(tuple(as_pattern(element)
for element in data[:-1]),
rest_var)
return pattern.pattern_tuple(tuple(as_pattern(element)
for element in data))
if isinstance(data, types.StringTypes) and is_pattern(data):
name = data[1:]
if name[0] == '_': return contexts.anonymous(name)
return contexts.variable(name)
return pattern.pattern_literal(data)
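# Illustrative note (added): as_pattern('$x') yields a contexts.variable named
# 'x'; as_pattern(('a', '*$rest')) yields a pattern.pattern_tuple whose tail
# binds the rest variable 'rest'; plain data falls through to
# pattern.pattern_literal.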
Did_init = False
def init(*args, **kws):
global Engine, Did_init
Engine = knowledge_engine.engine(*args, **kws)
Did_init = True
def eval_plan(globals, locals):
while True:
print
expr = raw_input("run plan: ").strip()
if not expr: break
ans = eval(expr, globals.copy(), locals.copy())
print "plan returned:", ans
def run(rule_bases_to_activate,
default_rb = None, init_fn = None, fn_to_run_plan = eval_plan,
plan_globals = {}):
if not Did_init: init()
if not isinstance(rule_bases_to_activate, (tuple, list)):
rule_bases_to_activate = (rule_bases_to_activate,)
if default_rb is None: default_rb = rule_bases_to_activate[0]
while True:
print
goal_str = raw_input("goal: ")
if not goal_str: break
goal, args_str = parse(goal_str)
if goal == "trace":
args = args_str.split()
if len(args) == 1:
Engine.trace(default_rb, args[0])
else:
Engine.trace(*args)
continue
if goal == "untrace":
args = args_str.split()
if len(args) == 1:
Engine.untrace(default_rb, args[0])
else:
Engine.untrace(*args)
continue
args_str = args_str.strip()
rb_name = default_rb
if args_str[0] == '.':
rb_name = goal
goal, args_str = parse(args_str[1:])
args = parse(args_str)[0]
print "proving: %s.%s%s" % (rb_name, goal, args)
goal_args = tuple(as_pattern(arg) for arg in args)
Engine.reset()
if init_fn: init_fn(Engine)
context = contexts.simple_context()
try:
Engine.activate(*rule_bases_to_activate)
with Engine.prove(rb_name, goal, context, goal_args) as it:
for prototype_plan in it:
final = {}
print "got: %s%s" % \
(goal, tuple(arg.as_data(context, True, final)
for arg in goal_args))
if not prototype_plan:
print "no plan returned"
else:
plan = prototype_plan.create_plan(final)
fn_to_run_plan(plan_globals, locals())
except:
krb_traceback.print_exc(100)
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/test.py",
"copies": "2",
"size": "8488",
"license": "mit",
"hash": 4371175305715410400,
"line_mean": 31.6423076923,
"line_max": 79,
"alpha_frac": 0.5190291033,
"autogenerated": false,
"ratio": 3.633133561643836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008820566287631703,
"num_lines": 260
} |
import contextlib
from pyke import unique
from pyke import knowledge_base
class question_base(knowledge_base.knowledge_base):
r'''
Each instance keeps track of a related set of questions.
'''
def __init__(self, name):
r'''
This is only used by the compiler, so only creates an instance
suitable for pickling.
Specifically, this means that the self.engine is just set to None
and the instance is not registered with any engine.
'''
super(question_base, self).__init__(None, name, register=False)
def add_question(self, question):
name = question.name
if name in self.entity_lists:
raise AssertionError("question_base %s: duplicate question, %s" %
(self.name, name))
self.entity_lists[name] = question
question.set_knowledge_base(self)
def get_ask_module(self):
if hasattr(self, 'ask_module'): return self.ask_module
return self.engine.get_ask_module()
class question(knowledge_base.knowledge_entity_list):
r'''
This represents one question in a question_base. It takes care of
lookup parameters and caching and delegates the work of actually
asking the user a question to the user_question object by calling its
'ask' method passing the format parameters.
'''
not_found = unique.unique('question.not_found')
def __init__(self, name, params, answer_param, user_question):
super(question, self).__init__(name)
self.params = tuple(params)
self.answer_param = answer_param
try:
self.answer_param_position = list(params).index(answer_param)
except ValueError:
raise ValueError("question %s: answer parameter, %s, "
"not in params list: %s" % (answer_param, params))
self.input_param_positions = \
tuple(filter(lambda i: i != self.answer_param_position,
range(len(self.params))))
self.user_question = user_question
self.cache = {}
def __repr__(self):
return "<question %s(%s): $%s = %s>" % \
(self.name, ', '.join('$' + p for p in self.params),
self.answer_param, repr(self.user_question))
def set_knowledge_base(self, question_base):
self.knowledge_base = question_base
self.user_question.set_question_base(question_base)
def lookup(self, bindings, pat_context, patterns):
input_params = tuple((self.params[i],
unicode(patterns[i].as_data(pat_context)))
for i in self.input_param_positions)
format_params = dict(input_params)
ans = self.cache.get(input_params, self.not_found)
if ans is self.not_found:
ans = self.cache[input_params] = \
self.user_question.ask(format_params)
def gen():
mark = bindings.mark(True)
end_done = False
try:
if patterns[self.answer_param_position] \
.match_data(bindings, pat_context, ans):
bindings.end_save_all_undo()
end_done = True
yield
finally:
if not end_done: bindings.end_save_all_undo()
bindings.undo_to_mark(mark)
return contextlib.closing(gen())
def reset(self):
self.cache.clear()
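# Illustrative note (added): self.cache maps the tuple of bound input
# parameters to the user's answer, e.g. after one ask it might hold
#   {((u'user', u'bruce'),): True}
# so a second lookup with the same inputs returns the cached answer without
# re-prompting; reset() clears this between cases.  The parameter name and
# value above are hypothetical.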
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/question_base.py",
"copies": "2",
"size": "4648",
"license": "mit",
"hash": 5757325998737887000,
"line_mean": 39.4086956522,
"line_max": 79,
"alpha_frac": 0.6234129546,
"autogenerated": false,
"ratio": 4.228389444949954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021170433223729156,
"num_lines": 115
} |
'''
This module loads universal facts representing a schema into a pyke engine.
All facts are put in the "schema" fact base.
It adds five kinds of facts:
paramstyle(style)
column(table_name, col_name, type, null, key, default, extra)
primary_key(table_name, columns)
many_to_1(table_many, table_1, table_many_columns, table_1_columns)
links_to(depth, start_table, end_table, joins)
The many_to_1 facts are determined by column names ending with "_id".
The part before "_id" is taken as the table name being referred to with a
primary key of ('id').
This module only exports one function: load_schema, which you'd call once
at startup after you've created your pyke engine.
'''
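# Illustrative example (added): for a hypothetical table
#   movie(id, title, director_id)
# load_schema would assert facts along these lines in the "schema" fact base:
#   column('movie', 'title', ...)
#   primary_key('movie', ('id',))
#   many_to_1('movie', 'director', ('director_id',), ('id',))
#   links_to(1, 'movie', 'director',
#            (('director', 'movie', ('director_id',), ('id',)),))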
from __future__ import with_statement
import contextlib
from pyke import goal
debug = False
def load_schema(engine, dbi_module, connection):
_add_fact(engine, "paramstyle", (dbi_module.paramstyle,))
with contextlib.closing(connection.cursor()) as table_cursor:
table_cursor.execute("""select name from sqlite_master
where type='table'
""")
with contextlib.closing(connection.cursor()) as column_cursor:
for name, in table_cursor.fetchall():
_load_table(engine, column_cursor, name)
depth = 1
while _links_to(engine, depth): depth += 1
def _load_table(engine, cursor, table_name):
# This doesn't allow sql parameters!
cursor.execute("pragma table_info(%s)" % table_name)
for col_num, col_name, type, null_flag, default, key in cursor.fetchall():
_create_column(engine, table_name, col_name, type, null_flag != 99,
key, default, None)
def _create_column(engine, table_name, col_name, type, null, key, default,
extra):
#null = null.upper() == 'YES'
if not key: key = None
if not default: default = None
if not extra: extra = None
_add_fact(engine, "column",
(table_name, col_name, type, null, key, default, extra))
if col_name == 'id':
_add_fact(engine, "primary_key", (table_name, (col_name,)))
if col_name.endswith('_id'):
to_table = col_name[:-3]
_add_fact(engine, "many_to_1",
(table_name, to_table, (col_name,), ('id',)))
many_to_1 = goal.compile(
'schema.many_to_1($from_table, $to_table, $from_columns, $to_columns)')
links_to = goal.compile(
'schema.links_to($depth, $from_table, $to_table, $joins)')
many_to_1_to = goal.compile(
'schema.many_to_1($to_table, $end_table, $to_columns, $end_columns)')
def _links_to(engine, depth):
ans = False
if depth == 1:
with many_to_1.prove(engine) as gen1:
for vars, bogus_plan in gen1:
from_table, to_table, from_columns, to_columns = \
vars['from_table'], vars['to_table'], vars['from_columns'], \
vars['to_columns']
_add_fact(engine, "links_to",
(1, from_table, to_table,
((to_table, from_table, from_columns, to_columns),)))
ans = True
return ans
with links_to.prove(engine, depth=depth - 1) as gen2:
for vars, bogus_plan1 in gen2:
from_table, to_table, joins = \
vars['from_table'], vars['to_table'], vars['joins']
with many_to_1_to.prove(engine, to_table=to_table) as gen3:
for vars, bogus_plan2 in gen3:
end_table, to_columns, end_columns = \
vars['end_table'], vars['to_columns'], vars['end_columns']
if end_table != from_table and \
not any(end_table == join_clause[0]
for join_clause in joins):
_add_fact(engine, "links_to",
(depth, from_table, end_table,
joins + ((end_table, to_table, to_columns,
end_columns),)))
ans = True
return ans
def _add_fact(engine, fact, args):
if debug: print "schema", fact, args
engine.add_universal_fact("schema", fact, args)
| {
"repo_name": "e-loue/pyke",
"path": "examples/sqlgen/load_sqlite3_schema.py",
"copies": "2",
"size": "5381",
"license": "mit",
"hash": 3711296034321175600,
"line_mean": 41.03125,
"line_max": 80,
"alpha_frac": 0.6085501859,
"autogenerated": false,
"ratio": 3.810198300283286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418748486183286,
"avg_score": null,
"num_lines": null
} |
r'''
A "match" here is one of:
- an instance of qa_helpers.regexp
- msg (for error message)
- prompt (without the [])
- match(str) returns converted value (None if no match)
- an instance of qa_helpers.qmap
- test (a match)
- value (value to use)
- an instance of slice (step must be None)
- a tuple of matches (implied "or")
- some other python value (which stands for itself)
A "review" here is a tuple of (match, review_string)
"Alternatives" here is a tuple of (tag, label_string)
'''
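# Concrete example of the structures above (added for illustration; all
# values are hypothetical):
#   match:        slice(1, 10) restricts an integer answer to 1..10
#   review:       ((slice(8, 10), u'that is a lot'),) prints the note when
#                 the answer falls in 8..10
#   alternatives: (('a', u'first choice'), ('b', u'second choice'))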
import sys
import itertools
from pyke import qa_helpers
encoding = 'UTF-8'
# The answer has been converted to lowercase before these matches:
yes_match = ('y', 'yes', 't', 'true')
no_match = ('n', 'no', 'f', 'false')
def get_answer(question, match_prompt, conv_fn=None, test=None, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('4\n')
>>> get_answer(u'enter number?', '[0-10]', qa_helpers.to_int,
... slice(3,5))
______________________________________________________________________________
enter number? [0-10] 4
>>> sys.stdin = StringIO('2\n4\n')
>>> get_answer(u'enter number?', '\n[0-10]', qa_helpers.to_int,
... slice(3,5))
______________________________________________________________________________
enter number?
[0-10] answer should be between 3 and 5, got 2
<BLANKLINE>
Try Again:
______________________________________________________________________________
enter number?
[0-10] 4
>>> sys.stdin = StringIO('4\n')
>>> get_answer(u'enter number?\n', '[0-10]', qa_helpers.to_int, slice(3,5),
... ((3, u'not enough'), (4, u'hurray!'), (5, u'too much')))
______________________________________________________________________________
enter number?
[0-10] hurray!
4
'''
if not question[-1].isspace() and \
(not match_prompt or not match_prompt[0].isspace()):
question += ' '
question += match_prompt
if match_prompt and not match_prompt[-1].isspace(): question += ' '
if encoding: question = question.encode(encoding)
while True:
print "_" * 78
ans = raw_input(question)
try:
if encoding and sys.version_info[0] < 3: ans = ans.decode(encoding)
if conv_fn: ans = conv_fn(ans)
if test: ans = qa_helpers.match(ans, test)
break
except ValueError, e:
print "answer should be %s, got %s" % (str(e), repr(ans))
print
print "Try Again:"
if review:
def matches2(ans, test):
try:
qa_helpers.match(ans, test)
return True
except ValueError:
return False
def matches(ans, test):
if isinstance(ans, (tuple, list)):
return any(itertools.imap(lambda elem: matches2(elem, test),
ans))
return matches2(ans, test)
for review_test, review_str in review:
if matches(ans, review_test):
print review_str
return ans
def ask_yn(question, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('yes\n')
>>> ask_yn(u'got it?')
______________________________________________________________________________
got it? (y/n) True
>>> sys.stdin = StringIO('N\n')
>>> ask_yn(u'got it?')
______________________________________________________________________________
got it? (y/n) False
'''
return get_answer(question, u"(y/n)", conv_fn=lambda str: str.lower(),
test=(qa_helpers.qmap(yes_match, True),
qa_helpers.qmap(no_match, False)),
review=review)
def ask_integer(question, match=None, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('4\n')
>>> ask_integer(u'enter number?')
______________________________________________________________________________
enter number? (int) 4
'''
return get_answer(question, qa_helpers.match_prompt(match, int, u"[%s]",
u'(int)'),
conv_fn=qa_helpers.to_int,
test=match,
review=review)
def ask_float(question, match=None, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('4\n')
>>> ask_float(u'enter number?')
______________________________________________________________________________
enter number? (float) 4.0
'''
return get_answer(question, qa_helpers.match_prompt(match, float, u"[%s]",
u'(float)'),
conv_fn=qa_helpers.to_float,
test=match,
review=review)
def ask_number(question, match=None, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('4\n')
>>> ask_number(u'enter number?')
______________________________________________________________________________
enter number? (number) 4
'''
return get_answer(question, qa_helpers.match_prompt(match, int, u"[%s]",
u'(number)'),
conv_fn=qa_helpers.to_number,
test=match,
review=review)
def ask_string(question, match=None, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('yes\n')
>>> ask_string(u'enter string?')
______________________________________________________________________________
enter string? u'yes'
'''
return get_answer(question, qa_helpers.match_prompt(match, str, u"[%s]",
u''),
test=match,
review=review)
def ask_select_1(question, alternatives, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('2\n')
>>> ask_select_1(u'which one?',
... (('a', u'first one'), ('b', u'second one'),
... ('c', u'third one')))
______________________________________________________________________________
which one?
1. first one
2. second one
3. third one
? [1-3] 'b'
'''
match = slice(1, len(alternatives))
question += u''.join(u'\n%3d. %s' %
(i + 1, u'\n '.join(text.split(u'\n')))
for i, (tag, text) in enumerate(alternatives))
i = get_answer(question, qa_helpers.match_prompt(match, int, u"\n? [%s]"),
conv_fn=qa_helpers.to_int,
test=match,
review=review)
return alternatives[i-1][0]
def ask_select_n(question, alternatives, review=None):
r'''
>>> from StringIO import StringIO
>>> sys.stdin = StringIO('1,3\n')
>>> ask_select_n(u'which one?',
... (('a', u'first one'), ('b', u'second one'),
... ('c', u'third one')))
______________________________________________________________________________
which one?
1. first one
2. second one
3. third one
? [1-3, ...] ('a', 'c')
'''
match = slice(1, len(alternatives))
question += u''.join(u'\n%3d. %s' %
(i + 1, u'\n '.join(text.split('\n')))
for i, (tag, text) in enumerate(alternatives))
i_tuple = get_answer(question, qa_helpers.match_prompt(match, int,
u"\n? [%s, ...]"),
conv_fn=lambda str:
qa_helpers.to_tuple(str,
conv_fn=qa_helpers.to_int,
test=match),
review=review)
return tuple(alternatives[i-1][0] for i in i_tuple)
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/ask_tty.py",
"copies": "2",
"size": "9599",
"license": "mit",
"hash": -857102453628380900,
"line_mean": 39.3277310924,
"line_max": 86,
"alpha_frac": 0.4568660138,
"autogenerated": false,
"ratio": 4.302106678619453,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011314685438157867,
"num_lines": 238
} |
r'''
'''
import itertools
from pyke import contexts
class selector(object):
def __init__(self):
self.all = [] # [(pat_context, pattern, link)...]
self.full_vars = [] # [(pat_context, pattern, [link])...]
self.partial_vars = [] # [(pat_context, pattern, [link])...]
self.data = {} # {data: [link]}
def add(self, pat_context, pattern, link):
self.all.append((pat_context, pattern, link))
if pattern.is_data(pat_context):
            self.data.setdefault(pattern.as_data(pat_context), []).append(link)
else:
self.vars # FIX ??
def gen_subset(self, pat_context, pattern):
''' yields links without binding any pattern variables.
'''
if pattern.is_data(pat_context):
for link in self.data[pattern.as_data(pat_context)]: yield link
else:
for p_c, p, links in self.full_vars:
for link in links: yield link
if not isinstance(pattern, contexts.variable):
self.partial_vars # FIX subsets
def gen_match(self, pat_context, pattern):
''' yields links binding pattern variables.
'''
if pattern.is_data(pat_context):
for link in self.data[pattern.as_data(pat_context)]: yield link
elif isinstance(pattern, contexts.variable):
self.all # FIX all
else:
self.partial_vars # FIX matches
self.full_vars # FIX all
class cache_args(object):
def __init__(self):
self.args_list = [] # [(pat_context, (pattern...))...]
self.hashes = {} # (len, (index...)): (other_indices,
# {(arg...): [other_args_from_factn...]})
def reset(self):
self.args_list = []
self.hashes.clear()
def lookup(self, bindings, pat_context, patterns):
""" Binds patterns to successive facts, yielding None for each
successful match. Undoes bindings upon continuation, so that no
bindings remain at StopIteration.
"""
indices = tuple(enum for enum in enumerate(patterns)
if enum[1].is_data(pat_context))
other_indices, other_arg_lists = \
self._get_hashed(len(patterns),
tuple(index[0] for index in indices),
tuple(index[1].as_data(pat_context)
for index in indices))
if other_arg_lists:
for args in other_arg_lists:
mark = bindings.mark(True)
try:
if all(itertools.imap(lambda i, arg:
patterns[i].match_data(bindings,
pat_context,
arg),
other_indices,
args)):
bindings.end_save_all_undo()
yield
else:
bindings.end_save_all_undo()
finally:
bindings.undo_to_mark(mark)
    def _get_hashed(self, length, indices, args):
        ans = self.hashes.get((length, indices))
        if ans is None: ans = self._hash(length, indices)
other_indices, arg_map = ans
return other_indices, arg_map.get(args, ())
def _hash(self, length, indices):
args_hash = {}
new_entry = (tuple(i for i in range(length) if i not in indices),
args_hash)
self.hashes[length, indices] = new_entry
for args in itertools.chain(self.universal_facts,
self.case_specific_facts):
if len(args) == length:
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
args_hash.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
return new_entry
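    # Illustrative note (added; values are hypothetical): for facts
    # ('a', 1, 'x') and ('b', 2, 'x') hashed on indices (2,), _hash produces
    #   ((0, 1), {('x',): [('a', 1), ('b', 2)]})
    # i.e. the remaining argument positions plus a map from the selected args
    # to the leftover args of each matching fact.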
def add_universal_fact(self, args):
assert args not in self.case_specific_facts, \
"add_universal_fact: fact already present as specific fact"
if args not in self.universal_facts:
self.universal_facts.append(args)
self.add_args(args)
def add_case_specific_fact(self, args):
if args not in self.universal_facts and \
args not in self.case_specific_facts:
self.case_specific_facts.append(args)
self.add_args(args)
for fc_rule, foreach_index in self.fc_rule_refs:
fc_rule.new_fact(args, foreach_index)
def add_args(self, args):
for (length, indices), (other_indices, arg_map) \
in self.hashes.iteritems():
if length == len(args):
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
arg_map.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
def test():
import doctest
import sys
# FIX sys.exit(doctest.testmod()[0])
if __name__ == "__main__":
test()
| {
"repo_name": "myaskevich/pyke",
"path": "experimental/cache_args.py",
"copies": "2",
"size": "6587",
"license": "mit",
"hash": -1053969762853435600,
"line_mean": 43.5,
"line_max": 80,
"alpha_frac": 0.5461585181,
"autogenerated": false,
"ratio": 4.36158940397351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.590774792207351,
"avg_score": null,
"num_lines": null
} |
r'''
This uses a parser object which is expected to have the following methods:
- get_token(check_token=None)
- parse_match()
- parse_alternatives()
- parse_review()
'''
from string import Template
class user_question(object):
match = None
review = None
def __init__(self, format):
self.format = Template(format)
def __repr__(self):
head = "<%s" % self.__class__.__name__
if self.match: head += self.repr_match()
head += ": %s" % repr(self.format.template)
if self.review:
head += " %s" % ' ! '.join(repr(m) for m, text in self.review)
return head + ">"
def repr_match(self):
return "(%s)" % repr(self.match)
def parse(self, parser):
self.parse_args(parser)
self.review = parser.parse_review()
def parse_args(self, parser): pass
def set_question_base(self, question_base):
self.question_base = question_base
def get_ask_module(self):
return self.question_base.get_ask_module()
def ask(self, format_params):
ask_fn = getattr(self.get_ask_module(),
'ask_' + self.__class__.__name__)
if self.review:
review = tuple((match, template.substitute(format_params))
for match, template in self.review)
else:
review = None
arg2 = self.prepare_arg2(format_params)
if arg2:
return ask_fn(self.format.substitute(format_params), arg2,
review=review)
return ask_fn(self.format.substitute(format_params),
review=review)
def prepare_arg2(self, format_params):
return self.match
class yn(user_question):
pass
class match_args(user_question):
def parse_args(self, parser):
token, value = parser.get_token()
if token == 'lparen':
self.match = parser.parse_match()
parser.get_token('rparen')
else:
parser.push_token()
class integer(match_args): pass
class float(match_args): pass
class number(match_args): pass
class string(match_args): pass
class select_1(user_question):
def repr_match(self):
return "(%s)" % ' '.join(repr(t) + ':' for t, text in self.match)
def parse(self, parser):
self.match, self.review = parser.parse_alternatives()
def prepare_arg2(self, format_params):
return tuple((tag, label.substitute(format_params))
for tag, label in self.match)
class select_n(select_1): pass
| {
"repo_name": "myaskevich/pyke",
"path": "pyke/user_question.py",
"copies": "2",
"size": "3714",
"license": "mit",
"hash": -8653161216245100000,
"line_mean": 31.2869565217,
"line_max": 79,
"alpha_frac": 0.6350659844,
"autogenerated": false,
"ratio": 3.9374337221633087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5572499706563309,
"avg_score": null,
"num_lines": null
} |
""" See http://www.dabeaz.com/ply/ply.html for syntax of grammer definitions.
"""
from __future__ import with_statement
import os, os.path
from pyke.krb_compiler.ply import yacc
from pyke.krb_compiler import scanner
from pyke import fact_base
tokens = scanner.kfb_tokens
def p_file(p):
''' file : nl_opt facts_opt
facts_opt :
facts_opt : facts nl_opt
facts : fact
facts : facts NL_TOK fact
'''
pass
def p_fact0(p):
''' fact : IDENTIFIER_TOK LP_TOK RP_TOK '''
Fact_base.add_universal_fact(p[1], ())
def p_fact1(p):
''' fact : IDENTIFIER_TOK LP_TOK data_list RP_TOK '''
Fact_base.add_universal_fact(p[1], tuple(p[3]))
def p_none(p):
''' data : NONE_TOK
comma_opt :
comma_opt : ','
nl_opt :
nl_opt : NL_TOK
'''
p[0] = None
def p_number(p):
''' data : NUMBER_TOK
'''
p[0] = p[1]
def p_string(p):
''' data : STRING_TOK
'''
p[0] = eval(p[1])
def p_quoted_last(p):
''' data : IDENTIFIER_TOK
'''
p[0] = p[1]
def p_false(p):
''' data : FALSE_TOK
'''
p[0] = False
def p_true(p):
''' data : TRUE_TOK
'''
p[0] = True
def p_empty_tuple(p):
''' data : LP_TOK RP_TOK
'''
p[0] = ()
def p_start_list(p):
''' data_list : data
'''
p[0] = [p[1]]
def p_append_list(p):
''' data_list : data_list ',' data
'''
p[1].append(p[len(p)-1])
p[0] = p[1]
def p_tuple(p):
''' data : LP_TOK data_list comma_opt RP_TOK '''
p[0] = tuple(p[2])
def p_error(t):
if t is None:
raise SyntaxError("invalid syntax", scanner.syntaxerror_params())
else:
raise SyntaxError("invalid syntax",
scanner.syntaxerror_params(t.lexpos, t.lineno))
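# Illustrative input (added): the grammar above accepts .kfb files made of
# simple universal facts, one per line, e.g. (contents are hypothetical):
#
#   family(father, ('bruce', 'm'), ('david', 'm'))
#   age(david, 32)
#   flag(True)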
parser = None
def init(this_module, check_tables = False, debug = 0):
global parser
if parser is None:
outputdir = os.path.dirname(this_module.__file__)
if debug:
parser = yacc.yacc(module=this_module, write_tables=0,
debug=debug, debugfile='kfbparser.yacc.out',
outputdir=outputdir)
else:
if check_tables:
kfbparser_mtime = os.path.getmtime(this_module.__file__)
tables_name = os.path.join(outputdir, 'kfbparser_tables.py')
try:
ok = os.path.getmtime(tables_name) >= kfbparser_mtime
except OSError:
ok = False
if not ok:
#print "regenerating kfbparser_tables"
try: os.remove(tables_name)
except OSError: pass
try: os.remove(tables_name + 'c')
except OSError: pass
try: os.remove(tables_name + 'o')
except OSError: pass
parser = yacc.yacc(module=this_module, debug=0,
optimize=1, write_tables=1,
tabmodule='pyke.krb_compiler.kfbparser_tables',
outputdir=outputdir)
# Use the first line for normal use, the second for testing changes in the
# grammar (the first line does not report grammar errors!).
def parse(this_module, filename, check_tables = False, debug = 0):
#def parse(this_module, filename, check_tables = False, debug = 1):
'''
>>> from pyke.krb_compiler import kfbparser
>>> kfbparser.parse(kfbparser,
... os.path.join(os.path.dirname(__file__),
... 'TEST/kfbparse_test.kfb'),
... True)
<fact_base kfbparse_test>
'''
global Fact_base
init(this_module, check_tables, debug)
name = os.path.basename(filename)[:-4]
Fact_base = fact_base.fact_base(None, name, False)
with open(filename) as f:
scanner.init(scanner, debug, check_tables, True)
scanner.lexer.lineno = 1
scanner.lexer.filename = filename
#parser.restart()
parser.parse(f.read(), lexer=scanner.lexer, tracking=True, debug=debug)
ans = Fact_base
Fact_base = None
return ans
| {
"repo_name": "e-loue/pyke",
"path": "pyke/krb_compiler/kfbparser.py",
"copies": "2",
"size": "5316",
"license": "mit",
"hash": -2784047242026846700,
"line_mean": 30.449704142,
"line_max": 79,
"alpha_frac": 0.5838193791,
"autogenerated": false,
"ratio": 3.533909574468085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5117728953568085,
"avg_score": null,
"num_lines": null
} |
"""
Each target_pkg object keeps track of all of the compiled files within one
compiled_krb package.
"""
from __future__ import with_statement
import os, os.path
import time
import sys
import re
import pyke
debug = False
Name_test = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')
class target_pkg(object):
r'''This manages all of the target files in a compiled_krb directory.
There is one instance per compiled_krb directory. It keeps track of
everything in that directory and manages recompiling the sources when
the compiled targets are missing or out of date.
This instance is stored permanently in the "targets" variable of the
compiled_pyke_files.py module in the compiled_krb directory.
This maintains the following information for each compiled target file:
source_package, source_filepath, compile_time, target_filename.
'''
def __init__(self, module_name, filename = None,
pyke_version = pyke.version,
loader = None, sources = None, compiler_version = 0):
r'''
The parameters are:
module_name: the complete dotted name of the compiled_pyke_files
module for this object.
filename: the absolute path to the compiled_pyke_files.py/c/o
file.
pyke_version: the version of pyke used to compile the target files.
loader: the __loader__ attribute of the compiled_pyke_files
module (only set if the compiled_krb directory has
been zipped, otherwise None).
sources: {(source_package_name, path_from_package,
source_filepath):
[compile_time, target_file...]}
compiler_version:
the version of the pyke compiler used to compile all
of the targets in this compiled_krb directory.
This class is instantiated in two different circumstances:
1. From compiled_krb/compiled_pyke_files.py with a list of all of the
compiled files in that compiled_krb directory.
In this case, all of the parameters are passed to __init__.
2. From knowledge_engine.engine.__init__ (actually _create_target_pkg).
In this case, only the first parameter is passed to __init__.
Either way, after importing compiled_pyke_files or creating a new
instance directly, reset is called by
knowledge_engine.engine._create_target_pkg.
'''
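        # Illustrative shape of `sources` (added; names are hypothetical, and
        # the exact target files depend on what the compiler generates):
        #   {('my_pkg', '', 'rules.krb'):
        #        [1234567890.0, 'rules_fc.py', 'rules_bc.py',
        #         'rules_plans.py']}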
# compiled_krb package name
self.package_name = module_name.rsplit('.', 1)[0]
if sources is None:
# compiled_pyke_files.py does not exist.
# Creating a new target_pkg object from scratch.
try:
# See if the self.package_name (e.g., compiled_krb) exists.
target_package_dir = \
os.path.dirname(import_(self.package_name).__file__)
except ImportError:
if debug:
print >> sys.stderr, "target_pkg: no target package", \
self.package_name
# Create the target_package.
last_dot = self.package_name.rfind('.')
if last_dot < 0:
assert filename is not None
package_parent_dir = \
os.path.dirname(os.path.dirname(filename))
else:
package_parent_dir = \
os.path.dirname(
# This import better work!
import_(self.package_name[:last_dot]).__file__)
if filename is not None:
assert os.path.normpath(
os.path.abspath(package_parent_dir)) == \
os.path.normpath(
os.path.dirname(os.path.dirname(filename))), \
"Internal error: %r != %r" % (
os.path.normpath(
os.path.abspath(package_parent_dir)),
os.path.normpath(
os.path.dirname(os.path.dirname(filename))))
if debug:
print >> sys.stderr, "target_pkg package_parent_dir:", \
package_parent_dir
target_package_dir = \
os.path.join(package_parent_dir,
self.package_name[last_dot + 1:])
if debug:
print >> sys.stderr, "target_pkg target_package_dir:", \
target_package_dir
if not os.path.lexists(target_package_dir):
if debug:
print >> sys.stderr, "target_pkg: mkdir", \
target_package_dir
os.mkdir(target_package_dir)
# Does __init__.py file exist?
init_filepath = \
os.path.join(target_package_dir, '__init__.py')
if debug:
print >> sys.stderr, "target_pkg init_filepath:", \
init_filepath
if not os.path.lexists(init_filepath):
# Create empty __init__.py file.
if debug:
print >> sys.stderr, "target_pkg: creating", \
init_filepath
open(init_filepath, 'w').close()
filename = os.path.join(target_package_dir,
'compiled_pyke_files.py')
if filename.endswith('.py'):
self.filename = filename
else:
self.filename = filename[:-1]
self.directory = os.path.dirname(self.filename)
if debug:
print >> sys.stderr, "target_pkg:", self.package_name, self.filename
self.loader = loader
if compiler_version == pyke.compiler_version:
# {(source_package_name, source_filepath):
# [compile_time, target_filename, ...]}
self.sources = sources if sources is not None else {}
elif self.loader is None:
# Force recompile of everything
self.sources = {}
else:
# Loading incorrect pyke.compiler_version from zip file.
# Can't recompile to zip file...
raise AssertionError("%s: wrong version of pyke, "
"running %s, compiled for %s" %
(module_name, pyke.version, pyke_version))
def reset(self, check_sources = True):
''' This should be called once by engine.__init__ prior to calling
add_source_package.
'''
if debug: print >> sys.stderr, "target_pkg.reset"
self.dirty = False
self.check_sources = check_sources
# {(source_package_name, path_from_package): source_package_dir}
self.source_packages = {}
self.compiled_targets = set() # set of target_filename
self.rb_names = set()
def add_source_package(self, source_package_name, path_from_package,
source_package_dir):
if debug:
print >> sys.stderr, "target_pkg.add_source_package " \
"source_package_name:", \
repr(source_package_name)
print >> sys.stderr, " path_from_package:", \
repr(path_from_package)
print >> sys.stderr, " source_package_dir:", \
repr(source_package_dir)
if not self.loader:
assert (source_package_name, path_from_package) not in \
self.source_packages, \
"duplicate source package: %s" % path_from_package
source_dir = os.path.normpath(os.path.join(source_package_dir,
path_from_package))
self.source_packages[source_package_name, path_from_package] = \
source_dir
sources = set([])
for dirpath, dirnames, filenames \
in os.walk(source_dir, onerror=_raise_exc):
for filename in filenames:
if filename.endswith(('.krb', '.kfb', '.kqb')):
source_abspath = os.path.join(dirpath, filename)
assert dirpath.startswith(source_dir)
source_relpath = \
os.path.join(dirpath[len(source_dir)+1:],
filename)
self.add_source(source_package_name, path_from_package,
source_relpath,
os.path.getmtime(source_abspath))
sources.add(source_relpath)
# Delete old source file info for files that are no longer present
for deleted_filepath \
in [src_filepath
for src_pkg_name, src_path_from_pkg, src_filepath
in self.sources.iterkeys()
if src_pkg_name == source_package_name and
src_path_from_pkg == path_from_package and
src_filepath not in sources]:
if debug:
print >> sys.stderr, "del:", source_package_name, filepath
del self.sources[source_package_name, path_from_package,
deleted_filepath]
def add_source(self, source_package_name, path_from_package,
source_filepath, source_mtime):
if debug:
print >> sys.stderr, "target_pkg.add_source:", \
source_package_name, path_from_package, \
source_filepath
rb_name = os.path.splitext(os.path.basename(source_filepath))[0]
if debug: print >> sys.stderr, "rb_name:", rb_name
if not Name_test.match(rb_name):
raise ValueError("%s: %s illegal as python identifier" %
(source_filepath, rb_name))
if rb_name in self.rb_names:
raise ValueError("%s: duplicate knowledge base name" % rb_name)
self.rb_names.add(rb_name)
key = source_package_name, path_from_package, source_filepath
if debug: print >> sys.stderr, "key:", key
if self.sources.get(key, (0,))[0] < source_mtime:
if debug:
print >> sys.stderr, source_filepath, "needs to be compiled"
self.sources[key] = []
self.dirty = True
def do_by_ext(self, prefix, filename, *args):
ext = os.path.splitext(filename)[1][1:]
return getattr(self, "%s_%s" % (prefix, ext))(filename, *args)
def compile(self, engine):
if debug: print >> sys.stderr, "%s.compile:" % self.package_name
global krb_compiler
if self.check_sources and not self.loader:
initialized = False
for (source_package_name, path_from_package, source_filename), \
value \
in self.sources.iteritems():
if not value and \
(source_package_name, path_from_package) in \
self.source_packages:
if not initialized:
try:
krb_compiler
except NameError:
from pyke import krb_compiler
initialized = True
target_files = \
self.do_by_ext('compile',
os.path.join(
self.source_packages[source_package_name,
path_from_package],
source_filename))
if debug: print >> sys.stderr, "target_files:", target_files
value.append(time.time())
value.extend(target_files)
self.compiled_targets.update(target_files)
def compile_krb(self, source_filename):
if debug: print >> sys.stderr, "compile_krb:", source_filename
rb_name = os.path.basename(source_filename)[:-4]
return krb_compiler.compile_krb(rb_name, self.package_name,
self.directory, source_filename)
def compile_kfb(self, source_filename):
if debug: print >> sys.stderr, "compile_kfb:", source_filename
try:
fbc_name = os.path.basename(source_filename)[:-4] + '.fbc'
fbc_path = os.path.join(self.directory, fbc_name)
self.pickle_it(krb_compiler.compile_kfb(source_filename), fbc_path)
return (fbc_name,)
except:
if os.path.lexists(fbc_path): os.remove(fbc_path)
raise
def compile_kqb(self, source_filename):
if debug: print >> sys.stderr, "compile_kqb:", source_filename
try:
qbc_name = os.path.basename(source_filename)[:-4] + '.qbc'
qbc_path = os.path.join(self.directory, qbc_name)
self.pickle_it(krb_compiler.compile_kqb(source_filename), qbc_path)
return (qbc_name,)
except:
if os.path.lexists(qbc_path): os.remove(qbc_path)
raise
def write(self):
if debug: print >> sys.stderr, "target_pkg.write"
if self.dirty:
sys.stderr.write('writing [%s]/%s\n' %
(self.package_name,
os.path.basename(self.filename)))
with open(self.filename, 'w') as f:
f.write("# compiled_pyke_files.py\n\n")
f.write("from pyke import target_pkg\n\n")
f.write("pyke_version = %r\n" % pyke.version)
f.write("compiler_version = %r\n" % pyke.compiler_version)
f.write("target_pkg_version = %r\n\n" % pyke.target_pkg_version)
f.write("try:\n")
f.write(" loader = __loader__\n")
f.write("except NameError:\n")
f.write(" loader = None\n\n")
f.write("def get_target_pkg():\n")
f.write(" return target_pkg.target_pkg(__name__, __file__, "
"pyke_version, loader, {\n")
for key, value in self.sources.iteritems():
if debug: print >> sys.stderr, "write got:", key, value
if (key[0], key[1]) in self.source_packages:
if debug: print >> sys.stderr, "writing:", key, value
f.write(" %r:\n" % (key,))
f.write(" %r,\n" % (value,))
f.write(" },\n")
f.write(" compiler_version)\n\n")
if os.path.exists(self.filename + 'c'):
os.remove(self.filename + 'c')
if os.path.exists(self.filename + 'o'):
os.remove(self.filename + 'o')
def load(self, engine, load_fc = True, load_bc = True,
load_fb = True, load_qb = True):
load_flags = {'load_fc': load_fc, 'load_bc': load_bc,
'load_fb': load_fb, 'load_qb': load_qb}
if debug: print >> sys.stderr, "target_pkg.load:", load_flags
for (source_package_name, path_from_package, source_filename), value \
in self.sources.iteritems():
if not self.check_sources or self.loader or \
(source_package_name, path_from_package) in self.source_packages:
for target_filename in value[1:]:
if debug: print >> sys.stderr, "load:", target_filename
self.do_by_ext('load', target_filename, engine, load_flags)
def load_py(self, target_filename, engine, flags):
if debug: print >> sys.stderr, "load_py:", target_filename
target_module = target_filename[:-3] # strip '.py' extension.
module_path = self.package_name + '.' + target_module
if target_module.endswith('_fc'):
if flags['load_fc']:
self.load_module(module_path, target_filename, engine)
elif target_module.endswith('_bc'):
if flags['load_bc']:
self.load_module(module_path, target_filename, engine)
elif target_module.endswith('_plans'):
if flags['load_bc']:
self.load_module(module_path, target_filename, engine, False)
else:
raise AssertionError("target_pkg.load_py: "
"unknown target file type: %s" %
target_filename)
def load_fbc(self, target_filename, engine, flags):
if debug: print >> sys.stderr, "load_fbc:", target_filename
if flags['load_fb']:
self.load_pickle(target_filename, engine)
def load_qbc(self, target_filename, engine, flags):
if debug: print >> sys.stderr, "load_qbc:", target_filename
if flags['load_qb']:
self.load_pickle(target_filename, engine)
def load_module(self, module_path, filename, engine, do_import = True):
if debug: print >> sys.stderr, "load_module:", module_path, filename
module = None
if module_path in sys.modules:
if debug: print >> sys.stderr, "load_module: already imported"
module = sys.modules[module_path]
if filename in self.compiled_targets:
if debug: print >> sys.stderr, "load_module: reloading"
module = reload(module)
elif do_import:
if debug: print >> sys.stderr, "load_module: importing"
module = import_(module_path)
if module is not None and \
getattr(module, 'compiler_version', 0) != pyke.compiler_version:
raise AssertionError("%s: incorrect pyke version: running "
"%s, expected %s" %
(filename, pyke.version,
module.pyke_version))
if do_import: module.populate(engine)
def load_pickle(self, filename, engine):
global pickle
if debug: print >> sys.stderr, "load_pickle:", filename
try:
pickle
except NameError:
import cPickle as pickle
full_path = os.path.join(self.directory, filename)
if self.loader:
import contextlib
import StringIO
ctx_lib = \
contextlib.closing(
StringIO.StringIO(self.loader.get_data(full_path)))
else:
ctx_lib = open(full_path, 'rb')
with ctx_lib as f:
versions = pickle.load(f)
if isinstance(versions, tuple):
pyke_version, compiler_version = versions
else:
pyke_version, compiler_version = versions, 0
if compiler_version != pyke.compiler_version:
raise AssertionError("%s: incorrect pyke version: running "
"%s, expected %s" %
(filename, pyke.version, pyke_version))
pickle.load(f).register(engine)
def pickle_it(self, obj, path):
global pickle
try:
pickle
except NameError:
import cPickle as pickle
import copy_reg
copy_reg.pickle(slice, lambda s: (slice, (s.start, s.stop, s.step)))
sys.stderr.write("writing [%s]/%s\n" %
(self.package_name, os.path.basename(path)))
with open(path, 'wb') as f:
pickle.dump((pyke.version, pyke.compiler_version), f)
pickle.dump(obj, f)
def _raise_exc(exc): raise exc
def import_(modulename):
    '''modulename does not include the .py extension.
    '''
if debug: print >> sys.stderr, "import_:", modulename
mod = __import__(modulename)
for comp in modulename.split('.')[1:]:
mod = getattr(mod, comp)
return mod
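
if __name__ == '__main__':
    # Hedged demo added by the editor (not part of the original pyke
    # source): __import__('a.b') returns the top-level package 'a', which
    # is why import_ walks getattr() down to the leaf module.
    import os.path
    assert import_('os.path') is os.path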
| {
"repo_name": "e-loue/pyke",
"path": "pyke/target_pkg.py",
"copies": "2",
"size": "21588",
"license": "mit",
"hash": -743658591476484200,
"line_mean": 45.2248394004,
"line_max": 80,
"alpha_frac": 0.5249918933,
"autogenerated": false,
"ratio": 4.405510204081633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5930502097381632,
"avg_score": null,
"num_lines": null
} |
# $Id$
# -*- coding: utf-8 -*-
"""Snoop file format."""
from __future__ import absolute_import
import time
from . import dpkt
# RFC 1761
SNOOP_MAGIC = 0x736E6F6F70000000
SNOOP_VERSION = 2
SDL_8023 = 0
SDL_8024 = 1
SDL_8025 = 2
SDL_8026 = 3
SDL_ETHER = 4
SDL_HDLC = 5
SDL_CHSYNC = 6
SDL_IBMCC = 7
SDL_FDDI = 8
SDL_OTHER = 9
dltoff = {SDL_ETHER: 14}
class PktHdr(dpkt.Packet):
"""snoop packet header.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of snoop packet header.
TODO.
"""
__byte_order__ = '!'
__hdr__ = (
('orig_len', 'I', 0),
('incl_len', 'I', 0),
('rec_len', 'I', 0),
('cum_drops', 'I', 0),
('ts_sec', 'I', 0),
('ts_usec', 'I', 0),
)
class FileHdr(dpkt.Packet):
"""snoop file header.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of snoop file header.
TODO.
"""
__byte_order__ = '!'
__hdr__ = (
('magic', 'Q', SNOOP_MAGIC),
('v', 'I', SNOOP_VERSION),
('linktype', 'I', SDL_ETHER),
)
class Writer(object):
"""Simple snoop dumpfile writer.
TODO: Longer class information....
Attributes:
TODO.
"""
def __init__(self, fileobj, linktype=SDL_ETHER):
self.__f = fileobj
fh = FileHdr(linktype=linktype)
self.__f.write(str(fh))
def writepkt(self, pkt, ts=None):
if ts is None:
ts = time.time()
s = str(pkt)
n = len(s)
pad_len = 4 - n % 4 if n % 4 else 0
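        # snoop records are padded out to a 4-byte boundary (RFC 1761).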
ph = PktHdr(orig_len=n, incl_len=n,
rec_len=PktHdr.__hdr_len__ + n + pad_len,
ts_sec=int(ts),
                    ts_usec=int((float(ts) - int(ts)) * 1000000.0))
self.__f.write(str(ph))
self.__f.write(s + '\0' * pad_len)
def close(self):
self.__f.close()
class Reader(object):
"""Simple pypcap-compatible snoop file reader.
TODO: Longer class information....
Attributes:
TODO.
"""
def __init__(self, fileobj):
self.name = fileobj.name
self.fd = fileobj.fileno()
self.__f = fileobj
buf = self.__f.read(FileHdr.__hdr_len__)
self.__fh = FileHdr(buf)
self.__ph = PktHdr
if self.__fh.magic != SNOOP_MAGIC:
raise ValueError('invalid snoop header')
self.dloff = dltoff[self.__fh.linktype]
self.filter = ''
def fileno(self):
return self.fd
def datalink(self):
return self.__fh.linktype
def setfilter(self, value, optimize=1):
        raise NotImplementedError
def readpkts(self):
return list(self)
def dispatch(self, cnt, callback, *args):
if cnt > 0:
for i in range(cnt):
ts, pkt = next(self)
callback(ts, pkt, *args)
else:
for ts, pkt in self:
callback(ts, pkt, *args)
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
self.__f.seek(FileHdr.__hdr_len__)
while 1:
buf = self.__f.read(PktHdr.__hdr_len__)
if not buf: break
hdr = self.__ph(buf)
buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__)
yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
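
if __name__ == '__main__':
    # Hedged round-trip demo added by the editor (illustration only, not
    # part of dpkt): write one dummy 60-byte frame to a temporary snoop
    # file and read it back.  Assumes Python 2 string semantics, as used
    # throughout this module.
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.snoop')
    try:
        with os.fdopen(fd, 'wb') as f:
            Writer(f).writepkt('\x00' * 60, ts=1.0)
        with open(path, 'rb') as f:
            for ts, buf in Reader(f):
                assert (ts, len(buf)) == (1.0, 60)
    finally:
        os.remove(path)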
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/snoop.py",
"copies": "3",
"size": "3405",
"license": "bsd-3-clause",
"hash": -7985357575731490000,
"line_mean": 21.4013157895,
"line_max": 78,
"alpha_frac": 0.5107195301,
"autogenerated": false,
"ratio": 3.2336182336182335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5244337763718233,
"avg_score": null,
"num_lines": null
} |
# $Id$
# -*- coding: utf-8 -*-
"""Snoop file format."""
import sys, time
import dpkt
# RFC 1761
SNOOP_MAGIC = 0x736E6F6F70000000L
SNOOP_VERSION = 2
SDL_8023 = 0
SDL_8024 = 1
SDL_8025 = 2
SDL_8026 = 3
SDL_ETHER = 4
SDL_HDLC = 5
SDL_CHSYNC = 6
SDL_IBMCC = 7
SDL_FDDI = 8
SDL_OTHER = 9
dltoff = {SDL_ETHER: 14}
class PktHdr(dpkt.Packet):
"""snoop packet header."""
__byte_order__ = '!'
__hdr__ = (
('orig_len', 'I', 0),
('incl_len', 'I', 0),
('rec_len', 'I', 0),
('cum_drops', 'I', 0),
('ts_sec', 'I', 0),
('ts_usec', 'I', 0),
)
class FileHdr(dpkt.Packet):
"""snoop file header."""
__byte_order__ = '!'
__hdr__ = (
('magic', 'Q', SNOOP_MAGIC),
('v', 'I', SNOOP_VERSION),
('linktype', 'I', SDL_ETHER),
)
class Writer(object):
"""Simple snoop dumpfile writer."""
def __init__(self, fileobj, linktype=SDL_ETHER):
self.__f = fileobj
fh = FileHdr(linktype=linktype)
self.__f.write(str(fh))
def writepkt(self, pkt, ts=None):
if ts is None:
ts = time.time()
s = str(pkt)
n = len(s)
pad_len = 4 - n % 4 if n % 4 else 0
ph = PktHdr(orig_len=n, incl_len=n,
rec_len=PktHdr.__hdr_len__ + n + pad_len,
ts_sec=int(ts),
                    ts_usec=int((float(ts) - int(ts)) * 1000000.0))
self.__f.write(str(ph))
self.__f.write(s + '\0' * pad_len)
def close(self):
self.__f.close()
class Reader(object):
"""Simple pypcap-compatible snoop file reader."""
def __init__(self, fileobj):
self.name = fileobj.name
self.fd = fileobj.fileno()
self.__f = fileobj
buf = self.__f.read(FileHdr.__hdr_len__)
self.__fh = FileHdr(buf)
self.__ph = PktHdr
if self.__fh.magic != SNOOP_MAGIC:
raise ValueError('invalid snoop header')
self.dloff = dltoff[self.__fh.linktype]
self.filter = ''
def fileno(self):
return self.fd
def datalink(self):
return self.__fh.linktype
def setfilter(self, value, optimize=1):
        raise NotImplementedError
def readpkts(self):
return list(self)
def dispatch(self, cnt, callback, *args):
if cnt > 0:
for i in range(cnt):
ts, pkt = self.next()
callback(ts, pkt, *args)
else:
for ts, pkt in self:
callback(ts, pkt, *args)
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
self.__f.seek(FileHdr.__hdr_len__)
while 1:
buf = self.__f.read(PktHdr.__hdr_len__)
if not buf: break
hdr = self.__ph(buf)
buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__)
yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
| {
"repo_name": "jack8daniels2/dpkt",
"path": "dpkt/snoop.py",
"copies": "6",
"size": "2951",
"license": "bsd-3-clause",
"hash": -6821254005144230000,
"line_mean": 23.1885245902,
"line_max": 78,
"alpha_frac": 0.5025415114,
"autogenerated": false,
"ratio": 3.0997899159663866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6602331427366387,
"avg_score": null,
"num_lines": null
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtk import *
from vtkdevide import *
import os
import stat
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
splatmapper.SetRenderMode(not crm)
print "rendermode switched to %d" % (not crm)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
rwi.Render()
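# (Editor's note: ce_cb maps keystrokes to splat-mapper parameters -- 'm'
# toggles the render mode, "'"/',' step the ellipsoid diameter, 'd'/'h' the
# Gaussian radial extent, and 't'/'n' the Gaussian sigma.)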
filenames_init = os.listdir('zuydweg')
# go through list of files in directory, perform trivial tests
# and create a new list of files
dicom_fullnames = []
for filename in filenames_init:
# make full filename
fullname = os.path.join('zuydweg', filename)
# at the moment, we check that it's a regular file
if stat.S_ISREG(os.stat(fullname)[stat.ST_MODE]):
dicom_fullnames.append(fullname)
dicomr = vtkDICOMVolumeReader()
for fullname in dicom_fullnames:
# this will simply add a file to the buffer list of the
# vtkDICOMVolumeReader (will not set mtime)
print "%s\n" % fullname
dicomr.add_dicom_filename(fullname)
dicomr.SetSeriesInstanceIdx(0)
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(199, 0.0)
otf.AddPoint(200, 1.0)
otf.AddPoint(2800.0, 1.0)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(199, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(200, 1.0, 0.937, 0.859)
ctf.AddRGBPoint(2800, 1.0, 0.937, 0.859)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.9)
splatmapper.SetOmegaH(0.9)
splatmapper.SetInput(dicomr.GetOutput())
splatmapper.SetRenderMode(1)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.2)
vprop.SetSpecularPower(10)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.AddVolume(volume)
ren.GetActiveCamera().ParallelProjectionOn()
renwin = vtkRenderWindow()
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "chrisidefix/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_zuydweg.py",
"copies": "2",
"size": "3378",
"license": "bsd-3-clause",
"hash": 7741304271072571000,
"line_mean": 26.4634146341,
"line_max": 62,
"alpha_frac": 0.6856127886,
"autogenerated": false,
"ratio": 2.8196994991652753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45053122877652757,
"avg_score": null,
"num_lines": null
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtk import *
from vtkdevide import *
import time
def bench(camera, rwi):
initial_time = time.clock()
for i in range(36):
camera.Azimuth(10)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (36 / (end_time - initial_time))
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
splatmapper.SetRenderMode(not crm)
print "rendermode switched to %d" % (not crm)
if obj.GetKeyCode() == 'i':
com = splatmapper.GetPerspectiveOrderingMode()
com = com + 1
if com > 2:
com = 0
splatmapper.SetPerspectiveOrderingMode(com)
print "ordering mode switched to %d" % (com)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'b':
bench(ren.GetActiveCamera(), rwi)
rwi.Render()
vr = vtkStructuredPointsReader()
vr.SetFileName("r256.vtk")
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(1249.9, 0.0)
otf.AddPoint(1250.0, 1)
otf.AddPoint(2800.0, 1)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(1249.9, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(1250, 1.0, 0.937, 0.859)
ctf.AddRGBPoint(2800, 1.0, 0.937, 0.859)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.3)
splatmapper.SetOmegaH(0.3)
splatmapper.SetInput(vr.GetOutput())
splatmapper.SetRenderMode(0)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.2)
vprop.SetSpecularPower(10)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.SetBackground(0.5, 0.5, 0.5)
ren.AddVolume(volume)
#ren.GetActiveCamera().ParallelProjectionOn()
renwin = vtkRenderWindow()
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "chrisidefix/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render.py",
"copies": "2",
"size": "3358",
"license": "bsd-3-clause",
"hash": 890421088794909300,
"line_mean": 26.0806451613,
"line_max": 59,
"alpha_frac": 0.652173913,
"autogenerated": false,
"ratio": 2.8030050083472453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9333964672641799,
"avg_score": 0.024242849741089267,
"num_lines": 124
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtk import *
import vtk
from vtkdevide import *
import time
def bench(camera, rwi):
initial_time = time.clock()
for i in range(36):
camera.Azimuth(10)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (36 / (end_time - initial_time))
def bench2(camera, rwi):
initial_time = time.clock()
numberOfRenders = 10 * (36 + 1)
for i in range(10):
for j in range(36):
camera.Azimuth(10)
rwi.Render()
camera.Elevation(36 * i)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (numberOfRenders / (end_time - initial_time))
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
splatmapper.SetRenderMode(not crm)
print "rendermode switched to %d" % (not crm)
if obj.GetKeyCode() == 'i':
com = splatmapper.GetPerspectiveOrderingMode()
com = com + 1
if com > 3:
com = 0
splatmapper.SetPerspectiveOrderingMode(com)
print "ordering mode switched to %d" % (com)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'b':
bench2(ren.GetActiveCamera(), rwi)
rwi.Render()
v16 = vtkVolume16Reader()
v16.SetDataDimensions(256,256)
v16.SetDataByteOrderToBigEndian()
v16.SetFilePrefix("CThead/CThead")
v16.SetImageRange(1, 99)
v16.SetDataSpacing(1, 1, 2)
v16.Update()
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(899.9, 0.0)
otf.AddPoint(900, 0)
otf.AddPoint(1499.9, 1)
otf.AddPoint(1500.0, 1)
otf.AddPoint(65535.0, 1)
#skinCol = (0.83, 0.64, 0.58)
skinCol = (0.93, 0.87, 0.80)
boneCol = skinCol
#boneCol = (1.0, 0.937, 0.859)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(899.9, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(900, skinCol[0], skinCol[1], skinCol[2])
ctf.AddRGBPoint(1499.9, skinCol[0], skinCol[1], skinCol[2])
ctf.AddRGBPoint(1500, boneCol[0], boneCol[1], boneCol[2])
ctf.AddRGBPoint(2800, boneCol[0], boneCol[1], boneCol[2])
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.9)
splatmapper.SetOmegaH(0.9)
splatmapper.SetInput(v16.GetOutput())
splatmapper.SetRenderMode(0)
#splatmapper = vtkVolumeTextureMapper2D()
#splatmapper.SetInput(v16.GetOutput())
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.4)
vprop.SetSpecularPower(60) # 10
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.SetBackground(0.5, 0.5, 0.5)
ren.AddVolume(volume)
#ren.GetActiveCamera().ParallelProjectionOn()
cubeAxesActor2d = vtk.vtkCubeAxesActor2D()
cubeAxesActor2d.SetFlyModeToOuterEdges()
ren.AddActor(cubeAxesActor2d)
cubeAxesActor2d.VisibilityOff() # turn on here!
v16.Update()
cubeAxesActor2d.SetBounds(v16.GetOutput().GetBounds())
cubeAxesActor2d.SetCamera(ren.GetActiveCamera())
renwin = vtkRenderWindow()
renwin.SetSize(512, 512)
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "zhangfangyan/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_stanford_head.py",
"copies": "2",
"size": "4607",
"license": "bsd-3-clause",
"hash": -6586258528358648000,
"line_mean": 25.3257142857,
"line_max": 69,
"alpha_frac": 0.6566095073,
"autogenerated": false,
"ratio": 2.775301204819277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9217021442832352,
"avg_score": 0.042977853857385215,
"num_lines": 175
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtkpython import *
from vtkcpbothapython import *
import time
def bench(camera, rwi):
initial_time = time.clock()
for i in range(36):
camera.Azimuth(10)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (36 / (end_time - initial_time))
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
splatmapper.SetRenderMode(not crm)
print "rendermode switched to %d" % (not crm)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'b':
bench(ren.GetActiveCamera(), rwi)
rwi.Render()
reader = vtkImageReader()
reader.SetHeaderSize(0)
reader.SetFileDimensionality(3)
reader.SetFileName("bonsai.raw")
reader.SetDataScalarType(3)
reader.SetDataExtent(0,255,0,255,0,255)
reader.SetDataSpacing(1,1,1)
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(35, 0.0)
otf.AddPoint(35.1, 1)
otf.AddPoint(40, 1)
otf.AddPoint(40.1, 0)
otf.AddPoint(60,0)
otf.AddPoint(60.1,1)
otf.AddPoint(140,1)
otf.AddPoint(140.1,0)
otf.AddPoint(255.0, 0.0)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
agreen = (35 / 255.0, 142 / 255.0, 35 / 255.0)
ctf.AddRGBPoint(35, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(35.1, agreen[0], agreen[1], agreen[2])
ctf.AddRGBPoint(50, agreen[0], agreen[1], agreen[2])
ctf.AddRGBPoint(50.1, 0,0,0)
abrown = (142 / 255.0, 35 / 255.0, 35 / 255.0)
ctf.AddRGBPoint(60, 0,0,0)
ctf.AddRGBPoint(60.1, abrown[0], abrown[1], abrown[2])
ctf.AddRGBPoint(140, abrown[0], abrown[1], abrown[2])
ctf.AddRGBPoint(140.1, 0,0,0)
ctf.AddRGBPoint(180,0,0,0)
ctf.AddRGBPoint(180.1,0,0,1)
ctf.AddRGBPoint(255.0, 0, 0, 1)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.9)
splatmapper.SetOmegaH(0.9)
splatmapper.SetInput(reader.GetOutput())
splatmapper.SetRenderMode(0)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.2)
vprop.SetSpecularPower(10)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.AddVolume(volume)
ren.GetActiveCamera().ParallelProjectionOn()
renwin = vtkRenderWindow()
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "zhangfangyan/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_bonsai.py",
"copies": "2",
"size": "3758",
"license": "bsd-3-clause",
"hash": -5061219365613117000,
"line_mean": 25.6524822695,
"line_max": 59,
"alpha_frac": 0.6689728579,
"autogenerated": false,
"ratio": 2.598893499308437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9156526167717356,
"avg_score": 0.022268037898216343,
"num_lines": 141
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtkpython import *
from vtkcpbothapython import *
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
splatmapper.SetRenderMode(not crm)
print "rendermode switched to %d" % (not crm)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
rwi.Render()
hdfr = vtkHDFVolumeReader()
hdfr.SetFileName("tu_schouder1.hdf")
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(1249.9, 0.0)
otf.AddPoint(1250.0, 1.0)
otf.AddPoint(2800.0, 1.0)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(1249.9, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(1250, 1.0, 0.937, 0.859)
ctf.AddRGBPoint(2800, 1.0, 0.937, 0.859)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.9)
splatmapper.SetOmegaH(0.9)
splatmapper.SetInput(hdfr.GetOutput())
splatmapper.SetRenderMode(0)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.2)
vprop.SetSpecularPower(10)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.AddVolume(volume)
ren.GetActiveCamera().ParallelProjectionOn()
renwin = vtkRenderWindow()
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "zhangfangyan/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_tu_schouder.py",
"copies": "2",
"size": "2764",
"license": "bsd-3-clause",
"hash": -9070734184723424000,
"line_mean": 26.64,
"line_max": 59,
"alpha_frac": 0.6772793054,
"autogenerated": false,
"ratio": 2.725838264299803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4403117569699804,
"avg_score": null,
"num_lines": null
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtkpython import *
from vtkcpbothapython import *
hdfr = vtkHDFVolumeReader()
hdfr.SetFileName("skull_256x256x256.hdf")
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(39.9, 0.0)
otf.AddPoint(40.0, 1.0)
otf.AddPoint(150.0, 1.0)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(39.9, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(40, 1.0, 0.937, 0.859)
ctf.AddRGBPoint(150, 1.0, 0.937, 0.859)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.9)
splatmapper.SetOmegaH(0.9)
splatmapper.SetInput(hdfr.GetOutput())
splatmapper.SetRenderMode(0)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.4)
vprop.SetDiffuse(0.7)
#vprop.SetSpecular(0.2)
vprop.SetSpecularPower(70)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.AddVolume(volume)
ren.GetActiveCamera().ParallelProjectionOn()
renwin = vtkRenderWindow()
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "chrisidefix/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_skull.py",
"copies": "2",
"size": "1324",
"license": "bsd-3-clause",
"hash": 7355161074872225000,
"line_mean": 20.3548387097,
"line_max": 47,
"alpha_frac": 0.7575528701,
"autogenerated": false,
"ratio": 2.3309859154929575,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8973963882020163,
"avg_score": 0.022914980714559004,
"num_lines": 62
} |
# $Id$
# example to test shell renderer (*shudder*)
from vtkpython import *
from vtkdevide import *
import time
def bench(camera, rwi):
initial_time = time.clock()
for i in range(36):
camera.Azimuth(10)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (36 / (end_time - initial_time))
def bench2(camera, rwi):
initial_time = time.clock()
numberOfRenders = 10 * (36 + 1)
for i in range(10):
for j in range(36):
camera.Azimuth(10)
rwi.Render()
camera.Elevation(36 * i)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (numberOfRenders / (end_time - initial_time))
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
crm = crm + 1
if crm > 2:
crm = 0
splatmapper.SetRenderMode(crm)
print "rendermode switched to %d" % (crm)
if obj.GetKeyCode() == 'i':
com = splatmapper.GetPerspectiveOrderingMode()
com = com + 1
if com > 3:
com = 0
splatmapper.SetPerspectiveOrderingMode(com)
print "ordering mode switched to %d" % (com)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'b':
bench2(ren.GetActiveCamera(), rwi)
rwi.Render()
reader = vtkImageReader()
reader.SetHeaderSize(0)
reader.SetFileDimensionality(3)
reader.SetFileName("aneurism.raw")
reader.SetDataScalarType(3)
reader.SetDataExtent(0,255,0,255,0,255)
reader.SetDataSpacing(1,1,1)
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(100, 0.0)
otf.AddPoint(100.1, 0.9)
otf.AddPoint(255.0, 0.9)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(100, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(100.1, 0.82, 0.43, 0.35)
ctf.AddRGBPoint(255.0, 0.82, 0.43, 0.35)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.1)
splatmapper.SetOmegaH(0.2)
splatmapper.SetInput(reader.GetOutput())
splatmapper.SetRenderMode(0)
splatmapper.SetPerspectiveOrderingMode(3)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.2)
vprop.SetSpecularPower(10)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.SetBackground(0.5, 0.5, 0.5)
ren.AddVolume(volume)
#ren.GetActiveCamera().ParallelProjectionOn()
cubeAxesActor2d = vtk.vtkCubeAxesActor2D()
cubeAxesActor2d.SetFlyModeToOuterEdges()
ren.AddActor(cubeAxesActor2d)
cubeAxesActor2d.VisibilityOff() # FIXME: activate axes here
reader.Update()
cubeAxesActor2d.SetBounds(reader.GetOutput().GetBounds())
cubeAxesActor2d.SetCamera(ren.GetActiveCamera())
renwin = vtkRenderWindow()
renwin.SetSize(512,512)
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "chrisidefix/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_aneurism.py",
"copies": "2",
"size": "4298",
"license": "bsd-3-clause",
"hash": -3879780000570995000,
"line_mean": 25.5308641975,
"line_max": 69,
"alpha_frac": 0.6591437878,
"autogenerated": false,
"ratio": 2.840713813615334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499857601415334,
"avg_score": null,
"num_lines": null
} |
# $Id$
# example to test shell renderer (*shudder*)
import vtk
from vtk import *
from vtkdevide import *
import time
def bench(camera, rwi):
initial_time = time.clock()
for i in range(36):
camera.Azimuth(10)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (36 / (end_time - initial_time))
def bench2(camera, rwi):
initial_time = time.clock()
numberOfRenders = 10 * (36 + 1)
for i in range(10):
for j in range(36):
camera.Azimuth(10)
rwi.Render()
camera.Elevation(36 * i)
rwi.Render()
end_time = time.clock()
print "FPS == %f" % (numberOfRenders / (end_time - initial_time))
textActor = vtk.vtkTextActor()
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
crm = crm + 1
if crm > 2:
crm = 0
splatmapper.SetRenderMode(crm)
print "rendermode switched to %d" % (crm)
if obj.GetKeyCode() in ['0', '1', '2', '4']:
# com = splatmapper.GetPerspectiveOrderingMode()
# com = com + 1
# if com > 2:
# com = 0
com = int(obj.GetKeyCode())
if com == 4:
com = 3
splatmapper.SetPerspectiveOrderingMode(com)
print "ordering mode switched to %d" % (com)
if com == 0:
textActor.SetInput("PBTF")
elif com == 1:
textActor.SetInput("IP-PBTF")
elif com == 2:
textActor.SetInput("Traditional BTF")
else:
textActor.SetInput("New-style IP-PBTF")
#textActor.GetPosition2Coordinate().SetValue(1, 1)
textActor.SetDisplayPosition(0, 140)
rwi.Render()
time.sleep(1.2)
textActor.SetDisplayPosition(0, 10)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'b':
bench2(ren.GetActiveCamera(), rwi)
rwi.Render()
reader = vtkImageReader()
reader.SetHeaderSize(0)
reader.SetFileDimensionality(3)
reader.SetFileName("engine.raw")
reader.SetDataScalarType(3)
reader.SetDataExtent(0,255,0,255,0,127)
reader.SetDataSpacing(1,1,1)
otf = vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(130, 0.0)
otf.AddPoint(130.1, 1)
otf.AddPoint(180.0, 1)
otf.AddPoint(180.1, 1)
otf.AddPoint(255.0, 1)
ctf = vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(130, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(130.1, 0.80, 0.80, 0.80)
ctf.AddRGBPoint(180.0, 0.80, 0.80, 0.80)
ctf.AddRGBPoint(180.1, 0.91, 0.61, 0.10)
ctf.AddRGBPoint(255.0, 0.91, 0.61, 0.10)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.1)
splatmapper.SetOmegaH(0.2)
splatmapper.SetInput(reader.GetOutput())
splatmapper.SetRenderMode(0)
# this should be PBTF
splatmapper.SetPerspectiveOrderingMode(0)
vprop = vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.1)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.4)
vprop.SetSpecularPower(40)
volume = vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtkRenderer()
ren.SetBackground(0.5, 0.5, 0.5)
ren.AddVolume(volume)
#ren.GetActiveCamera().ParallelProjectionOn()
cubeAxesActor2d = vtk.vtkCubeAxesActor2D()
cubeAxesActor2d.SetFlyModeToOuterEdges()
ren.AddActor(cubeAxesActor2d)
cubeAxesActor2d.VisibilityOff() # FIXME: you can switch it on here
reader.Update()
cubeAxesActor2d.SetBounds(reader.GetOutput().GetBounds())
cubeAxesActor2d.SetCamera(ren.GetActiveCamera())
# Create a scaled text actor.
textActor.ScaledTextOn()
textActor.SetDisplayPosition(0, 10)
textActor.SetInput("PBTF")
# Set coordinates to match the old vtkScaledTextActor default value
textActor.GetPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
textActor.GetPosition2Coordinate().SetValue(1, 0.1)
tprop = textActor.GetTextProperty()
tprop.SetFontSize(18)
tprop.SetFontFamilyToArial()
tprop.SetJustificationToCentered()
tprop.BoldOn()
#tprop.ItalicOn()
tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
# FIXME: switch text actor on here
ren.AddActor(textActor)
#### end of text
renwin = vtkRenderWindow()
renwin.SetSize(512,512)
renwin.AddRenderer(ren)
rwi = vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "zhangfangyan/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_engine.py",
"copies": "2",
"size": "5672",
"license": "bsd-3-clause",
"hash": 4305382729724825600,
"line_mean": 25.3813953488,
"line_max": 76,
"alpha_frac": 0.6565585331,
"autogenerated": false,
"ratio": 2.8953547728432873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4551913305943287,
"avg_score": null,
"num_lines": null
} |
# $Id$
# example to test shell renderer (*shudder*)
import vtk
import vtkdevide
def ce_cb(obj, evt_name):
if obj.GetKeyCode() == 'm':
crm = splatmapper.GetRenderMode()
splatmapper.SetRenderMode(not crm)
print "rendermode switched to %d" % (not crm)
elif obj.GetKeyCode() == '\'':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur - 0.1)
print "EllipsoidDiameter == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == ',':
cur = splatmapper.GetEllipsoidDiameter()
splatmapper.SetEllipsoidDiameter(cur + 0.1)
print "EllipsoidDiameter == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 'd':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur - 0.1)
print "GaussianRadialExtent == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'h':
cur = splatmapper.GetGaussianRadialExtent()
splatmapper.SetGaussianRadialExtent(cur + 0.1)
print "GaussianRadialExtent == %s" % str(cur + 0.1)
elif obj.GetKeyCode() == 't':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur - 0.1)
print "GaussianSigma == %s" % str(cur - 0.1)
elif obj.GetKeyCode() == 'n':
cur = splatmapper.GetGaussianSigma()
splatmapper.SetGaussianSigma(cur + 0.1)
print "GaussianSigma == %s" % str(cur + 0.1)
rwi.Render()
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName("cube.vti")
otf = vtk.vtkPiecewiseFunction()
otf.AddPoint(0.0, 0.0)
otf.AddPoint(253.9, 0.0)
otf.AddPoint(254, 0.95)
otf.AddPoint(255, 0.95)
ctf = vtk.vtkColorTransferFunction()
ctf.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(253.9, 0.0, 0.0, 0.0)
ctf.AddRGBPoint(254, 1.0, 1.0, 1.0)
ctf.AddRGBPoint(254.9, 1.0, 1.0, 1.0)
ctf.AddRGBPoint(255, 1.0, 0, 0)
#se = vtkShellExtractor()
#se.SetInput(hdfr.GetOutput())
#se.SetOpacityTF(otf)
#se.SetOmegaL(0.8)
#se.SetOmegaH(0.99)
#se.Update()
splatmapper = vtkdevide.vtkOpenGLVolumeShellSplatMapper()
splatmapper.SetOmegaL(0.9)
splatmapper.SetOmegaH(0.9)
splatmapper.SetInput(reader.GetOutput())
splatmapper.SetRenderMode(1)
splatmapper.SetPerspectiveOrderingMode(1)
vprop = vtk.vtkVolumeProperty()
vprop.SetScalarOpacity(otf)
vprop.SetColor(ctf)
vprop.ShadeOn()
vprop.SetAmbient(0.4)
vprop.SetDiffuse(0.7)
vprop.SetSpecular(0.2)
vprop.SetSpecularPower(70)
volume = vtk.vtkVolume()
volume.SetProperty(vprop)
volume.SetMapper(splatmapper)
ren = vtk.vtkRenderer()
ren.AddVolume(volume)
#ren.GetActiveCamera().ParallelProjectionOn()
cubeAxesActor2d = vtk.vtkCubeAxesActor2D()
cubeAxesActor2d.SetFlyModeToOuterEdges()
ren.AddActor(cubeAxesActor2d)
cubeAxesActor2d.VisibilityOn() # FIXME: activate axes here
reader.Update()
cubeAxesActor2d.SetBounds(reader.GetOutput().GetBounds())
cubeAxesActor2d.SetCamera(ren.GetActiveCamera())
renwin = vtk.vtkRenderWindow()
renwin.AddRenderer(ren)
rwi = vtk.vtkRenderWindowInteractor()
rwi.SetRenderWindow(renwin)
rwi.AddObserver('CharEvent', ce_cb)
#rwi.LightFollowCameraOn();
rwi.Initialize()
rwi.Start()
| {
"repo_name": "chrisidefix/devide.vtkdevide",
"path": "Examples/Rendering/Python/shell_render_synth.py",
"copies": "2",
"size": "3139",
"license": "bsd-3-clause",
"hash": 4470299878296584700,
"line_mean": 27.2792792793,
"line_max": 59,
"alpha_frac": 0.6948072635,
"autogenerated": false,
"ratio": 2.734320557491289,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4429127820991289,
"avg_score": null,
"num_lines": null
} |
"""$Id$"""
import os
import base64
import hashlib
import datetime
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.api import memcache
class LogoProgram(db.Model):
code = db.TextProperty()
img = db.BlobProperty()
date = db.DateTimeProperty(auto_now_add=True)
hash = db.StringProperty()
class Papert(webapp.RequestHandler):
def get(self):
hash = self.request.path[1:9] #this assumes that hashes are always 8 chars
extra = self.request.path[9:]
if extra == ".png" and hash == self.request.headers.get('If-None-Match'):
self.response.set_status(304)
return
if extra not in ('', '.png'):
self.redirect('/')
return
older = self.request.get("older")
newer = self.request.get("newer")
program = None
if hash:
program = memcache.get("program: %s" % hash)
if program is None:
program = LogoProgram.all().filter('hash = ', hash).get()
if program is None:
memcache.set("program: %s" % hash, "not found")
else:
memcache.set("program: %s" % hash, program)
if program == "not found":
program = None
if program is None:
self.redirect('/')
if program and extra == ".png":
self.response.headers['Content-Type'] = 'image/png'
self.response.headers['Cache-Control'] = 'max-age:604800'
self.response.headers['Etag'] = program.hash
self.response.headers['Last-Modified'] = program.date.ctime()
self.response.out.write(program.img)
else:
values = {'code':""}
if program:
values['code'] = program.code
values['hash'] = hash
if older or newer:
if older:
browse_date = datetime.datetime.strptime(older,"%Y-%m-%dT%H:%M:%S")
recent = LogoProgram.all().filter('date <', browse_date).order('-date').fetch(5)
values['older'] = older
elif newer:
browse_date = datetime.datetime.strptime(newer,"%Y-%m-%dT%H:%M:%S")
recent = LogoProgram.all().filter('date >', browse_date).order('date').fetch(5)
recent.reverse()
values['newer'] = newer
if recent:
values['recent'] = [program.hash for program in recent]
values['last_date'] = recent[-1].date.strftime("%Y-%m-%dT%H:%M:%S")
values['next_date'] = recent[0].date.strftime("%Y-%m-%dT%H:%M:%S")
else:
recent = memcache.get("recent_progs")
last_date = memcache.get("last_prog_date")
if not (recent and last_date):
recent = LogoProgram.all().order('-date').fetch(5)
if recent:
last_date = recent[-1].date.strftime("%Y-%m-%dT%H:%M:%S")
recent = [program.hash for program in recent]
memcache.set_multi({"recent_progs": recent,
"last_prog_date": last_date}, time=3600)
values['recent'] = recent
values['last_date'] = last_date
page = os.path.join(os.path.dirname(__file__), 'index.html.tmpl')
self.response.out.write(template.render(page, values))
def post(self):
code = self.request.get('code',None)
img = self.request.get('img',"")
# simple antispam
if sum(x in code.lower() for x in ('href=', 'url=', 'link=')) > 2:
self.redirect("/error")
return
if code.strip():
hash = base64.b64encode(hashlib.sha1(code.strip()).digest()[:6], "-_")
if not LogoProgram.all().filter('hash =', hash).get():
program = LogoProgram()
program.code = code
program.hash = hash
if img:
img = base64.b64decode(img)
img = images.Image(img)
img.resize(125, 125)
program.img = img.execute_transforms()
else:
self.redirect("/error")
return
program.put()
memcache.set("program: %s" % hash, program)
memcache.delete("recent")
else:
hash = ""
self.redirect("/%s" % hash)
application = webapp.WSGIApplication([('/.*', Papert)],debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
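
# Hedged illustration added by the editor (not part of the original app):
# program URLs use the first 6 bytes of a SHA-1 digest, base64-encoded with
# the URL-safe alphabet "-_", which always yields exactly 8 characters --
# matching the path[1:9] slice in Papert.get():
#
#   assert len(base64.b64encode(
#       hashlib.sha1("fd 100 rt 90").digest()[:6], "-_")) == 8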
| {
"repo_name": "elliotlaster/papert",
"path": "index.py",
"copies": "1",
"size": "4971",
"license": "mit",
"hash": 239587938446581760,
"line_mean": 35.8222222222,
"line_max": 100,
"alpha_frac": 0.507342587,
"autogenerated": false,
"ratio": 4.209144792548687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5216487379548688,
"avg_score": null,
"num_lines": null
} |
# $Id$
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import RDConfig
import unittest, os
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem.Fraggle import FraggleSim
class TestCase(unittest.TestCase):
def testFragmentation(self):
"""
"""
mol = Chem.MolFromSmiles('COc1cc(CN2CCC(CC2)NC(=O)c2cncc(C)c2)c(OC)c2ccccc12')
frags = FraggleSim.generate_fraggle_fragmentation(mol)
self.assertEqual(len(frags),16)
expected=('[*]C(=O)NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'[*]C(=O)c1cncc(C)c1.[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'[*]C(=O)c1cncc(C)c1.[*]c1cc(OC)c2ccccc2c1OC',
'[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'[*]C(=O)c1cncc(C)c1.[*]Cc1cc(OC)c2ccccc2c1OC',
'[*]Cc1cc(OC)c2ccccc2c1OC.[*]NC(=O)c1cncc(C)c1',
'[*]Cc1cc(OC)c2ccccc2c1OC.[*]c1cncc(C)c1',
'[*]NC(=O)c1cncc(C)c1.[*]c1cc(OC)c2ccccc2c1OC',
'[*]NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'[*]NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.[*]c1cncc(C)c1',
'[*]c1c(CN2CCC(NC(=O)c3cncc(C)c3)CC2)cc(OC)c2ccccc12',
'[*]c1c(OC)cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c1[*]',
'[*]c1cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c2ccccc12',
'[*]N1CCC(NC(=O)c2cncc(C)c2)CC1.[*]c1cc(OC)c2ccccc2c1OC',
'[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.[*]c1cncc(C)c1',
'[*]c1cc(OC)c2ccccc2c1OC.[*]c1cncc(C)c1')
for smi in frags:
self.assertTrue(smi in expected)
def testFragmentation2(self):
"""
"""
mol = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12')
frags = FraggleSim.generate_fraggle_fragmentation(mol)
self.assertEqual(len(frags),13)
expected=('[*]C(=O)c1ccccc1.[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1',
'[*]C(=O)c1ccccc1.[*]Cc1cc(OC)c2ccccc2c1OC',
'[*]C(=O)c1ccccc1.[*]c1cc(OC)c2ccccc2c1OC',
'[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.[*]c1ccccc1',
'[*]Cc1cc(OC)c2ccccc2c1OC.[*]NC(=O)c1ccccc1',
'[*]Cc1cc(OC)c2ccccc2c1OC.[*]c1ccccc1',
'[*]N1CCC(NC(=O)c2ccccc2)CC1.[*]c1cc(OC)c2ccccc2c1OC',
'[*]NC(=O)c1ccccc1.[*]c1cc(OC)c2ccccc2c1OC',
'[*]NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.[*]c1ccccc1',
'[*]c1c(CN2CCC(NC(=O)c3ccccc3)CC2)cc(OC)c2ccccc12',
'[*]c1c(OC)cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c1[*]',
'[*]c1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12',
'[*]c1cc(OC)c2ccccc2c1OC.[*]c1ccccc1')
for smi in frags:
self.assertTrue(smi in expected)
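# Editor's note (hedged): generate_fraggle_fragmentation returns SMILES
# strings in which '[*]' marks the attachment points created by each cut,
# as the expected lists above illustrate.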
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/Fraggle/UnitTestFraggle.py",
"copies": "1",
"size": "2521",
"license": "bsd-3-clause",
"hash": -6482826658338452000,
"line_mean": 34.5070422535,
"line_max": 82,
"alpha_frac": 0.6624355415,
"autogenerated": false,
"ratio": 1.8883895131086141,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3050825054608614,
"avg_score": null,
"num_lines": null
} |
# $Id$
__all__ = ['wsdl2python', 'utility', 'containers', 'commands']
class WSDLFormatError(Exception):
'''Detected errors in the WSDL document.
'''
class WsdlGeneratorError(Exception):
pass
class Wsdl2PythonError(Exception):
pass
class WSInteropError(Exception):
'''Conformance to WS-I Basic-Profile 1.0 specification
'''
class WSISpec:
R2203 = 'An rpc-literal binding in a DESCRIPTION MUST refer, in its soapbind:body element(s), only to wsdl:part element(s) that have been defined using the type attribute.'
R2710 = 'The operations in a wsdl:binding in a DESCRIPTION MUST result in wire signatures that are different from one another.'
R2717 = 'An rpc-literal binding in a DESCRIPTION MUST have the namespace attribute specified, the value of which MUST be an absolute URI, on contained soapbind:body elements.'
R2729 = 'A MESSAGE described with an rpc-literal binding that is a response message MUST have a wrapper element whose name is the corresponding wsdl:operation name suffixed with the string "Response"'
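
# Editor's note -- a hedged usage sketch, not part of the original module:
# conformance checks typically pair the exception with the violated rule's
# text, e.g.
#
#     raise WSInteropError(WSISpec.R2710)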
| {
"repo_name": "acigna/pywez",
"path": "zsi/ZSI/generate/__init__.py",
"copies": "3",
"size": "1064",
"license": "mit",
"hash": -3195133429876072400,
"line_mean": 43.3333333333,
"line_max": 204,
"alpha_frac": 0.7462406015,
"autogenerated": false,
"ratio": 3.869090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018565706460853135,
"num_lines": 24
} |
# $Id$
"""AOL Instant Messenger."""
import dpkt
import struct
# OSCAR: http://iserverd1.khstu.ru/oscar/
class FLAP(dpkt.Packet):
__hdr__ = (
('ast', 'B', 0x2a), # '*'
('type', 'B', 0),
('seq', 'H', 0),
('len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.ast != 0x2a:
raise dpkt.UnpackError('invalid FLAP header')
if len(self.data) < self.len:
            raise dpkt.NeedData('%d left, %d needed' %
                                (len(self.data), self.len))
class SNAC(dpkt.Packet):
__hdr__ = (
('family', 'H', 0),
('subtype', 'H', 0),
('flags', 'H', 0),
('reqid', 'I', 0)
)
def tlv(buf):
n = 4
try:
t, l = struct.unpack('>HH', buf[:n])
except struct.error:
raise dpkt.UnpackError
v = buf[n:n+l]
if len(v) < l:
raise dpkt.NeedData
buf = buf[n+l:]
    return (t, l, v, buf)
# TOC 1.0: http://jamwt.com/Py-TOC/PROTOCOL
# TOC 2.0: http://www.firestuff.org/projects/firetalk/doc/toc2.txt
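
if __name__ == '__main__':
    # Hedged demo added by the editor (not part of dpkt): parse one type-1,
    # length-4 TLV followed by trailing bytes.
    t, l, v, rest = tlv('\x00\x01\x00\x04spamrest')
    assert (t, l, v, rest) == (1, 4, 'spam', 'rest')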
| {
"repo_name": "tgoodyear/dpkt",
"path": "dpkt/aim.py",
"copies": "17",
"size": "1058",
"license": "bsd-3-clause",
"hash": 8493631405825797000,
"line_mean": 21.5106382979,
"line_max": 82,
"alpha_frac": 0.5009451796,
"autogenerated": false,
"ratio": 2.7058823529411766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02841449194676874,
"num_lines": 47
} |
# $Id$
"""Border Gateway Protocol."""
import dpkt
import struct, socket
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - 2858
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types
NO_EXPORT = 0xffffff01L
NO_ADVERTISE = 0xffffff02L
NO_EXPORT_SUBCONFED = 0xffffff03L
NO_PEER = 0xffffff04L
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
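
# Editor's note (hedged illustration): the smallest valid message is a
# KEEPALIVE -- a bare 19-byte header of 16 0xff marker bytes, a two-byte
# length of 19, and type 4:
#
#   msg = BGP('\xff' * 16 + '\x00\x13\x04')
#   assert (msg.len, msg.type) == (19, KEEPALIVE)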
class BGP(dpkt.Packet):
__hdr__ = (
('marker', '16s', '\xff' * 16),
('len', 'H', 0),
('type', 'B', OPEN)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
if self.type == OPEN:
self.data = self.open = self.Open(self.data)
elif self.type == UPDATE:
self.data = self.update = self.Update(self.data)
elif self.type == NOTIFICATION:
self.data = self.notifiation = self.Notification(self.data)
elif self.type == KEEPALIVE:
self.data = self.keepalive = self.Keepalive(self.data)
elif self.type == ROUTE_REFRESH:
self.data = self.route_refresh = self.RouteRefresh(self.data)
class Open(dpkt.Packet):
__hdr__ = (
('v', 'B', 4),
('asn', 'H', 0),
('holdtime', 'H', 0),
('identifier', 'I', 0),
('param_len', 'B', 0)
)
__hdr_defaults__ = {
'parameters': []
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
plen = self.param_len
while plen > 0:
param = self.Parameter(self.data)
self.data = self.data[len(param):]
plen -= len(param)
l.append(param)
self.data = self.parameters = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.parameters))
def __str__(self):
params = ''.join(map(str, self.parameters))
self.param_len = len(params)
return self.pack_hdr() + params
class Parameter(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
if self.type == AUTHENTICATION:
self.data = self.authentication = self.Authentication(self.data)
elif self.type == CAPABILITY:
self.data = self.capability = self.Capability(self.data)
class Authentication(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
)
class Capability(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
class Update(dpkt.Packet):
__hdr_defaults__ = {
'withdrawn': [],
'attributes': [],
'announced': []
}
def unpack(self, buf):
self.data = buf
# Withdrawn Routes
wlen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l = []
while wlen > 0:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
wlen -= len(route)
l.append(route)
self.withdrawn = l
# Path Attributes
plen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l = []
while plen > 0:
attr = self.Attribute(self.data)
self.data = self.data[len(attr):]
plen -= len(attr)
l.append(attr)
self.attributes = l
# Announced Routes
l = []
while self.data:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
l.append(route)
self.announced = l
def __len__(self):
return 2 + sum(map(len, self.withdrawn)) + \
2 + sum(map(len, self.attributes)) + \
sum(map(len, self.announced))
def __str__(self):
return struct.pack('>H', sum(map(len, self.withdrawn))) + \
''.join(map(str, self.withdrawn)) + \
struct.pack('>H', sum(map(len, self.attributes))) + \
''.join(map(str, self.attributes)) + \
''.join(map(str, self.announced))
class Attribute(dpkt.Packet):
__hdr__ = (
('flags', 'B', 0),
('type', 'B', 0)
)
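            # Flags byte layout (RFC 4271, section 4.3): bit 7 Optional,
            # bit 6 Transitive, bit 5 Partial, bit 4 Extended Length; the
            # low four bits are unused.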
def _get_o(self):
return (self.flags >> 7) & 0x1
def _set_o(self, o):
self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
optional = property(_get_o, _set_o)
def _get_t(self):
return (self.flags >> 6) & 0x1
def _set_t(self, t):
self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
transitive = property(_get_t, _set_t)
def _get_p(self):
return (self.flags >> 5) & 0x1
def _set_p(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
partial = property(_get_p, _set_p)
def _get_e(self):
return (self.flags >> 4) & 0x1
def _set_e(self, e):
self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
extended_length = property(_get_e, _set_e)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.extended_length:
self.len = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
else:
self.len = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.data = self.data[:self.len]
if self.type == ORIGIN:
self.data = self.origin = self.Origin(self.data)
elif self.type == AS_PATH:
self.data = self.as_path = self.ASPath(self.data)
elif self.type == NEXT_HOP:
self.data = self.next_hop = self.NextHop(self.data)
elif self.type == MULTI_EXIT_DISC:
self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
elif self.type == LOCAL_PREF:
self.data = self.local_pref = self.LocalPref(self.data)
elif self.type == ATOMIC_AGGREGATE:
self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
elif self.type == AGGREGATOR:
self.data = self.aggregator = self.Aggregator(self.data)
elif self.type == COMMUNITIES:
self.data = self.communities = self.Communities(self.data)
elif self.type == ORIGINATOR_ID:
self.data = self.originator_id = self.OriginatorID(self.data)
elif self.type == CLUSTER_LIST:
self.data = self.cluster_list = self.ClusterList(self.data)
elif self.type == MP_REACH_NLRI:
self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
elif self.type == MP_UNREACH_NLRI:
self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
def __len__(self):
if self.extended_length:
attr_len = 2
else:
attr_len = 1
return self.__hdr_len__ + \
attr_len + \
len(self.data)
def __str__(self):
if self.extended_length:
attr_len_str = struct.pack('>H', self.len)
else:
attr_len_str = struct.pack('B', self.len)
return self.pack_hdr() + \
attr_len_str + \
str(self.data)
class Origin(dpkt.Packet):
__hdr__ = (
('type', 'B', ORIGIN_IGP),
)
class ASPath(dpkt.Packet):
__hdr_defaults__ = {
'segments': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
seg = self.ASPathSegment(self.data)
self.data = self.data[len(seg):]
l.append(seg)
self.data = self.segments = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class ASPathSegment(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
for i in range(self.len):
AS = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l.append(AS)
self.data = self.path = l
def __len__(self):
return self.__hdr_len__ + \
2 * len(self.path)
def __str__(self):
as_str = ''
for AS in self.path:
as_str += struct.pack('>H', AS)
return self.pack_hdr() + \
as_str
class NextHop(dpkt.Packet):
__hdr__ = (
('ip', 'I', 0),
)
class MultiExitDisc(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class LocalPref(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class AtomicAggregate(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class Aggregator(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('ip', 'I', 0)
)
class Communities(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
val = struct.unpack('>I', self.data[:4])[0]
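                        # RFC 1997 reserves 0x00000000-0x0000ffff and
                        # 0xffff0000-0xffffffff; all other values are
                        # ASN:value community pairs.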
if (val >= 0x00000000L and val <= 0x0000ffffL) or \
(val >= 0xffff0000L and val <= 0xffffffffL):
comm = self.ReservedCommunity(self.data[:4])
else:
comm = self.Community(self.data[:4])
self.data = self.data[len(comm):]
l.append(comm)
self.data = self.list = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
return ''.join(map(str, self.data))
class Community(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('value', 'H', 0)
)
class ReservedCommunity(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class OriginatorID(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class ClusterList(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l = []
while self.data:
id = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l.append(id)
self.data = self.list = l
def __len__(self):
return 4 * len(self.list)
def __str__(self):
cluster_str = ''
for val in self.list:
cluster_str += struct.pack('>I', val)
return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.next_hop = self.data[:nlen]
self.data = self.data[nlen:]
# SNPAs
l = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l.append(snpa)
self.snpas = l
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Announced Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.announced = l
def __len__(self):
return self.__hdr_len__ + \
1 + len(self.next_hop) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __str__(self):
return self.pack_hdr() + \
struct.pack('B', len(self.next_hop)) + \
str(self.next_hop) + \
struct.pack('B', len(self.snpas)) + \
''.join(map(str, self.snpas)) + \
''.join(map(str, self.announced))
                class SNPA(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
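                        # the SNPA length is given in semi-octets
                        # (nibbles), so round up to whole bytes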
self.data = self.data[:(self.len + 1) / 2]
class MPUnreachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
else:
Route = RouteGeneric
# Withdrawn Routes
l = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l.append(route)
self.data = self.withdrawn = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.data))
def __str__(self):
return self.pack_hdr() + \
''.join(map(str, self.data))
class Notification(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('subcode', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.error = self.data
class Keepalive(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __str__(self):
return ''
class RouteRefresh(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('rsvd', 'B', 0),
('safi', 'B', SAFI_UNICAST)
)
class RouteGeneric(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.prefix = self.data[:(self.len + 7) / 8]
class RouteIPV4(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
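        # a prefix of `len` bits occupies (len + 7) / 8 bytes on the
        # wire; zero-pad up to a full 4-byte IPv4 address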
tmp = self.data[:(self.len + 7) / 8]
tmp += (4 - len(tmp)) * '\x00'
self.data = self.prefix = tmp
def __repr__(self):
cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
return '%s(%s)' % (self.__class__.__name__, cidr)
def __len__(self):
return self.__hdr_len__ + \
(self.len + 7) / 8
def __str__(self):
return self.pack_hdr() + \
self.prefix[:(self.len + 7) / 8]
class RouteIPV6(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) / 8]
tmp += (16 - len(tmp)) * '\x00'
self.data = self.prefix = tmp
def __len__(self):
return self.__hdr_len__ + \
(self.len + 7) / 8
def __str__(self):
return self.pack_hdr() + \
self.prefix[:(self.len + 7) / 8]
if __name__ == '__main__':
import unittest
class BGPTestCase(unittest.TestCase):
def testPack(self):
b1 = BGP(self.bgp1)
self.failUnless(self.bgp1 == str(b1))
b2 = BGP(self.bgp2)
self.failUnless(self.bgp2 == str(b2))
b3 = BGP(self.bgp3)
self.failUnless(self.bgp3 == str(b3))
b4 = BGP(self.bgp4)
self.failUnless(self.bgp4 == str(b4))
def testUnpack(self):
b1 = BGP(self.bgp1)
self.failUnless(b1.len == 19)
self.failUnless(b1.type == KEEPALIVE)
self.failUnless(b1.keepalive is not None)
b2 = BGP(self.bgp2)
self.failUnless(b2.type == UPDATE)
self.failUnless(len(b2.update.withdrawn) == 0)
self.failUnless(len(b2.update.announced) == 1)
self.failUnless(len(b2.update.attributes) == 9)
a = b2.update.attributes[1]
self.failUnless(a.type == AS_PATH)
self.failUnless(a.len == 10)
self.failUnless(len(a.as_path.segments) == 2)
s = a.as_path.segments[0]
self.failUnless(s.type == AS_SET)
self.failUnless(s.len == 2)
self.failUnless(len(s.path) == 2)
self.failUnless(s.path[0] == 500)
a = b2.update.attributes[6]
self.failUnless(a.type == COMMUNITIES)
self.failUnless(a.len == 12)
self.failUnless(len(a.communities.list) == 3)
c = a.communities.list[0]
self.failUnless(c.asn == 65215)
self.failUnless(c.value == 1)
r = b2.update.announced[0]
self.failUnless(r.len == 22)
self.failUnless(r.prefix == '\xc0\xa8\x04\x00')
b3 = BGP(self.bgp3)
self.failUnless(b3.type == UPDATE)
self.failUnless(len(b3.update.withdrawn) == 0)
self.failUnless(len(b3.update.announced) == 0)
self.failUnless(len(b3.update.attributes) == 6)
a = b3.update.attributes[0]
self.failUnless(a.optional == False)
self.failUnless(a.transitive == True)
self.failUnless(a.partial == False)
self.failUnless(a.extended_length == False)
self.failUnless(a.type == ORIGIN)
self.failUnless(a.len == 1)
o = a.origin
self.failUnless(o.type == ORIGIN_IGP)
a = b3.update.attributes[5]
self.failUnless(a.optional == True)
self.failUnless(a.transitive == False)
self.failUnless(a.partial == False)
self.failUnless(a.extended_length == True)
self.failUnless(a.type == MP_REACH_NLRI)
self.failUnless(a.len == 30)
m = a.mp_reach_nlri
self.failUnless(m.afi == AFI_IPV4)
self.failUnless(len(m.snpas) == 0)
self.failUnless(len(m.announced) == 1)
p = m.announced[0]
self.failUnless(p.len == 96)
b4 = BGP(self.bgp4)
self.failUnless(b4.len == 45)
self.failUnless(b4.type == OPEN)
self.failUnless(b4.open.asn == 237)
self.failUnless(b4.open.param_len == 16)
self.failUnless(len(b4.open.parameters) == 3)
p = b4.open.parameters[0]
self.failUnless(p.type == CAPABILITY)
self.failUnless(p.len == 6)
c = p.capability
self.failUnless(c.code == CAP_MULTIPROTOCOL)
self.failUnless(c.len == 4)
self.failUnless(c.data == '\x00\x01\x00\x01')
c = b4.open.parameters[2].capability
self.failUnless(c.code == CAP_ROUTE_REFRESH)
self.failUnless(c.len == 0)
bgp1 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
bgp2 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
bgp3 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
bgp4 = '\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
unittest.main()
| {
"repo_name": "edisona/dpkt",
"path": "dpkt/bgp.py",
"copies": "17",
"size": "25690",
"license": "bsd-3-clause",
"hash": -3886960918695103000,
"line_mean": 32.8026315789,
"line_max": 501,
"alpha_frac": 0.4518489685,
"autogenerated": false,
"ratio": 3.5547253355472535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Cisco Discovery Protocol."""
import struct
import dpkt
CDP_DEVID = 1 # string
CDP_ADDRESS = 2
CDP_PORTID = 3 # string
CDP_CAPABILITIES = 4 # 32-bit bitmask
CDP_VERSION = 5 # string
CDP_PLATFORM = 6 # string
CDP_IPPREFIX = 7
CDP_VTP_MGMT_DOMAIN = 9 # string
CDP_NATIVE_VLAN = 10 # 16-bit integer
CDP_DUPLEX = 11 # 8-bit boolean
CDP_TRUST_BITMAP = 18 # 8-bit bitmask
CDP_UNTRUST_COS = 19 # 8-bit port
CDP_SYSTEM_NAME = 20 # string
CDP_SYSTEM_OID = 21 # 10-byte binary string
CDP_MGMT_ADDRESS = 22 # 32-bit number of addrs, Addresses
CDP_LOCATION = 23 # string
class CDP(dpkt.Packet):
__hdr__ = (
('version', 'B', 2),
('ttl', 'B', 180),
('sum', 'H', 0)
)
class Address(dpkt.Packet):
# XXX - only handle NLPID/IP for now
__hdr__ = (
('ptype', 'B', 1), # protocol type (NLPID)
('plen', 'B', 1), # protocol length
('p', 'B', 0xcc), # IP
('alen', 'H', 4) # address length
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.alen]
class TLV(dpkt.Packet):
__hdr__ = (
('type', 'H', 0),
('len', 'H', 4)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - 4]
if self.type == CDP_ADDRESS:
n = struct.unpack('>I', self.data[:4])[0]
buf = self.data[4:]
l = []
for i in range(n):
a = CDP.Address(buf)
l.append(a)
buf = buf[len(a):]
self.data = l
def __len__(self):
if self.type == CDP_ADDRESS:
n = 4 + sum(map(len, self.data))
else:
n = len(self.data)
return self.__hdr_len__ + n
def __str__(self):
self.len = len(self)
if self.type == CDP_ADDRESS:
s = struct.pack('>I', len(self.data)) + \
''.join(map(str, self.data))
else:
s = self.data
return self.pack_hdr() + s
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l = []
while buf:
tlv = self.TLV(buf)
l.append(tlv)
buf = buf[len(tlv):]
self.data = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __str__(self):
data = ''.join(map(str, self.data))
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
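if __name__ == '__main__':
    # Usage sketch, not part of the original module. It assumes dpkt's
    # keyword-argument Packet constructor; the device-id value is made up.
    tlv = CDP.TLV(type=CDP_DEVID, data='router-1')
    frame = str(CDP(data=[tlv]))   # __str__ fills in the checksum
    parsed = CDP(frame)
    print parsed.data[0].type, parsed.data[0].data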
| {
"repo_name": "FunctionAnalysis/dpkt",
"path": "dpkt/cdp.py",
"copies": "17",
"size": "2751",
"license": "bsd-3-clause",
"hash": -2924175724334721500,
"line_mean": 27.9578947368,
"line_max": 60,
"alpha_frac": 0.4645583424,
"autogenerated": false,
"ratio": 3.1657077100115076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id$
"""Cisco Skinny Client Control Protocol."""
import dpkt
KEYPAD_BUTTON = 0x00000003
OFF_HOOK = 0x00000006
ON_HOOK = 0x00000007
OPEN_RECEIVE_CHANNEL_ACK = 0x00000022
START_TONE = 0x00000082
STOP_TONE = 0x00000083
SET_LAMP = 0x00000086
SET_SPEAKER_MODE = 0x00000088
START_MEDIA_TRANSMIT = 0x0000008A
STOP_MEDIA_TRANSMIT = 0x0000008B
CALL_INFO = 0x0000008F
DEFINE_TIME_DATE = 0x00000094
DISPLAY_TEXT = 0x00000099
OPEN_RECEIVE_CHANNEL = 0x00000105
CLOSE_RECEIVE_CHANNEL = 0x00000106
SELECT_SOFTKEYS = 0x00000110
CALL_STATE = 0x00000111
DISPLAY_PROMPT_STATUS = 0x00000112
CLEAR_PROMPT_STATUS = 0x00000113
ACTIVATE_CALL_PLANE = 0x00000116
class ActivateCallPlane(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 0),
)
class CallInfo(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('calling_party_name', '40s', ''),
('calling_party', '24s', ''),
('called_party_name', '40s', ''),
('called_party', '24s', ''),
('line_instance', 'I', 0),
('call_id', 'I', 0),
('call_type', 'I', 0),
('orig_called_party_name', '40s', ''),
('orig_called_party', '24s', '')
)
class CallState(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('call_state', 'I', 12), # 12: Proceed, 15: Connected
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class ClearPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class CloseReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class DisplayPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('msg_timeout', 'I', 0),
('display_msg', '32s', ''),
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class DisplayText(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('display_msg', '36s', ''),
)
class KeypadButton(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('button', 'I', 0),
)
class OpenReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('echo_cancel_type', 'I', 4),
('g723_bitrate', 'I', 0),
)
class OpenReceiveChannelAck(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('channel_status', 'I', 0),
('ip', '4s', ''),
('port', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SelectStartKeys(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_id', 'I', 1),
('call_id', 'I', 0),
('softkey_set', 'I', 8),
('softkey_map', 'I', 0xffffffffL)
)
class SetLamp(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('stimulus', 'I', 9), # 9: Line
('stimulus_instance', 'I', 1),
('lamp_mode', 'I', 1),
)
class SetSpeakerMode(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('speaker', 'I', 2), # 2: SpeakerOff
)
class StartMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('remote_ip', '4s', ''),
('remote_port', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('precedence', 'I', 0),
('silence_suppression', 'I', 0),
('max_frames_per_pkt', 'I', 1),
('g723_bitrate', 'I', 0),
)
class StartTone(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('tone', 'I', 0x24), # 0x24: AlertingTone
)
class StopMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SCCP(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('len', 'I', 0),
('rsvd', 'I', 0),
('msgid', 'I', 0),
('msg', '0s', ''),
)
_msgsw = {
KEYPAD_BUTTON:KeypadButton,
OPEN_RECEIVE_CHANNEL_ACK:OpenReceiveChannelAck,
START_TONE:StartTone,
SET_LAMP:SetLamp,
START_MEDIA_TRANSMIT:StartMediaTransmission,
STOP_MEDIA_TRANSMIT:StopMediaTransmission,
CALL_INFO:CallInfo,
DISPLAY_TEXT:DisplayText,
OPEN_RECEIVE_CHANNEL:OpenReceiveChannel,
CLOSE_RECEIVE_CHANNEL:CloseReceiveChannel,
CALL_STATE:CallState,
DISPLAY_PROMPT_STATUS:DisplayPromptStatus,
CLEAR_PROMPT_STATUS:ClearPromptStatus,
ACTIVATE_CALL_PLANE:ActivateCallPlane,
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
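        # the length field counts from the message id onward, so the
        # message body is len - 4 bytes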
n = self.len - 4
if n > len(self.data):
raise dpkt.NeedData('not enough data')
self.msg, self.data = self.data[:n], self.data[n:]
try:
p = self._msgsw[self.msgid](self.msg)
setattr(self, p.__class__.__name__.lower(), p)
except (KeyError, dpkt.UnpackError):
pass
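if __name__ == '__main__':
    # Usage sketch, not part of the original module; the button value is
    # arbitrary. Builds a raw KEYPAD_BUTTON message by hand and lets
    # SCCP dispatch it through _msgsw.
    import struct
    payload = struct.pack('<I', 5)                               # button 5
    raw = struct.pack('<3I', len(payload) + 4, 0, KEYPAD_BUTTON) + payload
    print SCCP(raw).keypadbutton.button                          # -> 5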
| {
"repo_name": "asteven/dpkt",
"path": "dpkt/sccp.py",
"copies": "17",
"size": "5273",
"license": "bsd-3-clause",
"hash": 7880280090423263000,
"line_mean": 25.9030612245,
"line_max": 61,
"alpha_frac": 0.504456666,
"autogenerated": false,
"ratio": 2.9892290249433104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012775778156327314,
"num_lines": 196
} |
#$Id$
class Account:
"""This class is used to create object for accounts object."""
def __init__(self):
"""Initialize parameters for Accounts."""
self.account_id = ''
self.account_name = ''
self.bcy_balance = 0.0
self.bcy_balance_formatted = ''
self.fcy_balance = 0.0
self.fcy_balance_formatted = ''
self.adjusted_balance = 0.0
self.adjusted_balance_formatted = ''
self.gain_or_loss = 0.0
self.gain_or_loss_formatted = ''
self.gl_specific_type = 0
self.account_split_id = ''
self.debit_or_credit = ''
self.exchange_rate = 0.0
self.currency_id = ''
self.currency_code = ''
self.bcy_amount = 0.0
self.amount = 0.0
def set_account_id(self, account_id):
"""Set account id.
Args:
account_id(str): Account id.
"""
self.account_id = account_id
def get_account_id(self):
"""Get account id.
Returns:
str: Account id.
"""
return self.account_id
def set_account_name(self, account_name):
"""Set account name.
Args:
account_name(str): Account name.
"""
self.account_name = account_name
def get_account_name(self):
"""Get account name.
Returns:
str: Account name.
"""
return self.account_name
def set_bcy_balance(self, bcy_balance):
"""Set bcy balance.
Args:
bcy_balance(float): Bcy balance.
"""
self.bcy_balance = bcy_balance
def get_bcy_balance(self):
"""Get bcy balance.
Returns:
float: Bcy balance.
"""
return self.bcy_balance
def set_bcy_balance_formatted(self, bcy_balance_formatted):
"""Set bcy balance formatted.
Args:
bcy_balance_formatted(str): Bcy balance formatted.
"""
self.bcy_balance_formatted = bcy_balance_formatted
def get_bcy_balance_formatted(self):
"""Get bcy balance formatted.
Returns:
str: Bcy balance formatted.
"""
return self.bcy_balance_formatted
def set_fcy_balance(self, fcy_balance):
"""Set fcy balance.
Args:
fcy_balance(float): Fcy balance.
"""
self.fcy_balance = fcy_balance
def get_fcy_balance(self):
"""Get fcy balance.
Returns:
float: Fcy balance.
"""
return self.fcy_balance
def set_fcy_balance_formatted(self, fcy_balance_formatted):
"""Set fcy balance formatted.
Args:
fcy_balance_formatted(str): Fcy balance formatted.
"""
self.fcy_balance_formatted = fcy_balance_formatted
def get_fcy_balance_formatted(self):
"""Get fcy balance formatted.
Returns:
str: Fcy balance formatted.
"""
return self.fcy_balance_formatted
    def set_adjusted_balance(self, adjusted_balance):
        """Set adjusted balance.
        Args:
            adjusted_balance(float): Adjusted balance.
        """
        self.adjusted_balance = adjusted_balance
    def get_adjusted_balance(self):
        """Get adjusted balance.
        Returns:
            float: Adjusted balance.
        """
        return self.adjusted_balance
def set_adjusted_balance_formatted(self, adjusted_balance_formatted):
"""Set adjusted balance formatted.
Args:
adjusted_balance_formatted(str): Adjusted balance formatted.
"""
self.adjusted_balance_formatted = adjusted_balance_formatted
def get_adjusted_balance_formatted(self):
"""Get adjusted balance formatted.
Returns:
str: Adjusted balance formatted.
"""
return self.adjusted_balance_formatted
def set_gain_or_loss(self, gain_or_loss):
"""Set gain or loss.
Args:
gain_or_loss(float): Gain or loss.
"""
self.gain_or_loss = gain_or_loss
def get_gain_or_loss(self):
"""Get gain or loss.
Returns:
float: Gain or loss.
"""
return self.gain_or_loss
def set_gain_or_loss_formatted(self, gain_or_loss_formatted):
"""Set gain or loss formatted.
Args:
gain_or_loss_formatted(str): Gain or loss formatted.
"""
self.gain_or_loss_formatted = gain_or_loss_formatted
def get_gain_or_loss_formatted(self):
"""Get gain or loss formatted.
Returns:
str: Gain or loss formatted.
"""
return self.gain_or_loss_formatted
def set_gl_specific_type(self, gl_specific_type):
"""Set gl specific type.
Args:
gl_specific_type(int): Gl specific type.
"""
self.gl_specific_type = gl_specific_type
def get_gl_specific_type(self):
"""Get gl specific type.
Returns:
int: Gl specific type.
"""
return self.gl_specific_type
def set_account_split_id(self, account_split_id):
"""Set account split id.
Args:
account_split_id(str): Account split id.
"""
self.account_split_id = account_split_id
def get_account_split_id(self):
"""Get account split id.
Returns:
str: Account split id.
"""
return self.account_split_id
def set_debit_or_credit(self, debit_or_credit):
"""Set debit or credit.
Args:
debit_or_credit(str): Debit or credit.
"""
self.debit_or_credit = debit_or_credit
def get_debit_or_credit(self):
"""Get debit or credit.
Returns:
str: Debit or credit.
"""
return self.debit_or_credit
def set_exchange_rate(self, exchange_rate):
"""Set exchange rate.
Args:
exchange_rate(float): Exchange rate.
"""
self.exchange_rate = exchange_rate
def get_exchange_rate(self):
"""Get exchange rate.
Returns:
float: Exchange rate.
"""
return self.exchange_rate
def set_currency_id(self, currency_id):
"""Set currency id.
Args:
            currency_id(str): Currency id.
"""
self.currency_id = currency_id
def get_currency_id(self):
"""Get currency id.
Returns:
str: Currency id.
"""
return self.currency_id
def set_currency_code(self, currency_code):
"""Set currency code.
Args:
currency_code(str): Currency code.
"""
self.currency_code = currency_code
def get_currency_code(self):
"""Get currency code.
Returns:
str: Currency code.
"""
return self.currency_code
def set_bcy_amount(self, bcy_amount):
"""Set bcy amount.
Args:
bcy_amount(float): Bcy amount.
"""
self.bcy_amount = bcy_amount
def get_bcy_amount(self):
"""Get bcy amount.
Returns:
float: Bcy amount.
"""
return self.bcy_amount
def set_amount(self, amount):
"""Set amount.
Args:
amount(float): Amount.
"""
self.amount = amount
def get_amount(self):
"""Get amount.
Returns:
float: Amount.
"""
return self.amount
def to_json(self):
"""This method is used to create json object for accounts.
Returns:
dict: Dictionary containing json object for accounts.
"""
data = {}
if self.account_id != '':
data['account_id'] = self.account_id
if self.debit_or_credit != '':
data['debit_or_credit'] = self.debit_or_credit
if self.exchange_rate > 0:
data['exchange_rate'] = self.exchange_rate
if self.currency_id != '':
data['currency_id'] = self.currency_id
if self.amount > 0:
data['amount'] = self.amount
return data
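if __name__ == '__main__':
    # Usage sketch, not part of the original module; the account id and
    # amount are made-up sample values.
    account = Account()
    account.set_account_id('460000000038080')
    account.set_debit_or_credit('debit')
    account.set_amount(120.0)
    print(account.to_json())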
| {
"repo_name": "zoho/books-python-wrappers",
"path": "books/model/Account.py",
"copies": "1",
"size": "8620",
"license": "mit",
"hash": -2245876527502801400,
"line_mean": 21.2164948454,
"line_max": 73,
"alpha_frac": 0.5379350348,
"autogenerated": false,
"ratio": 4.098906324298621,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136841359098621,
"avg_score": null,
"num_lines": null
} |
#$Id$
class Activity:
"""This class is used create object for activity."""
def __init__(self):
"""Initialize the parameters for Activity object."""
self.id = 0
self.state = ""
self.activity_for = ""
self.name = ""
self.activity_by = ""
self.time_long = 0
self.display_time = ""
self.time = ""
def set_id(self, id):
"""Set id.
Args:
id(long): Id.
"""
self.id = id
def get_id(self):
"""Get id.
Returns:
long: Id.
"""
return self.id
def set_state(self, state):
"""Set state.
Args:
state(str): State.
"""
self.state = state
def get_state(self):
"""Get state.
Returns:
str: State.
"""
return self.state
def set_activity_for(self, activity_for):
"""Set activity for.
Args:
activity_for(str): Activity for.
"""
self.activity_for = activity_for
def get_activity_for(self):
"""Get activity for.
Returns:
str: Activity for.
"""
return self.activity_for
def set_name(self, name):
"""Set name.
Args:
name(str): Name.
"""
self.name = name
def get_name(self):
"""Get name.
Returns:
str: Name.
"""
return self.name
def set_activity_by(self, activity_by):
"""Set activity by.
Args:
activity_by(str): Activity by.
"""
self.activity_by = activity_by
def get_activity_by(self):
"""Get activity by.
Returns:
str: Activity by.
"""
return self.activity_by
def set_time_long(self, time_long):
"""Set time long.
Args:
time_long(long): Time long.
"""
self.time_long = time_long
def get_time_long(self):
"""Get time long.
Returns:
long: Time long.
"""
return self.time_long
def set_display_time(self, display_time):
"""Set display time.
Args:
display_time(str): Display time.
"""
self.display_time = display_time
def get_display_time(self):
"""Get display time.
Returns:
str: Display time.
"""
return self.display_time
def set_time(self, time):
"""Set time.
Args:
time(str): Time.
"""
self.time = time
def get_time(self):
"""Get time.
Returns:
str: Time.
"""
return self.time
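if __name__ == '__main__':
    # Usage sketch, not part of the original module; all values are
    # made-up samples.
    activity = Activity()
    activity.set_name('Task closed')
    activity.set_activity_by('Patricia Boyle')
    print(activity.get_name() + ' by ' + activity.get_activity_by())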
| {
"repo_name": "zoho/projects-python-wrappers",
"path": "projects/model/Activity.py",
"copies": "1",
"size": "2754",
"license": "mit",
"hash": -4223414069182418000,
"line_mean": 16.2125,
"line_max": 60,
"alpha_frac": 0.4567901235,
"autogenerated": false,
"ratio": 4.2110091743119265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006326388888888888,
"num_lines": 160
} |
#$Id$
class Address:
"""This class is used to create an object for Address."""
def __init__(self):
"""Initialize parameters for address object"""
self.address = ''
self.city = ''
self.state = ''
self.zip = ''
self.country = ''
self.fax = ''
self.is_update_customer = None
self.street_address1 = ''
self.street_address2 = ''
def set_address(self, address):
"""Set address.
Args:
address(str): Address.
"""
self.address = address
def get_address(self):
"""Get address.
Returns:
str: Address.
"""
return self.address
def set_city(self, city):
"""Set city.
Args:
            city(str): City.
"""
self.city = city
def get_city(self):
"""Get city.
Returns:
str: City.
"""
return self.city
def set_state(self, state):
"""Set state.
Args:
state(str): State.
"""
self.state = state
def get_state(self):
"""Get the state.
Returns:
str: State.
"""
return self.state
def set_zip(self, zip_code):
"""Set zip.
Args:
zip_code(str): Zip code.
"""
self.zip = zip_code
def get_zip(self):
"""Get zip.
Returns:
str: Zip code.
"""
return self.zip
def set_country(self, country):
"""Set country.
Args:
country(str): Country.
"""
self.country = country
def get_country(self):
"""Get country.
Returns:
str: Country.
"""
return self.country
def set_fax(self, fax):
"""Set fax.
Args:
fax(str): Fax.
"""
self.fax = fax
def get_fax(self):
"""Get fax.
Returns:
str: Fax.
"""
return self.fax
def set_is_update_customer(self, is_update_customer):
"""Set whether to update customer.
Args:
is_update_customer(bool): True to update customer else False.
"""
self.is_update_customer = is_update_customer
def get_is_update_customer(self):
"""Get is update customer
Returns:
bool: True to update customer else False.
"""
return self.is_update_customer
def set_street_address1(self, street_address1):
"""Set street address1.
Args:
street_address1(str): Street address 1.
"""
self.street_address1 = street_address1
def get_street_address1(self):
"""Get street address1.
Returns:
str: Street address 1.
"""
return self.street_address1
def set_street_address2(self, street_address2):
"""Set street address 2.
Args:
street_address2(str): street address 2.
"""
self.street_address2 = street_address2
def get_street_address2(self):
"""Get street address 2.
Returns:
str: Street address 2.
"""
return self.street_address2
def to_json(self):
"""This method is used to convert the address object to JSON object.
Returns:
dict: Dictionary containing details of address object.
"""
address = {}
if self.street_address1 != '':
address['street_address1'] = self.street_address1
if self.street_address2 != '':
address['street_address2'] = self.street_address2
if self.address != '':
address['address'] = self.address
if self.city != '':
address['city'] = self.city
if self.state != '':
address['state'] = self.state
if self.zip != '':
address['zip'] = self.zip
if self.country != '':
address['country'] = self.country
if self.fax != '':
address['fax'] = self.fax
return address
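if __name__ == '__main__':
    # Usage sketch, not part of the original module; the address fields
    # are made-up samples.
    address = Address()
    address.set_address('4900 Hopyard Rd')
    address.set_city('Pleasanton')
    address.set_zip('94588')
    print(address.to_json())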
| {
"repo_name": "zoho/books-python-wrappers",
"path": "books/model/Address.py",
"copies": "1",
"size": "4317",
"license": "mit",
"hash": 8009073419537118000,
"line_mean": 20.2660098522,
"line_max": 76,
"alpha_frac": 0.4704656011,
"autogenerated": false,
"ratio": 4.343058350100604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03406890044821079,
"num_lines": 203
} |
# $Id$
class advectionProperties:
kits = ['vtk_kit']
cats = ['Sources']
help = """Given a series of prepared advection volumes (each input is a
timestep), calculate a number of metrics.
The first input HAS to have a VolumeIndex PointData attribute/array. For
example, the output of the pointsToSpheres that you used BEFORE having
passed through the first probeFilters. This first input will NOT be used
for the actual calculations, but only for point -> volume lookups.
Calculations will be performed for the second input and onwards.
This module writes a CSV file with the volume centroids over time, and
secretly writes a python file with all data as a python nested list.
This can easily be loaded in a Python script for further analysis.
"""
class cptDistanceField:
kits = ['vtk_kit']
cats = ['Sources']
help = """Driver module for Mauch's CPT code.
    This takes image data and a mesh as inputs. The image data is only used
to determine the bounds of the output distance field. The mesh
is converted to the CPT brep format using the DeVIDE cptBrepWRT module.
A geom file is created. The CPT driver is executed with the geom and
brep files. The output distance field is read, y-axis is flipped, and
the whole shebang is made available at the output.
Contributions to module code by Stef Busking.
Suggestion: On Windows, your driver.bat could make use of plink /
pscp to copy data to a linux server and run Mauch's code there,
and then copy everything back. On Linux you can run Mauch's code
directly.
"""
class implicitToVolume:
kits = ['vtk_kit']
cats = ['Sources']
help = """Given an implicit function, this module will evaluate it over
a volume and yield that volume as output.
"""
class manualTransform:
kits = ['vtk_kit']
cats = ['Sources']
help = """Manually create linear transform by entering scale factors,
rotation angles and translations.
Scaling is performed, then rotation, then translation. It is often easier
to chain manualTransform modules than performing all transformations at
once.
"""
class MarschnerLobb:
kits = ['vtk_kit']
cats = ['Sources']
help = """Pure Python filter to generate Marschner-Lobb test
volume.
When resolution is left at the default value of 40, the generated
volume should be at the Nyquist frequency of the signal (analytic
function) that it has sampled.
Pure Python implementation: for the default resolution, takes a
second or two, 200 cubed takes about 20 seconds on Core2 hardware.
"""
class PassThrough:
kits = []
cats = ['Filters', 'System']
help = """Simple pass-through filter.
This is quite useful if you have a source that connects to a
number of consumers, and you want to be able to change sources
easily. Connect the source to the PassThrough, and the
PassThrough output to all the consumers. Now when you replace the
source, you only have to reconnect one module.
"""
class pointsToSpheres:
kits = ['vtk_kit']
cats = ['Sources']
help = """Given a set of selected points (for instance from a slice3dVWR),
generate polydata spheres centred at these points with user-specified
radius. The spheres' interiors are filled with smaller spheres. This is
useful when using selected points to generate points for seeding
streamlines or calculating advection by a vector field.
Each point's sphere has an array associated to its pointdata called
'VolumeIndex'. All values in this array are equal to the corresponding
point's index in the input points list.
"""
class superQuadric:
kits = ['vtk_kit']
cats = ['Sources']
help = """Generates a SuperQuadric implicit function and polydata as
outputs.
"""
| {
"repo_name": "ivoflipse/devide",
"path": "modules/misc/module_index.py",
"copies": "7",
"size": "3893",
"license": "bsd-3-clause",
"hash": -7746027245938686000,
"line_mean": 35.0462962963,
"line_max": 78,
"alpha_frac": 0.7033136399,
"autogenerated": false,
"ratio": 4.141489361702128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015082164953446418,
"num_lines": 108
} |
#$Id$
class BankAccount:
"""This class is used to create object for Bank accounts."""
def __init__(self):
"""Initialize parameters for Bank accounts object."""
self.account_id = ''
self.account_name = ''
self.currency_id = ''
self.currency_code = ''
self.account_type = ''
self.account_number = ''
self.uncategorized_transactions = ''
self.is_active = None
self.balance = 0.0
self.bank_name = ''
self.routing_number = ''
self.is_primary_account = None
self.is_paypal_account = None
self.paypal_email_address = ''
self.description = ''
self.paypal_type = ''
def set_account_id(self, account_id):
"""Set account id.
Args:
account_id(str): Account id.
"""
self.account_id = account_id
def get_account_id(self):
"""Get account id.
Returns:
str: Account id.
"""
return self.account_id
def set_account_name(self, account_name):
"""Set account name.
Args:
account_name(str): Account name.
"""
self.account_name = account_name
def get_account_name(self):
"""Get account name.
Returns:
str: Account name.
"""
return self.account_name
def set_currency_id(self, currency_id):
"""Set currency id.
Args:
currency_id(str): Currency id.
"""
self.currency_id = currency_id
def get_currency_id(self):
"""Get currency id.
Returns:
str: Currency id.
"""
return self.currency_id
def set_currency_code(self, currency_code):
"""Set currecny code.
Args:
currency_code(str): Currency code.
"""
self.currency_code = currency_code
def get_currency_code(self):
"""Get currency code.
Returns:
str: Currency code.
"""
return self.currency_code
def set_account_type(self, account_type):
"""Set account type.
Args:
account_type(str): Account type.
"""
self.account_type = account_type
def get_account_type(self):
"""Get account type.
Returns:
str: Account type.
"""
return self.account_type
def set_account_number(self, account_number):
"""Set account number.
Args:
            account_number(str): Account number.
"""
        self.account_number = account_number
def get_account_number(self):
"""Get account number.
Returns:
str: Account number.
"""
return self.account_number
def set_uncategorized_transactions(self, uncategorized_transactions):
"""Set uncategorized transactions.
Args:
uncategorized_transactions(str): Uncategorized transactions.
"""
self.uncategorized_transactions = uncategorized_transactions
def get_uncategorized_transactions(self):
"""Get uncategorized transactions.
Returns:
str: Uncategorized transactions.
"""
return self.uncategorized_transactions
def set_is_active(self, is_active):
"""Set whether the account is active or not.
Args:
is_active(bool): True if it is active else False.
"""
self.is_active = is_active
def get_is_active(self):
"""Get whether the bank account is active or not.
Returns:
bool: True if active else False.
"""
return self.is_active
def set_balance(self, balance):
"""Set balance.
Args:
balance(float): Balance.
"""
self.balance = balance
def get_balance(self):
"""Get balance.
Returns:
float: Balance.
"""
return self.balance
def set_bank_name(self, bank_name):
"""Set bank name.
Args:
bank_name(str): Bank name.
"""
self.bank_name = bank_name
def get_bank_name(self):
"""Get bank name.
Returns:
str: Bank name.
"""
return self.bank_name
def set_routing_number(self, routing_number):
"""Set routing number.
Args:
routing_number(str): Routing number.
"""
self.routing_number = routing_number
def get_routing_number(self):
"""Get routing number.
Returns:
str: Routing number.
"""
return self.routing_number
def set_is_primary_account(self, is_primary_account):
"""Set whether the bank account is primary account or not.
Args:
is_primary_account(bool): True if it is primary account else False.
"""
self.is_primary_account = is_primary_account
def get_is_primary_account(self):
"""Get whether the bank account is primary account or not.
Returns:
bool: True if it is primary account else False.
"""
return self.is_primary_account
def set_is_paypal_account(self, is_paypal_account):
"""Set whether the account is paypal account.
Args:
is_paypal_account(bool): True if the account is paypal account.
"""
self.is_paypal_account = is_paypal_account
def get_is_paypal_account(self):
"""Get whether the account is paypal account.
Returns:
bool: True if the account is paypal account else False.
"""
return self.is_paypal_account
def set_paypal_email_address(self, paypal_email_address):
"""Set paypal email address.
Args:
            paypal_email_address(str): Paypal email address.
"""
self.paypal_email_address = paypal_email_address
def get_paypal_email_address(self):
"""Get paypal email address.
Returns:
            str: Paypal email address.
"""
return self.paypal_email_address
def set_description(self, description):
"""Set description.
Args:
description(str): Description.
"""
        self.description = description
def get_description(self):
"""Get description.
Returns:
str: Description.
"""
return self.description
def set_paypal_type(self, paypal_type):
"""Set paypal type.
Args:
paypal_type(str): Paypal type.
"""
self.paypal_type = paypal_type
def get_paypal_type(self):
"""Get paypal type.
Returns:
str: Paypal type.
"""
return self.paypal_type
def to_json(self):
"""This method is used to create json object for bank acoounts.
Returns:
dict: Dictionary containing json object for bank accounts.
"""
data = {}
if self.account_name != '':
data['account_name'] = self.account_name
if self.account_type != '':
data['account_type'] = self.account_type
if self.account_number != '':
data['account_number'] = self.account_number
if self.currency_id != '':
data['currency_id'] = self.currency_id
if self.description != '':
data['description'] = self.description
if self.bank_name != '':
data['bank_name'] = self.bank_name
if self.routing_number != '':
data['routing_number'] = self.routing_number
if self.is_primary_account is not None:
data['is_primary_account'] = self.is_primary_account
if self.is_paypal_account is not None:
data['is_paypal_account'] = self.is_paypal_account
if self.paypal_type != '':
data['paypal_type'] = self.paypal_type
if self.paypal_email_address != '':
data['paypal_email_address'] = self.paypal_email_address
return data
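if __name__ == '__main__':
    # Usage sketch, not part of the original module; all field values
    # are made-up samples.
    account = BankAccount()
    account.set_account_name('Petty Cash')
    account.set_account_type('bank')
    account.set_routing_number('121000248')
    account.set_is_primary_account(True)
    print(account.to_json())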
| {
"repo_name": "zoho/books-python-wrappers",
"path": "books/model/BankAccount.py",
"copies": "1",
"size": "8119",
"license": "mit",
"hash": -759580501573415000,
"line_mean": 22.601744186,
"line_max": 79,
"alpha_frac": 0.5455105309,
"autogenerated": false,
"ratio": 4.304878048780488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015889872797449394,
"num_lines": 344
} |