# $Id: CacheRegion.py,v 1.1 2006-09-06 09:50:08 skyostil Exp $
"""Cache holder classes for Cheetah:
Cache regions are defined using the #cache Cheetah directive. Each
cache region can be viewed as a dictionary (keyed by cacheRegionID)
handling at least one cache item (the default one). It's possible to add
cacheItems in a region by using the `varyBy` #cache directive parameter as
in the following example::
#def getArticle
this is the article content.
#end def
#cache varyBy=$getArticleID()
$getArticle($getArticleID())
#end cache
The code above will generate a CacheRegion and add a new cacheItem for each value
of $getArticleID().
Meta-Data
================================================================================
Author: Tavis Rudd <[email protected]> and Philippe Normand <[email protected]>
Version: $Revision: 1.1 $
Start Date: 2005/06/20
Last Revision Date: $Date: 2006-09-06 09:50:08 $
"""
__author__ = "Tavis Rudd <[email protected]> and Philippe Normand <[email protected]>"
__revision__ = "$Revision: 1.1 $"[11:-2]
import hashlib
from time import time as currentTime
from Cheetah.CacheStore import MemoryCacheStore
class CacheItem:
"""A CacheItem is a container storing:
- cacheID (string)
- refreshTime (timestamp or None) : last time the cache was refreshed
- data (string) : the content of the cache
"""
def __init__(self, cacheItemID, cacheStore):
self._cacheItemID = cacheItemID
self._cacheStore = cacheStore
self._refreshTime = None
self._expiryTime = 0
def hasExpired(self):
return (self._expiryTime and currentTime() > self._expiryTime)
def setExpiryTime(self, time):
self._expiryTime = time
def getExpiryTime(self):
return self._expiryTime
def setData(self, data):
self._refreshTime = currentTime()
self._cacheStore.set(self._cacheItemID, data, self._expiryTime)
def getRefreshTime(self):
return self._refreshTime
def getData(self):
assert self._refreshTime
return self._cacheStore.get(self._cacheItemID)
def renderOutput(self):
"""Can be overridden to implement edge-caching"""
return self.getData() or ""
def clear(self):
self._cacheStore.delete(self._cacheItemID)
self._refreshTime = None
class _CacheDataStoreWrapper:
def __init__(self, dataStore, keyPrefix):
self._dataStore = dataStore
self._keyPrefix = keyPrefix
def get(self, key):
return self._dataStore.get(self._keyPrefix+key)
def delete(self, key):
self._dataStore.delete(self._keyPrefix+key)
def set(self, key, val, time=0):
self._dataStore.set(self._keyPrefix+key, val, time=time)
class CacheRegion:
""" A `CacheRegion` stores some `CacheItem` instances.
This implementation stores the data in the memory of the current process.
If you need a more advanced data store, create a cacheStore class that works
with Cheetah's CacheStore protocol and provide it as the cacheStore argument
to __init__. For example you could use
Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
memcached API (http://www.danga.com/memcached).
"""
_cacheItemClass = CacheItem
def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
self._isNew = True
self._regionID = regionID
self._templateCacheIdPrefix = templateCacheIdPrefix
if not cacheStore:
cacheStore = MemoryCacheStore()
self._cacheStore = cacheStore
self._wrappedCacheDataStore = _CacheDataStoreWrapper(
cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':')
self._cacheItems = {}
def isNew(self):
return self._isNew
def clear(self):
" drop all the caches stored in this cache region "
        for cacheItemId in list(self._cacheItems.keys()):  # copy the keys; the dict is mutated below
cacheItem = self._cacheItems[cacheItemId]
cacheItem.clear()
del self._cacheItems[cacheItemId]
def getCacheItem(self, cacheItemID):
""" Lazy access to a cacheItem
Try to find a cache in the stored caches. If it doesn't
exist, it's created.
Returns a `CacheItem` instance.
"""
cacheItemID = hashlib.md5(str(cacheItemID)).hexdigest()
        if cacheItemID not in self._cacheItems:
cacheItem = self._cacheItemClass(
cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore)
self._cacheItems[cacheItemID] = cacheItem
self._isNew = False
return self._cacheItems[cacheItemID]
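# A minimal sketch of driving CacheRegion directly, outside a compiled
# template. The region and item IDs are illustrative; it relies only on the
# in-process MemoryCacheStore default used above.
if __name__ == '__main__':
    region = CacheRegion(regionID='article', templateCacheIdPrefix='demo')
    item = region.getCacheItem('article-42')      # created lazily, keyed by md5
    item.setExpiryTime(currentTime() + 60)        # expires one minute from now
    item.setData('this is the article content.')
    assert not item.hasExpired()
    print item.renderOutput()                     # -> this is the article content.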
| {
"repo_name": "skyostil/tracy",
"path": "src/generator/Cheetah/CacheRegion.py",
"copies": "1",
"size": "4911",
"license": "mit",
"hash": 3986817323145864700,
"line_mean": 33.5869565217,
"line_max": 89,
"alpha_frac": 0.6281816331,
"autogenerated": false,
"ratio": 3.982968369829684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111150002929683,
"avg_score": null,
"num_lines": null
} |
# $Id: CacheRegion.py,v 1.3 2006/01/28 04:19:30 tavis_rudd Exp $
'''
Cache holder classes for Cheetah:
Cache regions are defined using the #cache Cheetah directive. Each
cache region can be viewed as a dictionary (keyed by cacheRegionID)
handling at least one cache item (the default one). It's possible to add
cacheItems in a region by using the `varyBy` #cache directive parameter as
in the following example::
#def getArticle
this is the article content.
#end def
#cache varyBy=$getArticleID()
$getArticle($getArticleID())
#end cache
The code above will generate a CacheRegion and add a new cacheItem for each value
of $getArticleID().
'''
try:
from hashlib import md5
except ImportError:
from md5 import md5
import time
import Cheetah.CacheStore
class CacheItem(object):
'''
A CacheItem is a container storing:
- cacheID (string)
- refreshTime (timestamp or None) : last time the cache was refreshed
- data (string) : the content of the cache
'''
def __init__(self, cacheItemID, cacheStore):
self._cacheItemID = cacheItemID
self._cacheStore = cacheStore
self._refreshTime = None
self._expiryTime = 0
def hasExpired(self):
return (self._expiryTime and time.time() > self._expiryTime)
def setExpiryTime(self, time):
self._expiryTime = time
def getExpiryTime(self):
return self._expiryTime
def setData(self, data):
self._refreshTime = time.time()
self._cacheStore.set(self._cacheItemID, data, self._expiryTime)
def getRefreshTime(self):
return self._refreshTime
def getData(self):
assert self._refreshTime
return self._cacheStore.get(self._cacheItemID)
def renderOutput(self):
"""Can be overridden to implement edge-caching"""
return self.getData() or ""
def clear(self):
self._cacheStore.delete(self._cacheItemID)
self._refreshTime = None
class _CacheDataStoreWrapper(object):
def __init__(self, dataStore, keyPrefix):
self._dataStore = dataStore
self._keyPrefix = keyPrefix
def get(self, key):
return self._dataStore.get(self._keyPrefix+key)
def delete(self, key):
self._dataStore.delete(self._keyPrefix+key)
def set(self, key, val, time=0):
self._dataStore.set(self._keyPrefix+key, val, time=time)
class CacheRegion(object):
'''
A `CacheRegion` stores some `CacheItem` instances.
This implementation stores the data in the memory of the current process.
If you need a more advanced data store, create a cacheStore class that works
with Cheetah's CacheStore protocol and provide it as the cacheStore argument
to __init__. For example you could use
Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
memcached API (http://www.danga.com/memcached).
'''
_cacheItemClass = CacheItem
def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
self._isNew = True
self._regionID = regionID
self._templateCacheIdPrefix = templateCacheIdPrefix
if not cacheStore:
cacheStore = Cheetah.CacheStore.MemoryCacheStore()
self._cacheStore = cacheStore
self._wrappedCacheDataStore = _CacheDataStoreWrapper(
cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':')
self._cacheItems = {}
def isNew(self):
return self._isNew
def clear(self):
" drop all the caches stored in this cache region "
        for cacheItemId in list(self._cacheItems.keys()):  # copy the keys; the dict is mutated below
cacheItem = self._cacheItems[cacheItemId]
cacheItem.clear()
del self._cacheItems[cacheItemId]
def getCacheItem(self, cacheItemID):
""" Lazy access to a cacheItem
Try to find a cache in the stored caches. If it doesn't
exist, it's created.
Returns a `CacheItem` instance.
"""
cacheItemID = md5(str(cacheItemID)).hexdigest()
if cacheItemID not in self._cacheItems:
cacheItem = self._cacheItemClass(
cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore)
self._cacheItems[cacheItemID] = cacheItem
self._isNew = False
return self._cacheItems[cacheItemID]
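# A sketch of a custom backend for the cacheStore argument. Only the methods
# exercised by _CacheDataStoreWrapper above (get/set/delete) are needed; this
# dict-backed class is an illustration, not a substitute for the real
# MemcachedCacheStore.
class DictCacheStore(object):
    def __init__(self):
        self._data = {}
    def set(self, key, val, time=0):
        # 'time' mirrors the expiry argument the wrapper passes through
        self._data[key] = val
    def get(self, key):
        return self._data[key]
    def delete(self, key):
        self._data.pop(key, None)

# usage: region = CacheRegion('page', cacheStore=DictCacheStore())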
| {
"repo_name": "nzavagli/UnrealPy",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Cheetah-2.4.4/cheetah/CacheRegion.py",
"copies": "15",
"size": "4421",
"license": "mit",
"hash": 8394706611386328000,
"line_mean": 31.5073529412,
"line_max": 80,
"alpha_frac": 0.6512101335,
"autogenerated": false,
"ratio": 4.0190909090909095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009441026523069869,
"num_lines": 136
} |
import sys
from rdkit import six
if not six.PY3:
bytes = buffer
# for Python3, import cairocffi preferably
if six.PY3:
try:
import cairocffi as cairo
except ImportError:
import cairo
else:
try:
import cairo
except ImportError:
import cairocffi as cairo
if not hasattr(cairo.ImageSurface,'get_data') and \
not hasattr(cairo.ImageSurface,'get_data_as_rgba'):
raise ImportError('cairo version too old')
import math
import rdkit.RDConfig
import os, re
import array
if not 'RDK_NOPANGO' in os.environ:
try:
import pangocairo
except ImportError:
pangocairo = None
try:
import pango
except ImportError:
pango = None
else:
pango = None
pangocairo = None
from rdkit.Chem.Draw.canvasbase import CanvasBase
from PIL import Image
scriptPattern = re.compile(r'\<.+?\>')
class Canvas(CanvasBase):
def __init__(self,
image=None, # PIL image
size=None,
ctx=None,
imageType=None, # determines file type
fileName=None, # if set determines output file name
):
"""
Canvas can be used in four modes:
1) using the supplied PIL image
2) using the supplied cairo context ctx
3) writing to a file fileName with image type imageType
4) creating a cairo surface and context within the constructor
"""
self.image = None
self.imageType = imageType
if image is not None:
try:
imgd = getattr(image, 'tobytes', image.tostring)("raw", "BGRA")
except SystemError:
r, g, b, a = image.split()
mrg = Image.merge("RGBA", (b, g, r, a))
imgd = getattr(mrg, 'tobytes', mrg.tostring)("raw", "RGBA")
a = array.array('B', imgd)
stride = image.size[0] * 4
surface = cairo.ImageSurface.create_for_data(a, cairo.FORMAT_ARGB32, image.size[0],
image.size[1], stride)
ctx = cairo.Context(surface)
size = image.size[0], image.size[1]
self.image = image
elif ctx is None and size is not None:
if hasattr(cairo, "PDFSurface") and imageType == "pdf":
surface = cairo.PDFSurface(fileName, size[0], size[1])
elif hasattr(cairo, "SVGSurface") and imageType == "svg":
surface = cairo.SVGSurface(fileName, size[0], size[1])
elif hasattr(cairo, "PSSurface") and imageType == "ps":
surface = cairo.PSSurface(fileName, size[0], size[1])
elif imageType == "png":
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, size[0], size[1])
else:
raise ValueError("Unrecognized file type. Valid choices are pdf, svg, ps, and png")
ctx = cairo.Context(surface)
ctx.set_source_rgb(1, 1, 1)
ctx.paint()
else:
surface = ctx.get_target()
if size is None:
try:
size = surface.get_width(), surface.get_height()
except AttributeError:
size = None
self.ctx = ctx
self.size = size
self.surface = surface
self.fileName = fileName
def flush(self):
"""temporary interface, must be splitted to different methods,
"""
if self.fileName and self.imageType == 'png':
self.surface.write_to_png(self.fileName)
elif self.image is not None:
# on linux at least it seems like the PIL images are BGRA, not RGBA:
if hasattr(self.surface, 'get_data'):
getattr(self.image, 'frombytes', self.image.fromstring)(bytes(self.surface.get_data()),
"raw", "BGRA", 0, 1)
else:
getattr(self.image, 'frombytes', self.image.fromstring)(
bytes(self.surface.get_data_as_rgba()), "raw", "RGBA", 0, 1)
self.surface.finish()
elif self.imageType == "png":
if hasattr(self.surface, 'get_data'):
buffer = self.surface.get_data()
else:
buffer = self.surface.get_data_as_rgba()
return buffer
def _doLine(self, p1, p2, **kwargs):
if kwargs.get('dash', (0, 0)) == (0, 0):
self.ctx.move_to(p1[0], p1[1])
self.ctx.line_to(p2[0], p2[1])
else:
dash = kwargs['dash']
pts = self._getLinePoints(p1, p2, dash)
currDash = 0
dashOn = True
while currDash < (len(pts) - 1):
if dashOn:
p1 = pts[currDash]
p2 = pts[currDash + 1]
self.ctx.move_to(p1[0], p1[1])
self.ctx.line_to(p2[0], p2[1])
currDash += 1
dashOn = not dashOn
def addCanvasLine(self, p1, p2, color=(0, 0, 0), color2=None, **kwargs):
self.ctx.set_line_width(kwargs.get('linewidth', 1))
if color2 and color2 != color:
mp = (p1[0] + p2[0]) / 2., (p1[1] + p2[1]) / 2.
self.ctx.set_source_rgb(*color)
self._doLine(p1, mp, **kwargs)
self.ctx.stroke()
self.ctx.set_source_rgb(*color2)
self._doLine(mp, p2, **kwargs)
self.ctx.stroke()
else:
self.ctx.set_source_rgb(*color)
self._doLine(p1, p2, **kwargs)
self.ctx.stroke()
def _addCanvasText1(self, text, pos, font, color=(0, 0, 0), **kwargs):
if font.weight == 'bold':
weight = cairo.FONT_WEIGHT_BOLD
else:
weight = cairo.FONT_WEIGHT_NORMAL
self.ctx.select_font_face(font.face, cairo.FONT_SLANT_NORMAL, weight)
text = scriptPattern.sub('', text)
self.ctx.set_font_size(font.size)
w, h = self.ctx.text_extents(text)[2:4]
bw, bh = w + h * 0.4, h * 1.4
offset = w * pos[2]
dPos = pos[0] - w / 2. + offset, pos[1] + h / 2.
self.ctx.set_source_rgb(*color)
self.ctx.move_to(*dPos)
self.ctx.show_text(text)
if 0:
self.ctx.move_to(dPos[0], dPos[1])
self.ctx.line_to(dPos[0] + bw, dPos[1])
self.ctx.line_to(dPos[0] + bw, dPos[1] - bh)
self.ctx.line_to(dPos[0], dPos[1] - bh)
self.ctx.line_to(dPos[0], dPos[1])
self.ctx.close_path()
self.ctx.stroke()
return (bw, bh, offset)
def _addCanvasText2(self, text, pos, font, color=(0, 0, 0), **kwargs):
if font.weight == 'bold':
weight = cairo.FONT_WEIGHT_BOLD
else:
weight = cairo.FONT_WEIGHT_NORMAL
self.ctx.select_font_face(font.face, cairo.FONT_SLANT_NORMAL, weight)
orientation = kwargs.get('orientation', 'E')
cctx = pangocairo.CairoContext(self.ctx)
plainText = scriptPattern.sub('', text)
measureLout = cctx.create_layout()
measureLout.set_alignment(pango.ALIGN_LEFT)
measureLout.set_markup(plainText)
lout = cctx.create_layout()
lout.set_alignment(pango.ALIGN_LEFT)
lout.set_markup(text)
# for whatever reason, the font size using pango is larger
# than that w/ default cairo (at least for me)
fnt = pango.FontDescription('%s %d' % (font.face, font.size * .8))
lout.set_font_description(fnt)
measureLout.set_font_description(fnt)
# this is a bit kludgy, but empirically we end up with too much
# vertical padding if we use the text box with super and subscripts
# for the measurement.
iext, lext = measureLout.get_pixel_extents()
iext2, lext2 = lout.get_pixel_extents()
w = lext2[2] - lext2[0]
h = lext[3] - lext[1]
pad = [h * .2, h * .3]
# another empirical correction: labels draw at the bottom
# of bonds have too much vertical padding
if orientation == 'S':
pad[1] *= 0.5
bw, bh = w + pad[0], h + pad[1]
offset = w * pos[2]
if 0:
if orientation == 'W':
dPos = pos[0] - w + offset, pos[1] - h / 2.
elif orientation == 'E':
dPos = pos[0] - w / 2 + offset, pos[1] - h / 2.
else:
dPos = pos[0] - w / 2 + offset, pos[1] - h / 2.
self.ctx.move_to(dPos[0], dPos[1])
else:
dPos = pos[0] - w / 2. + offset, pos[1] - h / 2.
self.ctx.move_to(dPos[0], dPos[1])
self.ctx.set_source_rgb(*color)
cctx.update_layout(lout)
cctx.show_layout(lout)
if 0:
self.ctx.move_to(dPos[0], dPos[1])
self.ctx.line_to(dPos[0] + bw, dPos[1])
self.ctx.line_to(dPos[0] + bw, dPos[1] + bh)
self.ctx.line_to(dPos[0], dPos[1] + bh)
self.ctx.line_to(dPos[0], dPos[1])
self.ctx.close_path()
self.ctx.stroke()
return (bw, bh, offset)
def addCanvasText(self, text, pos, font, color=(0, 0, 0), **kwargs):
if pango is not None and pangocairo is not None:
textSize = self._addCanvasText2(text, pos, font, color, **kwargs)
else:
textSize = self._addCanvasText1(text, pos, font, color, **kwargs)
return textSize
def addCanvasPolygon(self, ps, color=(0, 0, 0), fill=True, stroke=False, **kwargs):
if not fill and not stroke:
return
dps = []
self.ctx.set_source_rgb(*color)
self.ctx.move_to(ps[0][0], ps[0][1])
for p in ps[1:]:
self.ctx.line_to(p[0], p[1])
self.ctx.close_path()
if stroke:
if fill:
self.ctx.stroke_preserve()
else:
self.ctx.stroke()
if fill:
self.ctx.fill()
def addCanvasDashedWedge(self, p1, p2, p3, dash=(2, 2), color=(0, 0, 0), color2=None, **kwargs):
self.ctx.set_line_width(kwargs.get('linewidth', 1))
self.ctx.set_source_rgb(*color)
dash = (3, 3)
pts1 = self._getLinePoints(p1, p2, dash)
pts2 = self._getLinePoints(p1, p3, dash)
if len(pts2) < len(pts1):
pts2, pts1 = pts1, pts2
for i in range(len(pts1)):
self.ctx.move_to(pts1[i][0], pts1[i][1])
self.ctx.line_to(pts2[i][0], pts2[i][1])
self.ctx.stroke()
def addCircle(self, center, radius, color=(0, 0, 0), fill=True, stroke=False, alpha=1.0,
**kwargs):
if not fill and not stroke:
return
dps = []
#import pdb; pdb.set_trace();
self.ctx.set_source_rgba(color[0], color[1], color[2], alpha)
self.ctx.arc(center[0], center[1], radius, 0, 2. * math.pi)
self.ctx.close_path()
if stroke:
if fill:
self.ctx.stroke_preserve()
else:
self.ctx.stroke()
if fill:
self.ctx.fill()
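# A short sketch of mode 3 from the constructor docstring (direct file
# output). The file name, size, and geometry are illustrative.
if __name__ == '__main__':
    canvas = Canvas(size=(200, 200), imageType='png', fileName='demo.png')
    canvas.addCanvasLine((10, 100), (190, 100), color=(0, 0, 0), linewidth=2)
    canvas.addCircle((100, 100), 30, color=(0.8, 0, 0), fill=True)
    canvas.flush()  # write_to_png() runs here because imageType is 'png'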
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Draw/cairoCanvas.py",
"copies": "1",
"size": "10317",
"license": "bsd-3-clause",
"hash": 7659760274741207000,
"line_mean": 31.4433962264,
"line_max": 98,
"alpha_frac": 0.5960065911,
"autogenerated": false,
"ratio": 3.029955947136564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4125962538236564,
"avg_score": null,
"num_lines": null
} |
import sys
try:
import cairo
except ImportError:
import cairocffi as cairo
if not hasattr(cairo.ImageSurface,'get_data') and \
not hasattr(cairo.ImageSurface,'get_data_as_rgba'):
raise ImportError('cairo version too old')
import math
import rdkit.RDConfig
import os,re
import array
if not 'RDK_NOPANGO' in os.environ:
try:
import pangocairo
except ImportError:
pangocairo=None
try:
import pango
except ImportError:
pango=None
else:
pango=None
pangocairo=None
from rdkit.Chem.Draw.canvasbase import CanvasBase
try:
import Image
except ImportError:
from PIL import Image
scriptPattern=re.compile(r'\<.+?\>')
class Canvas(CanvasBase):
def __init__(self,
image=None, # PIL image
size=None,
ctx=None,
imageType=None, # determines file type
fileName=None, # if set determines output file name
):
"""
Canvas can be used in four modes:
1) using the supplied PIL image
2) using the supplied cairo context ctx
3) writing to a file fileName with image type imageType
4) creating a cairo surface and context within the constructor
"""
self.image=None
self.imageType=imageType
if image is not None:
try:
imgd = image.tostring("raw","BGRA")
except SystemError:
r,g,b,a = image.split()
imgd = Image.merge("RGBA",(b,g,r,a)).tostring("raw","RGBA")
a = array.array('B',imgd)
stride=image.size[0]*4
surface = cairo.ImageSurface.create_for_data (
a, cairo.FORMAT_ARGB32,
image.size[0], image.size[1], stride)
ctx = cairo.Context(surface)
size=image.size[0], image.size[1]
self.image=image
elif ctx is None and size is not None:
if cairo.HAS_PDF_SURFACE and imageType == "pdf":
surface = cairo.PDFSurface (fileName, size[0], size[1])
elif cairo.HAS_SVG_SURFACE and imageType == "svg":
surface = cairo.SVGSurface (fileName, size[0], size[1])
elif cairo.HAS_PS_SURFACE and imageType == "ps":
surface = cairo.PSSurface (fileName, size[0], size[1])
elif imageType == "png":
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, size[0], size[1])
else:
raise ValueError("Unrecognized file type. Valid choices are pdf, svg, ps, and png")
ctx = cairo.Context(surface)
ctx.set_source_rgb(1,1,1)
ctx.paint()
else:
surface=ctx.get_target()
if size is None:
try:
size=surface.get_width(),surface.get_height()
except AttributeError:
size=None
self.ctx=ctx
self.size=size
self.surface=surface
self.fileName=fileName
def flush(self):
"""temporary interface, must be splitted to different methods,
"""
if self.fileName and self.imageType=='png':
self.surface.write_to_png(self.fileName)
elif self.image is not None:
# on linux at least it seems like the PIL images are BGRA, not RGBA:
if hasattr(self.surface,'get_data'):
self.image.fromstring(self.surface.get_data(),
"raw","BGRA",0,1)
else:
self.image.fromstring(self.surface.get_data_as_rgba(),
"raw","RGBA",0,1)
self.surface.finish()
elif self.imageType == "png":
if hasattr(self.surface,'get_data'):
buffer=self.surface.get_data()
else:
buffer=self.surface.get_data_as_rgba()
return buffer
def _doLine(self, p1, p2, **kwargs):
if kwargs.get('dash',(0,0)) == (0,0):
self.ctx.move_to(p1[0],p1[1])
self.ctx.line_to(p2[0],p2[1])
else:
dash = kwargs['dash']
pts = self._getLinePoints(p1,p2,dash)
currDash = 0
dashOn = True
while currDash<(len(pts)-1):
if dashOn:
p1 = pts[currDash]
p2 = pts[currDash+1]
self.ctx.move_to(p1[0],p1[1])
self.ctx.line_to(p2[0],p2[1])
currDash+=1
dashOn = not dashOn
def addCanvasLine(self,p1,p2,color=(0,0,0),color2=None,**kwargs):
self.ctx.set_line_width(kwargs.get('linewidth',1))
if color2 and color2!=color:
mp = (p1[0]+p2[0])/2.,(p1[1]+p2[1])/2.
self.ctx.set_source_rgb(*color)
self._doLine(p1,mp,**kwargs)
self.ctx.stroke()
self.ctx.set_source_rgb(*color2)
self._doLine(mp,p2,**kwargs)
self.ctx.stroke()
else:
self.ctx.set_source_rgb(*color)
self._doLine(p1,p2,**kwargs)
self.ctx.stroke()
def _addCanvasText1(self,text,pos,font,color=(0,0,0),**kwargs):
if font.weight=='bold':
weight=cairo.FONT_WEIGHT_BOLD
else:
weight=cairo.FONT_WEIGHT_NORMAL
self.ctx.select_font_face(font.face,
cairo.FONT_SLANT_NORMAL,
weight)
text = scriptPattern.sub('',text)
self.ctx.set_font_size(font.size)
w,h=self.ctx.text_extents(text)[2:4]
bw,bh=w+h*0.4,h*1.4
offset = w*pos[2]
dPos = pos[0]-w/2.+offset,pos[1]+h/2.
self.ctx.set_source_rgb(*color)
self.ctx.move_to(*dPos)
self.ctx.show_text(text)
if 0:
self.ctx.move_to(dPos[0],dPos[1])
self.ctx.line_to(dPos[0]+bw,dPos[1])
self.ctx.line_to(dPos[0]+bw,dPos[1]-bh)
self.ctx.line_to(dPos[0],dPos[1]-bh)
self.ctx.line_to(dPos[0],dPos[1])
self.ctx.close_path()
self.ctx.stroke()
return (bw,bh,offset)
def _addCanvasText2(self,text,pos,font,color=(0,0,0),**kwargs):
if font.weight=='bold':
weight=cairo.FONT_WEIGHT_BOLD
else:
weight=cairo.FONT_WEIGHT_NORMAL
self.ctx.select_font_face(font.face,
cairo.FONT_SLANT_NORMAL,
weight)
orientation=kwargs.get('orientation','E')
cctx=pangocairo.CairoContext(self.ctx)
plainText = scriptPattern.sub('',text)
measureLout = cctx.create_layout()
measureLout.set_alignment(pango.ALIGN_LEFT)
measureLout.set_markup(plainText)
lout = cctx.create_layout()
lout.set_alignment(pango.ALIGN_LEFT)
lout.set_markup(text)
# for whatever reason, the font size using pango is larger
# than that w/ default cairo (at least for me)
fnt = pango.FontDescription('%s %d'%(font.face,font.size*.8))
lout.set_font_description(fnt)
measureLout.set_font_description(fnt)
# this is a bit kludgy, but empirically we end up with too much
# vertical padding if we use the text box with super and subscripts
# for the measurement.
iext,lext=measureLout.get_pixel_extents()
iext2,lext2=lout.get_pixel_extents()
w=lext2[2]-lext2[0]
h=lext[3]-lext[1]
pad = [h*.2,h*.3]
# another empirical correction: labels draw at the bottom
# of bonds have too much vertical padding
if orientation=='S':
pad[1] *= 0.5
bw,bh=w+pad[0],h+pad[1]
offset = w*pos[2]
if 0:
if orientation=='W':
dPos = pos[0]-w+offset,pos[1]-h/2.
elif orientation=='E':
dPos = pos[0]-w/2+offset,pos[1]-h/2.
else:
dPos = pos[0]-w/2+offset,pos[1]-h/2.
self.ctx.move_to(dPos[0],dPos[1])
else:
dPos = pos[0]-w/2.+offset,pos[1]-h/2.
self.ctx.move_to(dPos[0],dPos[1])
self.ctx.set_source_rgb(*color)
cctx.update_layout(lout)
cctx.show_layout(lout)
if 0:
self.ctx.move_to(dPos[0],dPos[1])
self.ctx.line_to(dPos[0]+bw,dPos[1])
self.ctx.line_to(dPos[0]+bw,dPos[1]+bh)
self.ctx.line_to(dPos[0],dPos[1]+bh)
self.ctx.line_to(dPos[0],dPos[1])
self.ctx.close_path()
self.ctx.stroke()
return (bw,bh,offset)
def addCanvasText(self,text,pos,font,color=(0,0,0),**kwargs):
if pango is not None and pangocairo is not None:
textSize = self._addCanvasText2(text,pos,font,color,**kwargs)
else:
textSize = self._addCanvasText1(text,pos,font,color,**kwargs)
return textSize
def addCanvasPolygon(self,ps,color=(0,0,0),fill=True,stroke=False,**kwargs):
if not fill and not stroke: return
dps = []
self.ctx.set_source_rgb(*color)
self.ctx.move_to(ps[0][0],ps[0][1])
for p in ps[1:]:
self.ctx.line_to(p[0],p[1])
self.ctx.close_path()
if stroke:
if fill:
self.ctx.stroke_preserve()
else:
self.ctx.stroke()
if fill:
self.ctx.fill()
def addCanvasDashedWedge(self,p1,p2,p3,dash=(2,2),color=(0,0,0),
color2=None,**kwargs):
self.ctx.set_line_width(kwargs.get('linewidth',1))
self.ctx.set_source_rgb(*color)
dash = (3,3)
pts1 = self._getLinePoints(p1,p2,dash)
pts2 = self._getLinePoints(p1,p3,dash)
if len(pts2)<len(pts1): pts2,pts1=pts1,pts2
for i in range(len(pts1)):
self.ctx.move_to(pts1[i][0],pts1[i][1])
self.ctx.line_to(pts2[i][0],pts2[i][1])
self.ctx.stroke()
def addCircle(self,center,radius,color=(0,0,0),fill=True,stroke=False,alpha=1.0,
**kwargs):
if not fill and not stroke: return
dps = []
#import pdb; pdb.set_trace();
self.ctx.set_source_rgba(color[0],color[1],color[2],alpha)
self.ctx.arc(center[0],center[1],radius,0,2.*math.pi)
self.ctx.close_path()
if stroke:
if fill:
self.ctx.stroke_preserve()
else:
self.ctx.stroke()
if fill:
self.ctx.fill()
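# A sketch of mode 1 (drawing into an existing PIL image). flush() copies the
# cairo surface back into the PIL image; the names and sizes are illustrative,
# and the BGRA/RGBA handling is whatever the try/except above negotiates.
if __name__ == '__main__':
    img = Image.new('RGBA', (200, 200), (255, 255, 255, 255))
    canvas = Canvas(image=img)
    canvas.addCanvasLine((10, 10), (190, 190), color=(1, 0, 0))
    canvas.flush()
    img.save('demo.png')  # illustrative output path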
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/Chem/Draw/cairoCanvas.py",
"copies": "1",
"size": "9751",
"license": "bsd-3-clause",
"hash": 2428390536177379000,
"line_mean": 30.5566343042,
"line_max": 91,
"alpha_frac": 0.6077325403,
"autogenerated": false,
"ratio": 3.005856966707768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8943341780185913,
"avg_score": 0.034049545364371124,
"num_lines": 309
} |
"""$Id: category.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
#
# category element.
#
class category(validatorBase, rfc3987_full, nonhtml):
def getExpectedAttrNames(self):
return [(None,u'term'),(None,u'scheme'),(None,u'label')]
def prevalidate(self):
self.children.append(True) # force warnings about "mixed" content
if not self.attrs.has_key((None,"term")):
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"term"}))
if self.attrs.has_key((None,"scheme")):
self.value=self.attrs.getValue((None,"scheme"))
rfc3987_full.validate(self, extraParams={"element": "scheme"})
if self.attrs.has_key((None,"label")):
self.value=self.attrs.getValue((None,"label"))
nonhtml.validate(self)
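# For reference, an atom:category element that satisfies the checks above: a
# required term, a scheme validated as a full IRI, and a plain-text label:
#
#   <category term="sports" scheme="http://example.org/categories" label="Sports"/>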
| {
"repo_name": "petecummings/NewsBlur",
"path": "vendor/feedvalidator/category.py",
"copies": "16",
"size": "1079",
"license": "mit",
"hash": -8707983106216220000,
"line_mean": 34.9666666667,
"line_max": 97,
"alpha_frac": 0.6682113068,
"autogenerated": false,
"ratio": 3.127536231884058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: cdp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Discovery Protocol."""
from __future__ import absolute_import
import struct
from . import dpkt
CDP_DEVID = 1 # string
CDP_ADDRESS = 2
CDP_PORTID = 3 # string
CDP_CAPABILITIES = 4 # 32-bit bitmask
CDP_VERSION = 5 # string
CDP_PLATFORM = 6 # string
CDP_IPPREFIX = 7
CDP_VTP_MGMT_DOMAIN = 9 # string
CDP_NATIVE_VLAN = 10 # 16-bit integer
CDP_DUPLEX = 11 # 8-bit boolean
CDP_TRUST_BITMAP = 18 # 8-bit bitmask
CDP_UNTRUST_COS = 19 # 8-bit port
CDP_SYSTEM_NAME = 20 # string
CDP_SYSTEM_OID = 21 # 10-byte binary string
CDP_MGMT_ADDRESS = 22 # 32-bit number of addrs, Addresses
CDP_LOCATION = 23 # string
class CDP(dpkt.Packet):
"""Cisco Discovery Protocol.
    See more about CDP at
    https://en.wikipedia.org/wiki/Cisco_Discovery_Protocol
Attributes:
__hdr__: Header fields of CDP.
#TODO
"""
__hdr__ = (
('version', 'B', 2),
('ttl', 'B', 180),
('sum', 'H', 0)
)
class Address(dpkt.Packet):
# XXX - only handle NLPID/IP for now
__hdr__ = (
('ptype', 'B', 1), # protocol type (NLPID)
('plen', 'B', 1), # protocol length
('p', 'B', 0xcc), # IP
('alen', 'H', 4) # address length
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.alen]
class TLV(dpkt.Packet):
__hdr__ = (
('type', 'H', 0),
('len', 'H', 4)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - 4]
if self.type == CDP_ADDRESS:
n = struct.unpack('>I', self.data[:4])[0]
buf = self.data[4:]
l = []
for i in range(n):
a = CDP.Address(buf)
l.append(a)
buf = buf[len(a):]
self.data = l
def __len__(self):
if self.type == CDP_ADDRESS:
n = 4 + sum(map(len, self.data))
else:
n = len(self.data)
return self.__hdr_len__ + n
def __bytes__(self):
self.len = len(self)
if self.type == CDP_ADDRESS:
s = struct.pack('>I', len(self.data)) + \
b''.join(map(bytes, self.data))
else:
s = self.data
return self.pack_hdr() + s
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l = []
while buf:
tlv = self.TLV(buf)
l.append(tlv)
buf = buf[len(tlv):]
self.data = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __bytes__(self):
data = b''.join(map(bytes, self.data))
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
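# A short sketch (values illustrative) of building a CDP frame with one
# device-ID TLV and serializing it; __bytes__ above fills in the checksum
# when sum is left at its default of 0.
if __name__ == '__main__':
    devid = CDP.TLV(type=CDP_DEVID)
    devid.data = b'switch-lab-1'
    wire = bytes(CDP(data=[devid]))   # header + TLVs, checksum computed
    parsed = CDP(wire)                # round-trip back into a TLV list
    assert parsed.data[0].type == CDP_DEVID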
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/cdp.py",
"copies": "3",
"size": "3053",
"license": "apache-2.0",
"hash": -4306279695577335300,
"line_mean": 26.5045045045,
"line_max": 60,
"alpha_frac": 0.4854241729,
"autogenerated": false,
"ratio": 3.2757510729613735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009922966444705576,
"num_lines": 111
} |
# $Id: cdp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Discovery Protocol."""
import struct
import dpkt
CDP_DEVID = 1 # string
CDP_ADDRESS = 2
CDP_PORTID = 3 # string
CDP_CAPABILITIES = 4 # 32-bit bitmask
CDP_VERSION = 5 # string
CDP_PLATFORM = 6 # string
CDP_IPPREFIX = 7
CDP_VTP_MGMT_DOMAIN = 9 # string
CDP_NATIVE_VLAN = 10 # 16-bit integer
CDP_DUPLEX = 11 # 8-bit boolean
CDP_TRUST_BITMAP = 18 # 8-bit bitmask
CDP_UNTRUST_COS = 19 # 8-bit port
CDP_SYSTEM_NAME = 20 # string
CDP_SYSTEM_OID = 21 # 10-byte binary string
CDP_MGMT_ADDRESS = 22 # 32-bit number of addrs, Addresses
CDP_LOCATION = 23 # string
class CDP(dpkt.Packet):
__hdr__ = (
('version', 'B', 2),
('ttl', 'B', 180),
('sum', 'H', 0)
)
class Address(dpkt.Packet):
# XXX - only handle NLPID/IP for now
__hdr__ = (
('ptype', 'B', 1), # protocol type (NLPID)
('plen', 'B', 1), # protocol length
('p', 'B', 0xcc), # IP
('alen', 'H', 4) # address length
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.alen]
class TLV(dpkt.Packet):
__hdr__ = (
('type', 'H', 0),
('len', 'H', 4)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - 4]
if self.type == CDP_ADDRESS:
n = struct.unpack('>I', self.data[:4])[0]
buf = self.data[4:]
l = []
for i in range(n):
a = CDP.Address(buf)
l.append(a)
buf = buf[len(a):]
self.data = l
def __len__(self):
if self.type == CDP_ADDRESS:
n = 4 + sum(map(len, self.data))
else:
n = len(self.data)
return self.__hdr_len__ + n
def __str__(self):
self.len = len(self)
if self.type == CDP_ADDRESS:
s = struct.pack('>I', len(self.data)) + \
''.join(map(str, self.data))
else:
s = self.data
return self.pack_hdr() + s
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l = []
while buf:
tlv = self.TLV(buf)
l.append(tlv)
buf = buf[len(tlv):]
self.data = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __str__(self):
data = ''.join(map(str, self.data))
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
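# The same round-trip sketch for this Python 2 variant, which serializes via
# __str__ rather than __bytes__ (values illustrative):
if __name__ == '__main__':
    devid = CDP.TLV(type=CDP_DEVID)
    devid.data = 'switch-lab-1'
    parsed = CDP(str(CDP(data=[devid])))
    assert parsed.data[0].data == 'switch-lab-1'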
| {
"repo_name": "lkash/test",
"path": "dpkt/cdp.py",
"copies": "6",
"size": "2792",
"license": "bsd-3-clause",
"hash": -1523198895272958000,
"line_mean": 27.202020202,
"line_max": 60,
"alpha_frac": 0.4717048711,
"autogenerated": false,
"ratio": 3.216589861751152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011125750256185038,
"num_lines": 99
} |
# $Id: cdp.py 23 2006-11-08 15:45:33Z dugsong $
"""Cisco Discovery Protocol."""
import struct
import dpkt
CDP_DEVID = 1 # string
CDP_ADDRESS = 2
CDP_PORTID = 3 # string
CDP_CAPABILITIES = 4 # 32-bit bitmask
CDP_VERSION = 5 # string
CDP_PLATFORM = 6 # string
CDP_IPPREFIX = 7
CDP_VTP_MGMT_DOMAIN = 9 # string
CDP_NATIVE_VLAN = 10 # 16-bit integer
CDP_DUPLEX = 11 # 8-bit boolean
CDP_TRUST_BITMAP = 18 # 8-bit bitmask
CDP_UNTRUST_COS = 19 # 8-bit port
CDP_SYSTEM_NAME = 20 # string
CDP_SYSTEM_OID = 21 # 10-byte binary string
CDP_MGMT_ADDRESS = 22 # 32-bit number of addrs, Addresses
CDP_LOCATION = 23 # string
class CDP(dpkt.Packet):
__hdr__ = (
('version', 'B', 2),
('ttl', 'B', 180),
('sum', 'H', 0)
)
class Address(dpkt.Packet):
# XXX - only handle NLPID/IP for now
__hdr__ = (
('ptype', 'B', 1), # protocol type (NLPID)
('plen', 'B', 1), # protocol length
('p', 'B', 0xcc), # IP
('alen', 'H', 4) # address length
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.alen]
class TLV(dpkt.Packet):
__hdr__ = (
('type', 'H', 0),
('len', 'H', 4)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - 4]
if self.type == CDP_ADDRESS:
n = struct.unpack('>I', self.data[:4])[0]
buf = self.data[4:]
l = []
for i in range(n):
a = CDP.Address(buf)
l.append(a)
buf = buf[len(a):]
self.data = l
def __len__(self):
if self.type == CDP_ADDRESS:
n = 4 + sum(map(len, self.data))
else:
n = len(self.data)
return self.__hdr_len__ + n
def __str__(self):
self.len = len(self)
if self.type == CDP_ADDRESS:
s = struct.pack('>I', len(self.data)) + \
''.join(map(str, self.data))
else:
s = self.data
return self.pack_hdr() + s
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l = []
while buf:
tlv = self.TLV(buf)
l.append(tlv)
buf = buf[len(tlv):]
self.data = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __str__(self):
data = ''.join(map(str, self.data))
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
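# A sketch of the nested CDP_ADDRESS layout handled above: the TLV body is a
# 32-bit record count followed by Address packets (the IP bytes here are
# illustrative).
if __name__ == '__main__':
    addr = CDP.Address(data='\xc0\xa8\x01\x01')  # 192.168.1.1, NLPID/IP defaults
    atlv = CDP.TLV(type=CDP_ADDRESS)
    atlv.data = [addr]
    parsed = CDP(str(CDP(data=[atlv])))
    assert parsed.data[0].data[0].data == '\xc0\xa8\x01\x01'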
| {
"repo_name": "jacklee0810/QMarkdowner",
"path": "dpkt/cdp.py",
"copies": "15",
"size": "2792",
"license": "mit",
"hash": -1342541471977565400,
"line_mean": 28.3894736842,
"line_max": 60,
"alpha_frac": 0.4681232092,
"autogenerated": false,
"ratio": 3.140607424071991,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016295940559548448,
"num_lines": 95
} |
# $Id: cee003fb7c38cb61af6bf2d036245576cfc8cf44 $
"""
File Locking
============
This module provides portable advisory file locking primitives that operate on
file descriptors. POSIX-like systems and Windows systems use different
primitives to perform file locking, and these different primitives are modeled
by incompatible (and different) modules in the Python standard library. This
module provides an abstract ``FileLock`` class, and underlying
implementations, to hide the operating system dependencies behind a simple
portable interface.
To create a file lock, simply instantiate the ``FileLock`` class with an open
file descriptor. It handles the rest:
.. python::
from grizzled.io.filelock import FileLock
fd = open('/tmp/lockfile', 'r+')
lock = FileLock(fd)
lock.acquire()
...
lock.release()
You can also use the ``locked_file()`` function to simplify your code:
.. python::
from grizzled.io.filelock import locked_file
fd = open('/tmp/lockfile', 'r+')
with locked_file(fd):
...
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
from contextlib import contextmanager
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['FileLock', 'locked_file']
LOCK_CLASSES = {'posix' : '_PosixFileLock',
'nt' : '_WindowsFileLock'}
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class FileLock(object):
"""
A ``FileLock`` object models a file lock. It wraps a file descriptor
and contains methods to acquire and release a lock on the file.
File lock implementations that implement this interface are guaranteed
to be advisory, but not mandatory, file locks. (They may, in fact, also
be mandatory file locks, but they are not guaranteed to be.)
Currently, there are underlying implementations for both POSIX systems
and Windows.
"""
def __init__(self, fd):
"""
Allocate a new file lock that operates on the specified file
descriptor.
:Parameters:
fd : int
Open file descriptor. The file must be opened for writing or
updating, not reading.
"""
try:
cls = eval(LOCK_CLASSES[os.name])
self.lock = cls(fd)
except KeyError:
raise NotImplementedError, \
'''Don't know how to lock files on "%s" systems.''' % os.name
def acquire(self, no_wait=False):
"""
Lock the associated file. If someone already has the file locked,
this method will suspend the calling process, unless ``no_wait`` is
``True``.
:Parameters:
no_wait : bool
If ``False``, then ``acquire()`` will suspend the calling
process if someone has the file locked. If ``True``, then
``acquire()`` will raise an ``IOError`` if the file is
locked by someone else.
:raise IOError: If the file cannot be locked for any reason.
"""
self.lock.acquire(no_wait)
def release(self):
"""
Unlock (i.e., release the lock on) the associated file.
"""
self.lock.release()
class _PosixFileLock(object):
"""File lock implementation for POSIX-compliant systems."""
def __init__(self, fd):
self.fd = fd
def acquire(self, no_wait=False):
import fcntl
flags = fcntl.LOCK_EX
if no_wait:
flags |= fcntl.LOCK_NB
fcntl.lockf(self.fd, flags)
def release(self):
import fcntl
fcntl.lockf(self.fd, fcntl.LOCK_UN)
class _WindowsFileLock(object):
"""File lock implementation for Windows systems."""
def __init__(self, fd):
self.fd = fd
    def acquire(self, no_wait=False):
        # named 'acquire' so that FileLock.acquire() can delegate to it
        import msvcrt
        if no_wait:
            op = msvcrt.LK_NBLCK
        else:
            op = msvcrt.LK_LOCK
        self.fd.seek(0)
        # msvcrt.locking() expects an OS-level descriptor, not a file object
        msvcrt.locking(self.fd.fileno(), op, 1)
    def release(self):
        # named 'release' so that FileLock.release() can delegate to it
        import msvcrt
        self.fd.seek(0)
        msvcrt.locking(self.fd.fileno(), msvcrt.LK_UNLCK, 1)
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
@contextmanager
def locked_file(fd, no_wait=False):
"""
This function is intended to be used as a ``with`` statement context
manager. It wraps a ``FileLock`` object so that the locking and unlocking
of the file descriptor are automatic. With the ``locked_file()`` function,
you can replace this code:
.. python::
lock = FileLock(fd)
lock.acquire()
try:
do_something()
finally:
lock.release()
with this code:
.. python::
with locked_file(fd):
do_something()
:Parameters:
fd : int
Open file descriptor. The file must be opened for writing
or updating, not reading.
no_wait : bool
If ``False``, then ``locked_file()`` will suspend the calling
process if someone has the file locked. If ``True``, then
``locked_file()`` will raise an ``IOError`` if the file is
locked by someone else.
"""
locked = False
try:
lock = FileLock(fd)
lock.acquire(no_wait)
locked = True
yield lock
finally:
if locked:
lock.release()
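# In addition to the blocking form shown in the docstrings, no_wait turns a
# contended lock into an immediate IOError. A short sketch; the path and the
# write inside the critical section are illustrative:
if __name__ == '__main__':
    fd = open('/tmp/lockfile', 'w+')
    try:
        with locked_file(fd, no_wait=True):
            fd.write('held\n')   # stand-in for real critical-section work
    except IOError:
        print 'someone else holds the lock; try again later'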
| {
"repo_name": "undoware/neutron-drive",
"path": "google_appengine/lib/grizzled/grizzled/io/filelock.py",
"copies": "19",
"size": "5831",
"license": "bsd-3-clause",
"hash": 3702866897867850000,
"line_mean": 27.8663366337,
"line_max": 79,
"alpha_frac": 0.5374721317,
"autogenerated": false,
"ratio": 4.5913385826771655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: CGITemplate.py,v 1.6 2006/01/29 02:09:59 tavis_rudd Exp $
"""A subclass of Cheetah.Template for use in CGI scripts.
Usage in a template:
#extends Cheetah.Tools.CGITemplate
#implements respond
$cgiHeaders#slurp
Usage in a template inheriting a Python class:
1. The template
#extends MyPythonClass
#implements respond
$cgiHeaders#slurp
2. The Python class
from Cheetah.Tools import CGITemplate
class MyPythonClass(CGITemplate):
def cgiHeadersHook(self):
return "Content-Type: text/html; charset=koi8-r\n\n"
To read GET/POST variables, use the .webInput method defined in
Cheetah.Utils.WebInputMixin (available in all templates without importing
anything), use Python's 'cgi' module, or make your own arrangements.
This class inherits from Cheetah.Template to make it usable in Cheetah's
single-inheritance model.
Meta-Data
================================================================================
Author: Mike Orr <[email protected]>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.6 $
Start Date: 2001/10/03
Last Revision Date: $Date: 2006/01/29 02:09:59 $
"""
__author__ = "Mike Orr <[email protected]>"
__revision__ = "$Revision: 1.6 $"[11:-2]
import os
from Cheetah.Template import Template
class CGITemplate(Template):
"""Methods useful in CGI scripts.
Any class that inherits this mixin must also inherit Cheetah.Servlet.
"""
def cgiHeaders(self):
"""Outputs the CGI headers if this is a CGI script.
Usage: $cgiHeaders#slurp
Override .cgiHeadersHook() if you want to customize the headers.
"""
if self.isCgi():
return self.cgiHeadersHook()
def cgiHeadersHook(self):
"""Override if you want to customize the CGI headers.
"""
return "Content-type: text/html\n\n"
def isCgi(self):
"""Is this a CGI script?
"""
env = 'REQUEST_METHOD' in os.environ
wk = self._CHEETAH__isControlledByWebKit
return env and not wk
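# A minimal sketch of the "template inheriting a Python class" pattern from
# the module docstring; the class name and charset are illustrative.
class Koi8Page(CGITemplate):
    def cgiHeadersHook(self):
        # override the default text/html header with an explicit charset
        return "Content-Type: text/html; charset=koi8-r\n\n"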
# vim: shiftwidth=4 tabstop=4 expandtab
| {
"repo_name": "dragondjf/QMarkdowner",
"path": "Cheetah/Tools/CGITemplate.py",
"copies": "15",
"size": "2200",
"license": "mit",
"hash": 7283538975584913000,
"line_mean": 27.5714285714,
"line_max": 80,
"alpha_frac": 0.6518181818,
"autogenerated": false,
"ratio": 3.6303630363036303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""$Id: channel.py 711 2006-10-25 00:43:41Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 711 $"
__date__ = "$Date: 2006-10-25 00:43:41 +0000 (Wed, 25 Oct 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import *
from itunes import itunes_channel
from extension import *
#
# channel element.
#
class channel(validatorBase, rfc2396, extension_channel, itunes_channel):
def __init__(self):
self.link=None
validatorBase.__init__(self)
def validate(self):
if not "description" in self.children:
self.log(MissingDescription({"parent":self.name,"element":"description"}))
if not "link" in self.children:
self.log(MissingLink({"parent":self.name, "element":"link"}))
if not "title" in self.children:
self.log(MissingTitle({"parent":self.name, "element":"title"}))
if not "dc_language" in self.children and not "language" in self.children:
if not self.xmlLang:
self.log(MissingDCLanguage({"parent":self.name, "element":"language"}))
if self.children.count("image") > 1:
self.log(DuplicateElement({"parent":self.name, "element":"image"}))
if self.children.count("textInput") > 1:
self.log(DuplicateElement({"parent":self.name, "element":"textInput"}))
if self.children.count("skipHours") > 1:
self.log(DuplicateElement({"parent":self.name, "element":"skipHours"}))
if self.children.count("skipDays") > 1:
self.log(DuplicateElement({"parent":self.name, "element":"skipDays"}))
if self.attrs.has_key((rdfNS,"about")):
self.value = self.attrs.getValue((rdfNS, "about"))
rfc2396.validate(self, extraParams={"attr": "rdf:about"})
if not "items" in self.children:
self.log(MissingElement({"parent":self.name, "element":"items"}))
if self.itunes: itunes_channel.validate(self)
def do_image(self):
from image import image
return image(), noduplicates()
def do_textInput(self):
from textInput import textInput
return textInput(), noduplicates()
def do_textinput(self):
if not self.attrs.has_key((rdfNS,"about")):
# optimize for RSS 2.0. If it is not valid RDF, assume that it is
# a simple misspelling (in other words, the error message will be
# less than helpful on RSS 1.0 feeds.)
self.log(UndefinedElement({"parent":self.name, "element":"textinput"}))
return eater(), noduplicates()
def do_link(self):
return link(), noduplicates()
def do_title(self):
return nonhtml(), noduplicates(), nonblank()
def do_description(self):
return nonhtml(), noduplicates()
def do_blink(self):
return blink(), noduplicates()
def do_atom_author(self):
from author import author
return author()
def do_atom_category(self):
from category import category
return category()
def do_atom_contributor(self):
from author import author
return author()
def do_atom_generator(self):
from generator import generator
return generator(), nonblank(), noduplicates()
def do_atom_id(self):
return rfc2396_full(), noduplicates()
def do_atom_icon(self):
return nonblank(), rfc2396(), noduplicates()
def do_atom_link(self):
from link import link
return link()
def do_atom_logo(self):
return nonblank(), rfc2396(), noduplicates()
def do_atom_title(self):
from content import textConstruct
return textConstruct(), noduplicates()
def do_atom_subtitle(self):
from content import textConstruct
return textConstruct(), noduplicates()
def do_atom_rights(self):
from content import textConstruct
return textConstruct(), noduplicates()
def do_atom_updated(self):
return rfc3339(), noduplicates()
def do_dc_creator(self):
if "managingEditor" in self.children:
self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
return text() # duplicates allowed
def do_dc_subject(self):
if "category" in self.children:
self.log(DuplicateSemantics({"core":"category", "ext":"dc:subject"}))
return text() # duplicates allowed
def do_dc_date(self):
if "pubDate" in self.children:
self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
return w3cdtf(), noduplicates()
def do_cc_license(self):
if "creativeCommons_license" in self.children:
self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
return eater()
def do_creativeCommons_license(self):
if "cc_license" in self.children:
self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
return rfc2396_full()
class rss20Channel(channel):
def do_item(self):
from item import rss20Item
return rss20Item()
def do_category(self):
return category()
def do_cloud(self):
return cloud(), noduplicates()
do_rating = validatorBase.leaf # TODO test cases?!?
def do_ttl(self):
return positiveInteger(), nonblank(), noduplicates()
def do_docs(self):
return rfc2396_full(), noduplicates()
def do_generator(self):
if "admin_generatorAgent" in self.children:
self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
return text(), noduplicates()
def do_pubDate(self):
if "dc_date" in self.children:
self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
return rfc822(), noduplicates()
def do_managingEditor(self):
if "dc_creator" in self.children:
self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
return email(), noduplicates()
def do_webMaster(self):
if "dc_publisher" in self.children:
self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
return email(), noduplicates()
def do_language(self):
if "dc_language" in self.children:
self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
return iso639(), noduplicates()
def do_copyright(self):
if "dc_rights" in self.children:
self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
return nonhtml(), noduplicates()
def do_lastBuildDate(self):
if "dcterms_modified" in self.children:
self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
return rfc822(), noduplicates()
def do_skipHours(self):
from skipHours import skipHours
return skipHours()
def do_skipDays(self):
from skipDays import skipDays
return skipDays()
class rss10Channel(channel):
def getExpectedAttrNames(self):
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about'),
(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
def prevalidate(self):
if self.attrs.has_key((rdfNS,"about")):
if not "abouts" in self.dispatcher.__dict__:
self.dispatcher.__dict__["abouts"] = []
self.dispatcher.__dict__["abouts"].append(self.attrs[(rdfNS,"about")])
def do_items(self): # this actually should be from the rss1.0 ns
if not self.attrs.has_key((rdfNS,"about")):
self.log(MissingAttribute({"parent":self.name, "element":self.name, "attr":"rdf:about"}))
from item import items
return items(), noduplicates()
def do_rdfs_label(self):
return text()
def do_rdfs_comment(self):
return text()
class link(rfc2396_full):
def validate(self):
self.parent.link = self.value
rfc2396_full.validate(self)
class blink(text):
def validate(self):
self.log(NoBlink({}))
class category(nonhtml):
def getExpectedAttrNames(self):
return [(None, u'domain')]
class cloud(validatorBase):
def getExpectedAttrNames(self):
return [(None, u'domain'), (None, u'path'), (None, u'registerProcedure'),
(None, u'protocol'), (None, u'port')]
def prevalidate(self):
if (None, 'domain') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"domain"}))
else:
self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"domain"}))
try:
if int(self.attrs.getValue((None, 'port'))) <= 0:
self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
else:
self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":'port'}))
except KeyError:
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
except ValueError:
self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
if (None, 'path') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"path"}))
else:
self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"path"}))
if (None, 'registerProcedure') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"registerProcedure"}))
else:
self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"registerProcedure"}))
if (None, 'protocol') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"protocol"}))
else:
self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"protocol"}))
## TODO - is there a list of accepted protocols for this thing?
return validatorBase.prevalidate(self)
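# For reference, a minimal RSS 2.0 channel that satisfies the required-element
# checks in channel.validate() above (title, link, description, plus a
# language element to avoid the MissingDCLanguage warning):
#
#   <channel>
#     <title>Example Feed</title>
#     <link>http://example.org/</link>
#     <description>An example feed.</description>
#     <language>en</language>
#   </channel>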
| {
"repo_name": "dosiecki/NewsBlur",
"path": "vendor/feedvalidator/channel.py",
"copies": "16",
"size": "9663",
"license": "mit",
"hash": 1205504065043989500,
"line_mean": 33.6344086022,
"line_max": 110,
"alpha_frac": 0.6672875918,
"autogenerated": false,
"ratio": 3.4696588868940754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.028896931559360427,
"num_lines": 279
} |
""" Provide a remote control console via SSH. """
from peloton.plugins import PelotonPlugin
from peloton.coreio import PelotonManagementInterface
from peloton.exceptions import PluginError
from twisted.application import service, strports
from twisted.cred import checkers, portal
from twisted.conch import manhole, manhole_ssh, recvline, checkers as conchc
from twisted.conch.insults import insults
from twisted.internet import protocol, reactor
from twisted.internet.error import CannotListenError
class Cockpit(PelotonPlugin):
""" Provides a control console accessible via SSH.
    Intended to be a more controlled, higher-level interface than the
    PelotonShell.
    Definitely a work in progress; this is just at the demo level."""
def initialise(self):
# create an interface, pull out all 'public_*' methods
# into our namespace, striping the prefix
psc = PelotonManagementInterface(self.kernel)
publicMethods = [i for i in dir(psc) if i.startswith('public_')]
self.namespace={}
for m in publicMethods:
self.namespace[m[7:]] = getattr(psc, m)
self.cockpit = PasswordCockpit(int(self.config.port),
**{self.config.username:self.config.password})
def start(self):
try:
self.cockpit.startService()
self.logger.info("Cockpit plugin initialised")
except CannotListenError:
raise PluginError("Cockpit cannot listen on port %d" % self.config.port)
def _stopped(self, *args, **kargs):
self.logger.info("Cockpit plugin stopped")
def stop(self):
self.logger.info("Cockpit plugin stopping")
d = self.cockpit.stopService()
d.addCallback(self._stopped)
class CockpitProtocol(recvline.HistoricRecvLine):
def __init__(self, user):
self.user = user
def connectionMade(self):
recvline.HistoricRecvLine.connectionMade(self)
self.terminal.write("Peloton Cockpit")
self.terminal.nextLine( )
self.do_help( )
self.showPrompt( )
reactor.callLater(0.1,self._init)
def _init(self):
self.terminal.reset( )
self.terminal.write("Peloton Cockpit")
self.terminal.nextLine()
self.terminal.write("---------------")
self.terminal.nextLine()
self.terminal.write("type 'help' for command list.")
self.terminal.nextLine()
self.showPrompt()
def showPrompt(self):
self.terminal.write("peloton> ")
def getCommandFunc(self, cmd):
return getattr(self, 'do_' + cmd, None)
def lineReceived(self, line):
line = line.strip( )
if line:
cmdAndArgs = line.split( )
cmd = cmdAndArgs[0]
args = cmdAndArgs[1:]
func = self.getCommandFunc(cmd)
if func:
try:
func(*args)
except Exception, e:
self.terminal.write("Error: %s" % e)
self.terminal.nextLine( )
else:
self.terminal.write("No such command.")
self.terminal.nextLine( )
self.showPrompt( )
def do_help(self, cmd=''):
"Get help on a command. Usage: help command"
if cmd:
func = self.getCommandFunc(cmd)
if func:
self.terminal.write(func.__doc__)
self.terminal.nextLine( )
return
publicMethods = filter(
lambda funcname: funcname.startswith('do_'), dir(self))
commands = [cmd.replace('do_', '', 1) for cmd in publicMethods]
self.terminal.write("Commands: " + " ".join(commands))
self.terminal.nextLine( )
def do_echo(self, *args):
"Echo a string. Usage: echo my line of text"
self.terminal.write(" ".join(args))
self.terminal.nextLine( )
def do_whoami(self):
"Prints your user name. Usage: whoami"
self.terminal.write(self.user.username)
self.terminal.nextLine( )
def do_quit(self):
"Ends your session. Usage: quit"
self.terminal.write("Thanks for playing!")
self.terminal.nextLine( )
self.terminal.loseConnection( )
def do_clear(self):
"Clears the screen. Usage: clear"
self.terminal.reset( )
class BaseCockpit(service.MultiService):
def __init__(self, port, checker):
"""
@type port: string or int
@param port: what port should the Cockpit listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@type checker: an object providing the
L{twisted.cred.checkers.ICredentialsChecker} interface
@param checker: if provided, this checker is used to authenticate the
client instead of using the username/password scheme. You must either
provide a username/password or a Checker. Some useful values are::
import twisted.cred.checkers as credc
import twisted.conch.checkers as conchc
c = credc.AllowAnonymousAccess # completely open
c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
"""
service.MultiService.__init__(self)
if type(port) is int:
port = "tcp:%d" % port
self.port = port # for comparison later
self.checker = checker # to maybe compare later
def makeProtocol():
p = insults.ServerProtocol(CockpitProtocol, self)
return p
r = manhole_ssh.TerminalRealm()
r.chainedProtocolFactory = makeProtocol
p = portal.Portal(r, [self.checker])
f = manhole_ssh.ConchFactory(p)
s = strports.service(self.port, f)
s.setServiceParent(self)
def startService(self):
service.MultiService.startService(self)
class PasswordCockpit(BaseCockpit):
"""This Cockpit accepts encrypted (ssh) connections, and requires a
username and password to authorize access.
"""
def __init__(self, port, **users):
"""
@type port: string or int
        @param port: what port should the Cockpit listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
Supply one or more username=password keyword arguments.
"""
c = checkers.InMemoryUsernamePasswordDatabaseDontUse(**users)
        BaseCockpit.__init__(self, port, c)
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/plugins/cockpit.py",
"copies": "1",
"size": "6901",
"license": "bsd-3-clause",
"hash": 6873082628401717000,
"line_mean": 35.7127659574,
"line_max": 84,
"alpha_frac": 0.6204897841,
"autogenerated": false,
"ratio": 3.94568324757004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008732866191948182,
"num_lines": 188
} |
# $Id: Combinators.py 1047 2009-01-15 14:48:58Z graham $
#
"""
Combinators for use with Python code, mostly based on Haskell library elements.
(Also contains some other Haskell-style list/tuple functions.)
Strictly speaking, the "curry..." functions are not currying, but partial
application. Currying is the partial application of a function of n
arguments to just one argument to yield a new function of (n-1) arguments.
See: http://en.wikipedia.org/wiki/Currying
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
class compose:
"""
Function composition (with non-tuple intermediate value):
See: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52902
    compose(f,g,x...)(y...) = f(g(y...),x...)
This extends the traditional functional '.' by allowing additional arguments
to be bound into the composition; a kind of curried composition, I suppose.
"""
def __init__(self, f, g, *args, **kwargs):
self.f = f
self.g = g
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
return self.f(self.g(*args, **kwargs), *self.pending, **self.kwargs)
def curry1(func, arg):
"""
Curry one argument:
See: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/229472
"""
import new
return new.instancemethod(func, arg, object)
def curry(func, *args):
"""
Curry multiple arguments:
See: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/229472
As of Python 2.5, a more general version of this is in standard
module functools:
http://www.python.org/dev/peps/pep-0309/
http://docs.python.org/lib/module-functools.html
"""
def curried(*args2):
args2 = args + args2
return func(*args2)
return curried
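# Minimal usage sketch (assumed demo, run as a script): compose() binds the
# extra arguments after the inner call's result, and curry() partially
# applies from the left, much like functools.partial.
if __name__ == "__main__":
    def add(x, y): return x + y
    scaled_sum = compose(lambda v, k: v * k, add, 2)  # scaled_sum(x,y) = (x+y)*2
    assert scaled_sum(3, 4) == 14
    inc = curry(add, 1)
    assert inc(5) == 6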
# End.
| {
"repo_name": "wf4ever/ro-manager",
"path": "src/MiscUtils/Combinators.py",
"copies": "1",
"size": "1973",
"license": "mit",
"hash": -5322592649579358000,
"line_mean": 31.3442622951,
"line_max": 80,
"alpha_frac": 0.6573745565,
"autogenerated": false,
"ratio": 3.384219554030875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4541594110530875,
"avg_score": null,
"num_lines": null
} |
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
# @author Cory Sharp <[email protected]>
# @author Shawn Schaffert <[email protected]>
import sys, os, string
from jpype import jimport, JObject, JProxy
def openMoteIF(sourceName) :
tinyos = jimport.net.tinyos
messenger = JObject( None, tinyos.util.Messenger )
source = tinyos.packet.BuildSource.makePhoenix( sourceName, messenger )
source.setPacketErrorHandler( jimport.PyPhoenixError(source) )
moteif = tinyos.message.MoteIF( source )
if source.isAlive() :
moteif.start()
else :
raise RuntimeError, "could not open MoteIF %s" % sourceName
return moteif
class MoteIFCache(object) :
def __init__(self) :
self._active = {}
def get(self,source) :
if self.isAlive(source) :
return self._active[source]
self._active[source] = openMoteIF( source )
return self._active[source]
def isAlive(self,source) :
if self.has(source) :
if self._active[source].getSource().isAlive() :
return True
return False
def has(self,source) :
if self._active.has_key(source) :
return True
return False
class MessageCommError(Exception):
pass
class MessageComm( object ) :
def __init__( self ) :
self._moteifCache = MoteIFCache()
self._connected = []
def connect( self , *moteComStr ) :
for newMoteComStr in moteComStr :
if newMoteComStr not in self._connected :
self._moteifCache.get( newMoteComStr )
self._connected.append( newMoteComStr )
else :
raise MessageCommError , "already connected to " + newMoteComStr
def disconnect( self , *moteComStr ) :
for oldMoteComStr in moteComStr :
if oldMoteComStr in self._connected :
self._connected.remove( oldMoteComStr )
else :
raise MessageCommError , "not connected to " + oldMoteComStr
def send( self , addr , msg , *moteComStr ) :
if len( moteComStr ) == 0 :
moteComStr = self._connected
for mc in moteComStr :
            # FIXME: send expects a Message, but only the TOSMsg subclass has set_addr
            # (also note: 'mote' on the next line is not defined or imported in
            # this module, so TOS_BCAST_ADDR must come from elsewhere)
self._moteifCache.get(mc).send( mote.TOS_BCAST_ADDR , msg.set_addr( addr ) )
def register( self , msg , callbackFcn , *moteComStr ) :
msgListenerCallback = JProxy( jimport.net.tinyos.message.MessageListener , dict = { "messageReceived" : callbackFcn } )
if len( moteComStr ) == 0 :
moteComStr = self._connected
for mc in moteComStr :
self._moteifCache.get(mc).registerListener( msg , msgListenerCallback )
def unregister( self , msg , callback , *moteComStr ) :
if len( moteComStr ) == 0 :
moteComStr = self._connected
for mc in moteComStr :
self._moteifCache.get(mc).deregisterListener( msg , callback )
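# Usage sketch (hypothetical source string and message class; real values
# follow the TinyOS BuildSource syntax, e.g. 'sf@localhost:9002'):
#
#   comm = MessageComm()
#   comm.connect( 'sf@localhost:9002' )
#   comm.register( SomeMsg() , onMessage )   # onMessage(addr, msg) callback
#   comm.send( 0xffff , SomeMsg() )
#
# SomeMsg/onMessage are illustrative placeholders, not part of this module.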
| {
"repo_name": "fresskarma/tinyos-1.x",
"path": "contrib/nestfe/python/pytos/Comm.py",
"copies": "2",
"size": "3828",
"license": "bsd-3-clause",
"hash": 2943902533733374000,
"line_mean": 31.1680672269,
"line_max": 123,
"alpha_frac": 0.7024555904,
"autogenerated": false,
"ratio": 3.3637961335676625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9858013760407285,
"avg_score": 0.04164759271207531,
"num_lines": 119
} |
"""$Id: compatibility.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from logging import *
def _must(event):
return isinstance(event, Error)
def _should(event):
return isinstance(event, Warning)
def _may(event):
return isinstance(event, Info)
def A(events):
return [event for event in events if _must(event)]
def AA(events):
return [event for event in events if _must(event) or _should(event)]
def AAA(events):
return [event for event in events if _must(event) or _should(event) or _may(event)]
def AAAA(events):
return events
def analyze(events, rawdata):
for event in events:
if isinstance(event,UndefinedElement):
if event.params['parent'] == 'root':
if event.params['element'].lower() in ['html','xhtml:html']:
return "html"
return None
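# Usage sketch (hypothetical 'events' list; real events are produced by the
# validator and carry parameter dictionaries):
#
#   level = AA                      # report MUST- and SHOULD-level items
#   for event in level(events):
#       report(event)
#
# A/AA/AAA/AAAA select increasingly inclusive severity levels, echoing
# WAI-style conformance naming.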
| {
"repo_name": "mihaip/NewsBlur",
"path": "vendor/feedvalidator/compatibility.py",
"copies": "16",
"size": "1047",
"license": "mit",
"hash": 2528938566534896000,
"line_mean": 27.2972972973,
"line_max": 94,
"alpha_frac": 0.6695319962,
"autogenerated": false,
"ratio": 3.163141993957704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023663869852157213,
"num_lines": 37
} |
"""
Python 2/3 compatibility definitions.
This module currently provides the following helper symbols:
* bytes (name of byte string type; str in 2.x, bytes in 3.x)
* b (function converting a string literal to an ASCII byte string;
can be also used to convert a Unicode string into a byte string)
* u_prefix (unicode repr prefix, 'u' in 2.x, nothing in 3.x)
* BytesIO (a StringIO class that works with bytestrings)
"""
import sys
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return s.encode('latin1')
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
# using this hack since 2to3 "fixes" the relative import
# when using ``from io import BytesIO``
BytesIO = __import__('io').BytesIO
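# Minimal self-check sketch (assumed demo; behaves the same on 2.x and 3.x):
if __name__ == '__main__':
    assert b('abc') == 'abc'.encode('latin1')
    assert BytesIO(b('xy')).read() == b('xy')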
| {
"repo_name": "Soya93/Extract-Refactoring",
"path": "python/helpers/py2only/docutils/_compat.py",
"copies": "5",
"size": "1097",
"license": "apache-2.0",
"hash": -5964214987455566000,
"line_mean": 30.3428571429,
"line_max": 66,
"alpha_frac": 0.6526891522,
"autogenerated": false,
"ratio": 3.584967320261438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6737656472461439,
"avg_score": null,
"num_lines": null
} |
"""
Python 2/3 compatibility definitions.
This module currently provides the following helper symbols:
* bytes (name of byte string type; str in 2.x, bytes in 3.x)
* b (function converting a string literal to an ASCII byte string;
can be also used to convert a Unicode string into a byte string)
* u_prefix (unicode repr prefix, 'u' in 2.x, nothing in 3.x)
* BytesIO (a StringIO class that works with bytestrings)
"""
import sys
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
from StringIO import StringIO as BytesIO
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return s.encode('latin1')
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
# using this hack since 2to3 "fixes" the relative import
# when using ``from io import BytesIO``
BytesIO = __import__('io').BytesIO
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/_compat.py",
"copies": "2",
"size": "1178",
"license": "bsd-3-clause",
"hash": -4621123537055207000,
"line_mean": 30.7222222222,
"line_max": 66,
"alpha_frac": 0.6375212224,
"autogenerated": false,
"ratio": 3.6697819314641746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5307303153864175,
"avg_score": null,
"num_lines": null
} |
"""
Python 2/3 compatibility definitions.
This module currently provides the following helper symbols:
* bytes (name of byte string type; str in 2.x, bytes in 3.x)
* b (function converting a string literal to an ASCII byte string;
can be also used to convert a Unicode string into a byte string)
* u_prefix (unicode repr prefix: 'u' in 2.x, '' in 3.x)
(Required in docutils/test/test_publisher.py)
* BytesIO (a StringIO class that works with bytestrings)
"""
import sys
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
from StringIO import StringIO as BytesIO
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return s.encode('latin1')
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
# using this hack since 2to3 "fixes" the relative import
# when using ``from io import BytesIO``
BytesIO = __import__('io').BytesIO
| {
"repo_name": "ddd332/presto",
"path": "presto-docs/target/sphinx/docutils/_compat.py",
"copies": "4",
"size": "1183",
"license": "apache-2.0",
"hash": -1744322731788892000,
"line_mean": 30.972972973,
"line_max": 66,
"alpha_frac": 0.6584953508,
"autogenerated": false,
"ratio": 3.617737003058104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6276232353858104,
"avg_score": null,
"num_lines": null
} |
"""Complex drawing objects."""
from pygame import Surface, SRCALPHA, surfarray
class FaderSurface (Surface):
"""FaderSurface (width, height, alpha=255) -> FaderSurface
A pygame.Surface class, that supports alpha fading.
The FaderSurface is an enhanced 32-bit pygame.Surface, that supports
alpha blending and fading the surface in or out. It uses pixel-based
alpha values for transparency.
The 'alpha' attribute indicates the currently set alpha value for
the Surface and can be adjusted by either reassigning it directly or
using the set_alpha() method. Its value range is limited from 0
(transparent) to 255 (opaque) as supported by the pygame library.
fader.alpha = 155
fader.set_alpha (24)
Note that the set_alpha() method overrides the original set_alpha()
method from pygame.Surface.
The stepping value for fading operations can be read and set through
the 'step' attribute or set_step() method. Each call of the update()
method will increase (or decrease) the alpha value by the set step.
fader.step = 10
fader.set_step (5)
To in- or decrease the alpha channel so that you will receive a fade
in or fade out effect, you can use the update() method in a loop,
which constantly blits the surface.
while fader.update ():
screen.blit (fader, (10, 10))
pygame.display.update (fader_rect)
The update() method returns True as long as the alpha value has not
reached its upper or lower boundary.
Attributes:
alpha - The currently set alpha value.
step - The step range for increasing or decreasing the alpha value.
"""
def __init__ (self, width, height, alpha=255):
Surface.__init__ (self, (width, height), SRCALPHA, 32)
self._alpha = 255
self._step = -1
self.set_alpha (alpha)
def set_alpha (self, alpha=255):
"""F.set_alpha (...) -> None
Sets the alpha transparency value.
Raises a TypeError, if the passed argument is not an integer.
Raises a ValueError, if the passed argument is not in the range
0 <= alpha <= 255
"""
if type (alpha) != int:
raise TypeError ("alpha must be a positive integer")
if (alpha < 0) or (alpha > 255):
raise ValueError ("alpha must be in the range 0 <= alpha <= 255")
self._alpha = alpha
# Apply the alpha.
array = surfarray.pixels_alpha (self)
array[:] = alpha
del array
def set_step (self, step=-1):
"""F.set_step (...) -> None
Sets the step range to use for in- or decreasing the alpha value.
Raises a TypeError, if the passed argument is not an integer.
"""
if type (step) != int:
raise TypeError ("step must be an integer")
self._step = step
def update (self):
"""F.update () -> bool
Updates the alpha channel of the surface.
Updates the alpha channel of the surface and returns True, if
the alpha channel was modified or False, if not.
"""
val = self._alpha + self._step
if val < 0:
val = 0
if val > 255:
val = 255
array = surfarray.pixels_alpha (self)
array[:] = val
del array
self._alpha = val
return not (val == 0) and not (val == 255)
alpha = property (lambda self: self._alpha,
lambda self, var: self.set_alpha (var),
doc = "The currently set alpha value.")
step = property (lambda self: self._step,
lambda self, var: self.set_step (var),
doc = "The step range for increasing or decreasing the" \
"alpha value.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/draw/Complex.py",
"copies": "1",
"size": "5240",
"license": "bsd-2-clause",
"hash": 3423823274545998300,
"line_mean": 37.2481751825,
"line_max": 79,
"alpha_frac": 0.6494274809,
"autogenerated": false,
"ratio": 4.28454619787408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.543397367877408,
"avg_score": null,
"num_lines": null
} |
import time
import datetime
import calendar
class ComplicatedTime:
def __init__(self, year):
self.year = year
# params for tos-time module
self.year_seconds = 0
self.wday_offset = 0
        # use calendar.isleap so the Gregorian century rule is honoured
        # (a plain 'year % 4' test wrongly marks e.g. 1900 as a leap year)
        self.isleap = calendar.isleap(self.year)
self.gmt_offset = 0
self.dst_first_yday = 0
self.dst_last_yday = 0
self.tzname = time.tzname
self.daylight = time.daylight
self.newyears_weekday()
self.gen_gmtime()
self.tzrules = ((0, 0, 0), (0, 0, 0))
if self.daylight:
self.tzrules = self.timezone_rules()
self.dst_first_yday = self.nth_month_day_to_year_day(year, self.tzrules[0]);
self.dst_last_yday = self.nth_month_day_to_year_day(year, self.tzrules[1]);
def timezone_rules(self):
# north america
if self.gmt_offset >= 5.0 and self.gmt_offset <= 8.0:
return ((3, 6, 2), (11, 6, 1))
# east across europe/asia
elif self.gmt_offset <= 0.0 and self.gmt_offset >= -7.0:
return ((3, 6, 99), (10, 6, 99))
# far east
elif self.gmt_offset <= -8.0 and self.gmt_offset >= -11.0:
# only real exception here is australia
if 'WST' in self.tzname or \
'CST' in self.tzname or \
'EST' in self.tzname:
return ((10, 6, 1), (4, 6, 1))
else:
return ((3, 6, 99), (10, 6, 99))
# south-west pacific
elif self.gmt_offset == -12.0:
if 'NZST' in self.tzname:
return ((9, 6, 99), (4, 6, 1))
else:
return ((3, 6, 99), (10, 6, 99))
# alaska
elif self.gmt_offset == 9.0:
return ((3, 6, 2), (11, 6, 1))
elif self.gmt_offset == 10.0:
return ((3, 6, 2), (11, 6, 1))
def gen_gmtime(self):
sec_offset = time.timezone # seconds
self.gmt_offset = sec_offset / 3600.0 # hours
self.year_seconds = int(time.mktime((self.year, 1, 1, 0, 0, 0, self.wday_offset, 1, 0)))
def newyears_weekday(self):
self.wday_offset = datetime.date(self.year, 1, 1).weekday()
def print_em(self):
print "first second of %d is %x\nfirst day of year %d\ndst start is %d\ndst end is %d" \
% (self.year, self.year_seconds, self.wday_offset, self.dst_first_yday, self.dst_last_yday)
    # huh? we want to know which day of 365/6, for example, the second sunday of march in 2010
    # so, we say nth_month_day_to_year_day(2010, (3, 6, 2)) and get back 73
    # if we want the last, pass in 99 for which_one
def nth_month_day_to_year_day(self, year, (month, selected_day, which_one)):
d = { 1:31, 3:31, 4:30, 5:31, 6:30, 7:31, 8:31, 9:30, 10:31, 11:30, 12:31 }
if self.isleap:
d[2] = 29
else:
d[2] = 28
if which_one != 99:
first_weekday = datetime.date(year, month, 1).weekday()
if selected_day < first_weekday:
selected_day = selected_day + 7
days_to_selectedday = selected_day - first_weekday
our_day = 7 * (which_one - 1) + days_to_selectedday + 1
else:
lastday = d[month]
last_weekday = datetime.date(year, month, lastday).weekday()
if last_weekday < selected_day:
last_weekday = last_weekday + 7
days_to_selectedday = last_weekday - selected_day
our_day = lastday - days_to_selectedday
# now we have to convert to day of year
for i in range(1, month):
our_day = our_day + d[i]
return our_day
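# Usage sketch (Python 2 only, like the module itself; the constructor reads
# the host timezone, so the computed tzrules may vary per machine):
if __name__ == '__main__':
    ct = ComplicatedTime(2010)
    # second Sunday of March 2010 is March 14th, i.e. year day 31+28+14
    print(ct.nth_month_day_to_year_day(2010, (3, 6, 2)))   # -> 73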
| {
"repo_name": "ekiwi/tinyos-1.x",
"path": "contrib/handhelds/tools/scripts/ComplicatedTime.py",
"copies": "2",
"size": "5615",
"license": "bsd-3-clause",
"hash": -5726697777855302000,
"line_mean": 38.2657342657,
"line_max": 99,
"alpha_frac": 0.5934105076,
"autogenerated": false,
"ratio": 3.594750320102433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007416941023797454,
"num_lines": 143
} |
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
from docutils.transforms import Transform
class Filter(Transform):
"""
Include or exclude elements which depend on a specific Docutils component.
For use with `nodes.pending` elements. A "pending" element's dictionary
attribute ``details`` must contain the keys "component" and "format". The
value of ``details['component']`` must match the type name of the
component the elements depend on (e.g. "writer"). The value of
``details['format']`` is the name of a specific format or context of that
component (e.g. "html"). If the matching Docutils component supports that
format or context, the "pending" element is replaced by the contents of
``details['nodes']`` (a list of nodes); otherwise, the "pending" element
is removed.
For example, the reStructuredText "meta" directive creates a "pending"
element containing a "meta" element (in ``pending.details['nodes']``).
Only writers (``pending.details['component'] == 'writer'``) supporting the
"html" format (``pending.details['format'] == 'html'``) will include the
"meta" element; it will be deleted from the output of all other writers.
"""
default_priority = 780
def apply(self):
pending = self.startnode
component_type = pending.details['component'] # 'reader' or 'writer'
format = pending.details['format']
component = self.document.transformer.components[component_type]
if component.supports(format):
pending.replace_self(pending.details['nodes'])
else:
pending.parent.remove(pending)
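# Usage sketch (hypothetical node): a directive queues conditional content as
#
#   pending = nodes.pending(Filter, details={'component': 'writer',
#                                            'format': 'html',
#                                            'nodes': [meta_node]})
#   document.note_pending(pending)
#
# so that this transform later keeps or drops meta_node depending on the
# active writer's supported formats. meta_node is an illustrative placeholder.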
| {
"repo_name": "leafclick/intellij-community",
"path": "python/helpers/py3only/docutils/transforms/components.py",
"copies": "49",
"size": "1851",
"license": "apache-2.0",
"hash": -8461568939808082000,
"line_mean": 39.2391304348,
"line_max": 78,
"alpha_frac": 0.6866558617,
"autogenerated": false,
"ratio": 4.140939597315437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00028232636928289106,
"num_lines": 46
} |
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):
"""
Include or exclude elements which depend on a specific Docutils component.
For use with `nodes.pending` elements. A "pending" element's dictionary
attribute ``details`` must contain the keys "component" and "format". The
value of ``details['component']`` must match the type name of the
component the elements depend on (e.g. "writer"). The value of
``details['format']`` is the name of a specific format or context of that
component (e.g. "html"). If the matching Docutils component supports that
format or context, the "pending" element is replaced by the contents of
``details['nodes']`` (a list of nodes); otherwise, the "pending" element
is removed.
    For example, the reStructuredText "meta" directive creates a "pending"
element containing a "meta" element (in ``pending.details['nodes']``).
Only writers (``pending.details['component'] == 'writer'``) supporting the
"html" format (``pending.details['format'] == 'html'``) will include the
"meta" element; it will be deleted from the output of all other writers.
"""
default_priority = 780
def apply(self):
pending = self.startnode
component_type = pending.details['component'] # 'reader' or 'writer'
format = pending.details['format']
component = self.document.transformer.components[component_type]
if component.supports(format):
pending.replace_self(pending.details['nodes'])
else:
pending.parent.remove(pending)
| {
"repo_name": "akiokio/centralfitestoque",
"path": "src/.pycharm_helpers/docutils/transforms/components.py",
"copies": "1",
"size": "2003",
"license": "bsd-2-clause",
"hash": -5812997957689690000,
"line_mean": 37.5192307692,
"line_max": 84,
"alpha_frac": 0.6994508238,
"autogenerated": false,
"ratio": 4.1384297520661155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5337880575866115,
"avg_score": null,
"num_lines": null
} |
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):
"""
Include or exclude elements which depend on a specific Docutils component.
For use with `nodes.pending` elements. A "pending" element's dictionary
attribute ``details`` must contain the keys "component" and "format". The
value of ``details['component']`` must match the type name of the
component the elements depend on (e.g. "writer"). The value of
``details['format']`` is the name of a specific format or context of that
component (e.g. "html"). If the matching Docutils component supports that
format or context, the "pending" element is replaced by the contents of
``details['nodes']`` (a list of nodes); otherwise, the "pending" element
is removed.
For example, the reStructuredText "meta" directive creates a "pending"
element containing a "meta" element (in ``pending.details['nodes']``).
Only writers (``pending.details['component'] == 'writer'``) supporting the
"html" format (``pending.details['format'] == 'html'``) will include the
"meta" element; it will be deleted from the output of all other writers.
"""
default_priority = 780
def apply(self):
pending = self.startnode
component_type = pending.details['component'] # 'reader' or 'writer'
format = pending.details['format']
component = self.document.transformer.components[component_type]
if component.supports(format):
pending.replace_self(pending.details['nodes'])
else:
pending.parent.remove(pending)
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/transforms/components.py",
"copies": "2",
"size": "2045",
"license": "bsd-3-clause",
"hash": 2142520015116600000,
"line_mean": 37.3269230769,
"line_max": 78,
"alpha_frac": 0.680195599,
"autogenerated": false,
"ratio": 4.190573770491803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002465483234714004,
"num_lines": 52
} |
"""Constants used by the ocempgui.draw module.
Font style constants
--------------------
Font style flags influence the rendering of the font and add, dependent
on the set flags, a bold, italic or underlined layout (or a combination
of those three).
The font flags are grouped in the FONT_STYLE_TYPES tuple.
FONT_STYLE_NORMAL
Default font style with no additional rendering.
FONT_STYLE_BOLD
Bold font rendering.
FONT_STYLE_ITALIC
Italic font rendering.
FONT_STYLE_UNDERLINE
Underlined font rendering.
"""
FONT_STYLE_NORMAL = 0
FONT_STYLE_BOLD = 1
FONT_STYLE_ITALIC = 2
FONT_STYLE_UNDERLINE = 4
FONT_STYLE_TYPES = (FONT_STYLE_NORMAL, FONT_STYLE_BOLD, FONT_STYLE_ITALIC,
FONT_STYLE_UNDERLINE)
def constants_is_font_style (style):
"""C.constants_is_font_style (style) -> bool
Checks whether the passed value evaluates to a font style value.
"""
if type (style) != int:
raise TypeError ("style must be an integer.")
return (style & FONT_STYLE_BOLD == FONT_STYLE_BOLD) or \
(style & FONT_STYLE_ITALIC == FONT_STYLE_ITALIC) or \
(style & FONT_STYLE_UNDERLINE == FONT_STYLE_UNDERLINE)
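# e.g. constants_is_font_style (FONT_STYLE_BOLD | FONT_STYLE_ITALIC) -> True,
# while constants_is_font_style (FONT_STYLE_NORMAL) -> False, since no style
# bit is set.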
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/draw/Constants.py",
"copies": "1",
"size": "2585",
"license": "bsd-2-clause",
"hash": 3366401402950442000,
"line_mean": 38.1666666667,
"line_max": 78,
"alpha_frac": 0.7357833656,
"autogenerated": false,
"ratio": 3.8410104011887074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5076793766788708,
"avg_score": null,
"num_lines": null
} |
"""Constants used by the ocempgui widgets module.
General constants
-----------------
General constants denote values, which are used for multiple purposes
and usually not limited to specific widget class parts.
DEFAULTDATADIR
Path to the directory where theme files and additional resources were
installed. This path is especially important for the Style class and
user code, which relies upon the drawing engines or other resources.
State constants
---------------
The state constants denote the state of a widget and influence its
behaviour and appearance. They usually can be read from the widget
through its 'state' attribute and are usually prefixed with 'STATE_'.
The state constants are grouped in the STATE_TYPES tuple.
STATE_NORMAL
The usual state ('default') of a widget. This is the basic state the
widget has when it is created and no interaction has happened with it.
STATE_ENTERED
The widget area was entered by a pointing device. This usually slightly
changes the visual appearance of the widget, so it is easier to
distinguish between the entered and other widgets.
STATE_ACTIVE
The widget is activated. This usually will cause a widget to change its
visual appearance, so the user can see the interaction. A Button widget
for example will be drawn in a sunken state, while an Entry widget will
show a blinking caret.
STATE_INSENSITIVE
Usually set through the 'sensitive' attribute of a widget. This will
change the visual appearance of the widget, so that it is easier to
distinguish between widgets, with which an interaction is possible and
insensitive widgets, with which it is not.
Scrolling constants
-------------------
The scrolling constants only apply to widgets, which support scrolling
such as the ScrolledList or ScrolledWindow classes. They influence the
visibility and behaviour of the attached scrollbars for those widgets.
The scrolling constants are grouped in the SCROLL_TYPES tuple.
SCROLL_NEVER
Scrollbars will never be shown, independent of whether they are
necessary or not.
SCROLL_AUTO
Scrollbars will be shown and hidden on demand.
SCROLL_ALWAYS
Scrollbars will always be shown, independent of whether they are
necessary or not.
Selection constants
-------------------
Selection constants influence the selection behaviour in the list and
tree like widgets such as the ScrolledList class.
The selection constants are grouped in the SELECTION_TYPES tuple.
SELECTION_NONE
No selection is possible. Elements cannot be selected and existing
selections can not be changed.
SELECTION_SINGLE
Only one element can be selected per time.
SELECTION_MULTIPLE
Multiple elements can be selected.
Border constants
----------------
The border constants influence the appearance of a widget
border. Several widgets support this constants to fit in their
environment.
The border constants are grouped in the BORDER_TYPES tuple.
BORDER_NONE
The widget will have no visible border.
BORDER_FLAT
The widget will have a flat border around it.
BORDER_SUNKEN
The widget will have a sunken border effect.
BORDER_RAISED
The widget will have a raised border effect.
BORDER_ETCHED_IN
The widget will have an etched-in border effect.
BORDER_ETCHED_OUT
The widget will have an etched-out border effect.
Arrow constants
---------------
The arrow constants are used by the ScrollBar widgets and determine the
direction of the arrows on the scrollbar buttons.
The arrow constants are grouped in the ARROW_TYPES tuple.
ARROW_UP
The arrow points upwards.
ARROW_DOWN
The arrow points downwards.
ARROW_LEFT
The arrow points to the left.
ARROW_RIGHT
The arrow points to the right.
Alignment flags
---------------
The alignment flags are used by most Container and Bin widget classes
and influence the drawing positions of the attached children. The flags
can be combined bitwise, although not every combination makes sense
(like ALIGN_TOP | ALIGN_BOTTOM).
The alignment flags are grouped in the ALIGN_TYPES tuple.
ALIGN_NONE
No special alignment, use the defaults of the widget.
ALIGN_TOP
Align the child(ren) at the top.
ALIGN_BOTTOM
Align the child(ren) at the bottom.
ALIGN_LEFT
Align the child(ren) at the left side.
ALIGN_RIGHT
Align the child(ren) at the right side.
Orientation types
-----------------
The orientation types are used by widgets, which are able to draw
themselves in a vertical or horizontal alignment, such as Diagram or
Scale widgets. A single axis chart for example can have either a
vertical or horizontal axis orientation.
The orientation constants are grouped in the ORIENTATION_TYPES tuple.
ORIENTATION_VERTICAL
The widget parts are aligned and drawn vertically.
ORIENTATION_HORIZONTAL
The widget parts are aligned and drawn horizontally.
Dialog result values
--------------------
Dialog result values are used and sent by dialog windows, which inherit
from the GenericDialog, only. They usually represent the button of the
dialog that got pressed and allow the application to react to the user
input action.
The dialog result constants are grouped in the DLGRESULT_TYPES tuple.
The descriptions of the results below are meant as usability
guidelines. The average user mostly will have the described intention,
when he presses a button with that result value.
DLGRESULT_OK
Indicates, that the user agrees to or accepts the information shown on
the dialog or successfully finished her input.
DLGRESULT_CANCEL
Indicates, that the user wants to abort from the current action or input
without making any changes to the current state.
DLGRESULT_ABORT
Same as above, but mostly used in a different context such as a hard
program or process abort.
DLGRESULT_CLOSE
Used in dialogs, in which usually no special agreement of the user is
needed. Mostly used for dialogs, that show information only and do not
influence the application behaviour in any way.
DLGRESULT_USER
Freely choosable.
Signal constants
----------------
The signal constants denote various event identifiers, which are emitted
on and by widgets and to which widgets can listen. Usually there is a
distinction between native signal (those which are emitted by the pygame
library) and widget signals (those which are emitted by widgets). The
native signals are usually just aliases for pygame constants to fit in
the OcempGUI namespace.
Signal callbacks for native signals usually will receive the pygame
event data as first argument. This means that a callback would look
like the following:
widget.connect_signal (SIG_MOUSEDOWN, mousedown_callback, owndata)
def mousedown_callback (eventdata, userdata1, ...):
if eventdata.pos == ...:
...
if userdata1 == owndata:
...
The passed pygame event data matches the sent pygame.event.Event data
for the specific signal as described in the pygame documentation.
SIG_KEYDOWN
Alias for pygame.locals.KEYDOWN. Emitted, if a key on the keyboard will
be pressed down. OcempGUI causes repeated emission of that event via
the pygame.key.set_repeat() function.
Note: Signal callbacks will receive the pygame event data as first
argument.
SIG_KEYUP
Alias for pygame.locals.KEYUP. Emitted, if a key on the keyboard is
released.
Note: Signal callbacks will receive the pygame event data as first
argument.
SIG_MOUSEDOWN
Alias for pygame.locals.MOUSEBUTTONDOWN. Emitted, if a mouse button gets
pressed (or the mouse wheel is used).
Note: Signal callbacks will receive the pygame event data as first
argument.
SIG_MOUSEUP
Alias for pygame.locals.MOUSEBUTTONUP. Emitted, if a mouse button is
released.
Note: Signal callbacks will receive the pygame event data as first
argument.
SIG_MOUSEMOVE
Alias for pygame.locals.MOUSEMOTION. Emitted, if the mouse cursor
position changes (the mouse is moved).
Note: Signal callbacks will receive the pygame event data as first
argument.
SIG_TICK
Alias for pygame.locals.USEREVENT + 1. This is a timer event for
widgets, which need timed event emits such as the Entry widget class
(blinking cursor) or the StatusBar (date update). Usually this event is
emitted every 500ms by the Renderer class.
SIG_TWISTED
Alias for pygame.locals.USEREVENT + 2. This signal type is used only by
the TwistedRenderer at the moment and will be sent whenever the
interleave() method of its attached reactor is called.
SIG_UPDATED
Alias for pygame.locals.USEREVENT + 3. This signal is used by the
Renderer class to notify listeners about an update of the screen area.
SIG_SCREENCHANGED
Alias for pygame.locals.USEREVENT + 4. This signal type is used by the
Renderer class to notify listeners about changes of the attached
screen area.
SIG_CLICKED
Raised by supporting widgets, if the following event sequence happens on
the widget:
1) Left mouse button pressed down (SIG_MOUSEDOWN).
2) Left mouse button released (SIG_MOUSEUP)
3) Click event will be raised by the widget (SIG_CLICKED).
SIG_FOCUSED
Raised by supporting widgets, if a widget is focused by the mouse or
another input device (such as the keyboard). This usually will pass the
input focus to that widget.
SIG_INPUT
Raised by widgets which support data input; indicates that an input
was successfully finished. An example would be the Editable widget
class, which raises SIG_INPUT upon pressing Enter/Return after typing text.
SIG_TOGGLED
Raised by widgets, which support and/or indicate True/False value states
such as the Check- or ToggleButton. This will be raised _after_ the
value has been changed.
SIG_VALCHANGED
Raised, if the main value of a supporting widget changed (usually
numeric), such as the value of a Scale or ProgressBar.
SIG_SELECTCHANGED
Raised, if a certain selection changed.
SIG_LISTCHANGED
Raised if the attached item list of a ScrolledList changed.
SIG_DIALOGRESPONSE
Raised, when a button attached to a GenericDialog object gets pressed.
Note: Signal callbacks will receive the result value bound to the button
as first argument.
SIG_DESTROYED
Raised by a widget, when it is about to be destroyed.
Note: Signal callbacks will receive the destroyed widget as first argument.
SIG_ENTER
Raised by a widget, when the mouse cursor enters it.
SIG_LEAVE
Raised by a widget, when the mouse cursor leaves it.
"""
from pygame import KEYDOWN, KEYUP, MOUSEBUTTONDOWN, MOUSEMOTION, MOUSEBUTTONUP
from pygame import USEREVENT
# State constants.
STATE_NORMAL = 0
STATE_ENTERED = 1
STATE_ACTIVE = 2
STATE_INSENSITIVE = 3
STATE_TYPES = (STATE_NORMAL, STATE_ENTERED, STATE_ACTIVE, STATE_INSENSITIVE)
# Scrolling behaviour for widgets which support it.
SCROLL_NEVER = 0
SCROLL_AUTO = 1
SCROLL_ALWAYS = 2
SCROLL_TYPES = (SCROLL_NEVER, SCROLL_AUTO, SCROLL_ALWAYS)
# Selection modes.
SELECTION_NONE = 0
SELECTION_SINGLE = 1
SELECTION_MULTIPLE = 2
SELECTION_TYPES = (SELECTION_NONE, SELECTION_SINGLE, SELECTION_MULTIPLE)
# Border types.
BORDER_NONE = 0
BORDER_FLAT = 1
BORDER_SUNKEN = 2
BORDER_RAISED = 3
BORDER_ETCHED_IN = 4
BORDER_ETCHED_OUT = 5
BORDER_TYPES = (BORDER_NONE, BORDER_FLAT, BORDER_SUNKEN, BORDER_RAISED,
BORDER_ETCHED_IN, BORDER_ETCHED_OUT)
# Arrow types.
ARROW_UP = 0
ARROW_DOWN = 1
ARROW_LEFT = 2
ARROW_RIGHT = 3
ARROW_TYPES = (ARROW_UP, ARROW_DOWN, ARROW_LEFT, ARROW_RIGHT)
# Alignment types.
ALIGN_NONE = 0
ALIGN_TOP = 1 << 0
ALIGN_BOTTOM = 1 << 1
ALIGN_LEFT = 1 << 2
ALIGN_RIGHT = 1 << 3
ALIGN_TYPES = (ALIGN_NONE, ALIGN_TOP, ALIGN_BOTTOM, ALIGN_LEFT, ALIGN_RIGHT)
def constants_is_align (align):
"""C.constants_is_align (align) -> bool
Checks whether the passed value evaluates to an align value.
"""
if type (align) != int:
raise TypeError ("align must be an integer.")
    # note: 'align & ALIGN_NONE == ALIGN_NONE' is always true (x & 0 == 0),
    # so ALIGN_NONE has to be compared directly.
    return (align & ALIGN_TOP == ALIGN_TOP) or \
           (align & ALIGN_BOTTOM == ALIGN_BOTTOM) or \
           (align & ALIGN_LEFT == ALIGN_LEFT) or \
           (align & ALIGN_RIGHT == ALIGN_RIGHT) or \
           (align == ALIGN_NONE)
# Orientation types.
ORIENTATION_VERTICAL = intern ("vertical")
ORIENTATION_HORIZONTAL = intern ("horizontal")
ORIENTATION_TYPES = (ORIENTATION_VERTICAL, ORIENTATION_HORIZONTAL)
# Dialog results.
DLGRESULT_OK = 0
DLGRESULT_CANCEL = 1
DLGRESULT_ABORT = 2
DLGRESULT_CLOSE = 3
DLGRESULT_USER = 99
DLGRESULT_TYPES = (DLGRESULT_OK, DLGRESULT_CANCEL, DLGRESULT_ABORT,
DLGRESULT_CLOSE, DLGRESULT_USER)
# Signal constants, native.
SIG_KEYDOWN = KEYDOWN
SIG_KEYUP = KEYUP
SIG_MOUSEDOWN = MOUSEBUTTONDOWN
SIG_MOUSEMOVE = MOUSEMOTION
SIG_MOUSEUP = MOUSEBUTTONUP
SIG_TICK = USEREVENT + 1
SIG_TWISTED = USEREVENT + 2
SIG_UPDATED = USEREVENT + 3
SIG_SCREENCHANGED = USEREVENT + 4
# Signal groups for fast tests.
SIGNALS_KEYS = (SIG_KEYDOWN, SIG_KEYUP)
SIGNALS_MOUSE = (SIG_MOUSEDOWN, SIG_MOUSEMOVE, SIG_MOUSEUP)
# Signal constants, raised by the widgets.
SIG_ACTIVATED = intern ("activated")
SIG_CLICKED = intern ("clicked")
SIG_FOCUSED = intern ("focused")
SIG_INPUT = intern ("input")
SIG_TOGGLED = intern ("toggled")
SIG_VALCHANGED = intern ("value-changed")
SIG_SELECTCHANGED = intern ("selection-changed")
SIG_LISTCHANGED = intern ("list-changed")
SIG_DOUBLECLICKED = intern ("double-clicked")
SIG_DIALOGRESPONSE = intern ("dialog-response")
SIG_DESTROYED = intern ("destroyed")
SIG_ENTER = intern ("entered")
SIG_LEAVE = intern ("left")
# The default data directory, where the themes and co. get installed.
# @DATAPATH@ will be replaced at installation usually.
DEFAULTDATADIR = "@DATAPATH@"
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Constants.py",
"copies": "1",
"size": "14896",
"license": "bsd-2-clause",
"hash": -1831841270256650200,
"line_mean": 32.0288248337,
"line_max": 78,
"alpha_frac": 0.7609425349,
"autogenerated": false,
"ratio": 3.825372367745249,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0063890521870758315,
"num_lines": 451
} |
"""An abstract widget, which can hold other widgets."""
from BaseWidget import BaseWidget
class Container (BaseWidget):
"""Container () -> Container
A container widget class, which can hold multiple other widgets.
The Container class is an abstract class, which can hold multiple
widgets. It is usable to serve as container for various types of
widgets and allows inheritors to use their own look.
The 'children' attribute is a list of the widgets added to the
Container.
children = container.children # get the list of children
container.add_child (widget) # add a widget
container.add_child (widget1, widget2) # add multiple widgets at once
container.remove_child (widget) # remove a widget
container.children = my_list_of_widgets # Set multiple widgets at once.
The 'padding' attribute and set_padding() method are used to place a
certain amount of pixels between the children and the outer edges of
the Container.
container.padding = 10
container.set_padding (10)
An additional amount of pixels can be placed between the widgets
    using the 'spacing' attribute or set_spacing() method. Dependent on
the inherited Container class, this places the given amount of
pixels between the children.
container.spacing = 10
container.set_spacing (10)
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
children - List of widgets packed into the Container.
spacing - Spacing in pixels between the children.
padding - Padding between the borders and the children of the
Container.
"""
def __init__ (self):
BaseWidget.__init__ (self)
self._children = []
self._spacing = 2
self._padding = 2
def set_depth (self, depth):
"""C.set_depth (...) -> None
Sets the depth of the Container.
Sets the depth of the Container and its children to the given
value.
"""
self.lock ()
BaseWidget.set_depth (self, depth)
for child in self.children:
child.set_depth (depth)
self.unlock ()
def set_spacing (self, spacing):
"""C.set_spacing (...) -> None
Sets the spacing between the children of the Container.
The spacing value is the amount of pixels to place between the
children of the Container.
Raises a TypeError, if the passed argument is not a positive
integer.
"""
if (type (spacing) != int) or (spacing < 0):
raise TypeError ("spacing must be a positive integer")
self._spacing = spacing
def set_padding (self, padding):
"""C.set_padding (...) -> None
Sets the padding between the edges and children of the Container.
The padding value is the amount of pixels to place between the
edges of the Container and its child widgets.
Raises a TypeError, if the passed argument is not a positive
integer.
"""
if (type (padding) != int) or (padding < 0):
raise TypeError ("padding must be a positive integer")
self._padding = padding
def set_indexable (self, indexable):
"""C.set_indexable (...) -> None
Sets the indexable of the Container.
Adds the Container to an IIndexable implementation and causes its
children to be added to the same, too.
"""
BaseWidget.set_indexable (self, indexable)
for child in self.children:
child.set_indexable (indexable)
def set_event_manager (self, manager):
"""C.set_event_manager (...) -> None
Sets the event manager of the Container.
Adds the Container to an event manager and causes its children
to be added to the same, too.
"""
BaseWidget.set_event_manager (self, manager)
for child in self.children:
child.set_event_manager (manager)
def set_sensitive (self, sensitive=True):
"""C.set_sensitive (...) -> None
Sets the sensitivity of the Container and its children.
"""
BaseWidget.set_sensitive (self, sensitive)
for child in self.children:
child.set_sensitive (sensitive)
def _add (self, **kwargs):
"""C._add (...) -> None
Internal add method for child additions to the container.
"""
children = kwargs.get ("children", [])
insert = kwargs.get ("insert", False)
pos = kwargs.get ("pos", 0)
for child in children:
if not isinstance (child, BaseWidget):
raise TypeError ("Widget %s must inherit from BaseWidget"
% child)
if child.parent:
raise Exception ("Widget %s already packed into another"
"Container" % child)
child.parent = self
if child.depth != self.depth:
child.set_depth (self.depth)
if not self.sensitive:
child.set_sensitive (self.sensitive)
if insert:
self.children.insert (pos, child)
else:
self.children.append (child)
if (self.manager != None) and (child.manager == None):
child.manager = self.manager
if (self.indexable != None) and (child.indexable == None):
child.indexable = self.indexable
self.dirty = True
def add_child (self, *children):
"""C.add_child (...) -> None
Adds one or more children to the Container.
Adds one or more children to the Container and updates the
parent-child relationships.
Raises a TypeError, if one of the passed arguments does not
inherit from the BaseWidget class.
Raises an Exception, if one of the passed arguments is already
attached to another parent.
"""
self._add (children=children)
def remove_child (self, child):
"""C.remove_child (...) -> None
Removes a child from the Container.
Removes the child from the Container and updates the
parent-child relationship of the child.
Raises a TypeError, if the passed argument does not inherit from
the BaseWidget class.
"""
if not isinstance (child, BaseWidget):
raise TypeError ("child must inherit from BaseWidget")
self.children.remove (child)
child.parent = None
self.dirty = True
def insert_child (self, pos, *children):
"""C.insert_child (...) -> None
Inserts one or more children at the desired position.
Inserts one or more children at the desired position to the
Container and updates the parent-child relationships.
Raises a TypeError, if one of the passed arguments does not
inherit from the BaseWidget class.
Raises an Exception, if one of the passed arguments is already
attached to another parent.
"""
self._add (children=children, insert=True, pos=pos)
def set_children (self, children):
"""C.set_children (...) -> None
Sets the children of the Container.
Sets the children of the Container to the passed list of
widgets. If the Container already contains children, they will
be removed first.
Raises a TypeError, if one of the passed arguments does not
inherit from the BaseWidget class.
Raises an Exception, if one of the passed arguments is already
attached to another parent.
"""
while len (self._children) > 0:
self.remove_child (self._children[0])
if (children != None):
self._add (children=children)
def destroy (self):
"""C.destroy () -> None
Destroys the Container and removes it from its event system.
"""
_pop = self.children.pop
while len (self.children) > 0:
widget = _pop ()
widget.parent = None
widget.destroy ()
del widget
# del self.children
BaseWidget.destroy (self)
def calculate_size (self):
"""C.calculate_size (...) -> int, int
Calculates the size needed by the children.
Calculates the size needed by the children and returns the
resulting width and height.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def dispose_widgets (self):
"""C.dispose_widgets (...) -> None
Sets the children to their correct positions within the Container.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def update (self, **kwargs):
"""C.update (...) -> None
Updates the Container and refreshes its image and rect content.
Updates the Container and causes its parent to update itself on
demand.
"""
children = kwargs.get ("children", {})
resize = kwargs.get ("resize", False)
# We have to check for possible size changes here!
if resize:
self.dirty = True
elif self.locked:
return
else:
# Get the intersections with other overlapping children and add
# them to the update list.
items = children.items ()
ch = self.children
for w, rect in items:
for widget in ch:
if w == widget:
continue
intersect = widget.rect.clip (rect)
if intersect.size != (0, 0):
children[widget] = intersect
BaseWidget.update (self, children=children, resize=resize)
spacing = property (lambda self: self._spacing,
lambda self, var: self.set_spacing (var),
doc = "The spacing between the children.")
padding = property (lambda self: self._padding,
lambda self, var: self.set_padding (var),
doc = "The additional padding for the Container.")
children = property (lambda self: self._children,
lambda self, var: self.set_children (var),
doc = "List of the children for the Container.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Container.py",
"copies": "1",
"size": "12012",
"license": "bsd-2-clause",
"hash": -6932945010455478000,
"line_mean": 34.9640718563,
"line_max": 78,
"alpha_frac": 0.6127206127,
"autogenerated": false,
"ratio": 4.822159775190687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5934880387890686,
"avg_score": null,
"num_lines": null
} |
"""$Id: content.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
#
# item element.
#
class textConstruct(validatorBase,rfc2396,nonhtml):
from validators import mime_re
import re
def getExpectedAttrNames(self):
return [(None, u'type'),(None, u'src')]
def normalizeWhitespace(self):
pass
def maptype(self):
if self.type.find('/') > -1:
self.log(InvalidTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
def prevalidate(self):
if self.attrs.has_key((None,"src")):
self.type=''
else:
self.type='text'
if self.getFeedType() == TYPE_RSS2 and self.name != 'atom_summary':
self.log(DuplicateDescriptionSemantics({"element":self.name}))
if self.attrs.has_key((None,"type")):
self.type=self.attrs.getValue((None,"type"))
if not self.type:
self.log(AttrNotBlank({"parent":self.parent.name, "element":self.name, "attr":"type"}))
self.maptype()
if self.attrs.has_key((None,"src")):
self.children.append(True) # force warnings about "mixed" content
self.value=self.attrs.getValue((None,"src"))
rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "src"})
self.value=""
if not self.attrs.has_key((None,"type")):
self.log(MissingTypeAttr({"parent":self.parent.name, "element":self.name, "attr":"type"}))
if self.type in ['text','html','xhtml'] and not self.attrs.has_key((None,"src")):
pass
elif self.type and not self.mime_re.match(self.type):
self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
else:
self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
if not self.xmlLang:
self.log(MissingDCLanguage({"parent":self.name, "element":"xml:lang"}))
def validate(self):
if self.type in ['text','xhtml']:
if self.type=='xhtml':
nonhtml.validate(self, NotInline)
else:
nonhtml.validate(self, ContainsUndeclaredHTML)
else:
if self.type.find('/') > -1 and not (
self.type.endswith('+xml') or self.type.endswith('/xml') or
self.type.startswith('text/')):
import base64
try:
self.value=base64.decodestring(self.value)
if self.type.endswith('/html'): self.type='html'
except:
self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
if self.type=='html' or self.type.endswith("/html"):
self.validateSafe(self.value)
if self.type.endswith("/html"):
if self.value.find("<html")<0 and not self.attrs.has_key((None,"src")):
self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
else:
nonhtml.validate(self, ContainsUndeclaredHTML)
if not self.value and len(self.children)==0 and not self.attrs.has_key((None,"src")):
self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
def textOK(self):
if self.children: validatorBase.textOK(self)
def characters(self, string):
for c in string:
if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
from validators import BadCharacters
self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
if (self.type=='xhtml') and string.strip() and not self.value.strip():
self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
validatorBase.characters(self,string)
def startElementNS(self, name, qname, attrs):
if (self.type<>'xhtml') and not (
self.type.endswith('+xml') or self.type.endswith('/xml')):
self.log(UndefinedElement({"parent":self.name, "element":name}))
if self.type=="xhtml":
if name<>'div' and not self.value.strip():
self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
elif qname not in ["http://www.w3.org/1999/xhtml"]:
self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace: %s" % qname}))
if self.type=="application/xhtml+xml":
if name<>'html':
self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
elif qname not in ["http://www.w3.org/1999/xhtml"]:
self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace: %s" % qname}))
if self.attrs.has_key((None,"mode")):
if self.attrs.getValue((None,"mode")) == 'escaped':
self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
if name=="div" and qname=="http://www.w3.org/1999/xhtml":
handler=diveater()
else:
handler=eater()
self.children.append(handler)
self.push(handler, name, attrs)
# treat xhtml:div as part of the content for purposes of detecting escaped html
class diveater(eater):
def __init__(self):
eater.__init__(self)
self.mixed = False
def textOK(self):
pass
def characters(self, string):
validatorBase.characters(self, string)
def startElementNS(self, name, qname, attrs):
if not qname:
self.log(MissingNamespace({"parent":"xhtml:div", "element":name}))
self.mixed = True
eater.startElementNS(self, name, qname, attrs)
def validate(self):
if not self.mixed: self.parent.value += self.value
class content(textConstruct):
def maptype(self):
if self.type == 'multipart/alternative':
self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
| {
"repo_name": "stone5495/NewsBlur",
"path": "vendor/feedvalidator/content.py",
"copies": "16",
"size": "6021",
"license": "mit",
"hash": 6620676962272808000,
"line_mean": 38.8741721854,
"line_max": 121,
"alpha_frac": 0.6454077396,
"autogenerated": false,
"ratio": 3.4074702886247876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.029028925946002013,
"num_lines": 151
} |
""" Core classes referenced by adapters to perform Peloton tasks. No
adapter provides services other than published interfaces
to methods in these classes."""
import peloton.utils.logging as logging
from peloton.exceptions import NoWorkersError
from peloton.exceptions import DeadProxyError
from peloton.exceptions import ServiceError
from twisted.internet.defer import Deferred
from cStringIO import StringIO
class PelotonInterface(object):
""" Subclasses of the PelotonInterface will all need access to
common objects, such as a logger and kernel hooks. These are provided
through this class. """
def __init__(self, kernel):
self.logger = logging.getLogger()
self.profile = kernel.profile
self.__kernel__ = kernel
class PelotonRequestInterface(PelotonInterface):
""" Methods of this class perform the core actions of Peloton nodes
such as executing a method or posting a request on the execution stack.
These methods are exposed via adapters. For clarity, although for no other
technical reason, methods intended for use via adapters are named
public_<name> by convention."""
def public_call(self, sessionId, target, service, method, args, kwargs):
""" Call a Peloton method in the specified service and return
a deferred for the result. Target refers to the output channel, e.g. html
or xml. """
d = Deferred()
self._publicCall(d, sessionId, target, service, method, args, kwargs)
return d
def _publicCall(self, d, sessionId, target, service, method, args, kwargs):
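        # Retry loop: ask the routing table for a proxy until the call can be
        # dispatched. NoWorkersError aborts the request outright; a dead
        # proxy is dropped from the routing table and the next one is tried.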
while True:
try:
p = self.__kernel__.routingTable.getPscProxyForService(service)
rd = p.call(service, method, *args, **kwargs)
# rd.addCallback(d.callback)
rd.addCallback(self.__callResponse, target, service, method, d)
rd.addErrback(self.__callError, p, d, sessionId, target, service, method, args, kwargs)
break
except NoWorkersError, npe:
self.logger.error("No workers: %s" % str(npe))
d.errback(npe)
break
except DeadProxyError, dpe:
self.__kernel__.routingTable.removeHandlerForService(service, proxy=p, removeAll=True)
def __callResponse(self, rv, target, service, method, d):
if target != 'raw':
profile, transforms = self.__kernel__.serviceLibrary.getProfile(service)
try:
txform = transforms[method]
except KeyError:
instanceInfo = {"resourceRoot" : profile["resourceRoot"],
"publishedName" : profile["publishedName"]}
txform = OutputTransform(instanceInfo, profile['methods'][method]['properties'])
transforms[method] = txform
systemInfo = {'publishedName' : profile['publishedName']}
rv = txform.transform(target, rv, systemInfo)
d.callback(rv)
def __callError(self, err, proxy, d, sessionId, target, service, method, args, kwargs):
if err.parents[-1] == 'peloton.exceptions.NoWorkersError' or \
err.parents[-1] == 'peloton.exceptions.DeadProxyError':
# so we got back from our PSC that it had no workers left. This is
# despite trying to start some more. We respond by removing from
# the service handlers and trying another.
self.__kernel__.routingTable.removeHandlerForService(service, proxy=proxy, removeAll=True)
self._publicCall(d, sessionId, target, service, method, args, kwargs)
else:
d.errback(err)
def public_post(self, sessionId, service, method, *args, **kwargs):
""" Post a method call onto the stack. The return value is the
grid-unique execution ID for this call. The method will be executed
when it reaches the head of the call stack."""
raise NotImplementedError
def public_postLater(self, sessionId, delay_seconds, service, method, *args, **kwargs):
""" Interface to a scheduled call system. Run the given method after
the specified interval. Return value is the grid-unique execution ID for this
call."""
raise NotImplementedError
def public_postAt(self, sessionId, dateTime, service, method, *args, **kwargs):
""" Interface to a scheduled call system. Run the given method at the
specified time. Return value is the grid-unique execution ID for this call."""
raise NotImplementedError
class PelotonEventInterface(PelotonInterface):
""" Methods for firing events on and listing to events from the event
framework.
For clarity, although for no other technical reason, methods intended for use
via adapters are named public_<name> by convention."""
def public_fireEvent(self, sessionId, eventChannel, eventName, payload):
""" Fire an event message onto the grid. """
raise NotImplementedError
def public_subscribeToEvent(self, sessionId, eventChannel, eventName):
""" Subscribe to recieve notifications of specified events. """
raise NotImplementedError
class PelotonNodeInterface(PelotonInterface):
""" Methods of this class relate to the node itself rather than services.
For clarity, although for no other technical reason, methods intended for use
via adapters are named public_<name> by convention."""
def public_ping(self, value=''):
""" Return the value sent. A basic node-level ping. """
return value
class PelotonInternodeInterface(PelotonInterface):
""" Methods for communication between nodes only, for example for relaying method
calls and exchanging status and profile information.
For clarity, although for no other technical reason, methods intended for use
via adapters are named public_<name> by convention."""
def public_relayCall(self, sessionId, service, method, *args, **kwargs):
""" Called by a remote node to relay a method request to this node.
The method is now executed on this node."""
p = self.__kernel__.routingTable.localProxy
rd = Deferred()
d = p.call(service, method, *args, **kwargs)
d.addCallback(rd.callback)
d.addErrback(self.__callError,rd, p, service)
return rd
def __callError(self, err, d, proxy, service):
if err.parents[-1] == 'peloton.exceptions.NoWorkersError':
# so we got back from our PSC that it had no workers left. This is
# despite trying to start some more. We respond by removing from
# the service handlers and trying another.
self.__kernel__.routingTable.removeHandlerForService(service, proxy=proxy, removeAll=True)
d.errback(err)
class PelotonManagementInterface(PelotonInterface):
""" Methods for use by management tools, such as a console,
the SSH terminal or similar. All methods prefixed public_ are available
for use in such tools."""
def public_shutdown(self):
# self.__kernel__.closedown()
self.__kernel__.domainManager.sendCommand('MESH_CLOSEDOWN')
def public_startPlugin(self, plugin):
self.__kernel__.startPlugin(plugin)
def public_stopPlugin(self, plugin):
self.__kernel__.stopPlugin(plugin)
def public_listPlugins(self, verbose=True):
theList = []
for name, plugin in self.__kernel__.plugins.items():
if verbose:
theList.append( (name, plugin.comment) )
else:
theList.append(name)
return theList
def public_listNodes(self, pprint=False):
pscs = self.__kernel__.routingTable.pscByGUID
nodes = {}
for guid,psc in pscs.items():
nodes[guid] = "%(hostname)s - %(ipaddress)s:%(port)s" % psc.profile
if pprint:
s = StringIO()
for k,v in nodes.items():
s.write("%s: %s\n" % (k,v))
return s.getvalue()
return nodes
def public_listServices(self, pprint=False):
pscs = self.__kernel__.routingTable.pscByService
services = {}
for svc, handlers in pscs.items():
services[svc] = len(handlers)
if pprint:
s = StringIO()
for k,v in services.items():
s.write("%s (%d handlers)\n" % (k, v))
return s.getvalue()
return services
def public_showProfile(self):
return self.__kernel__.profile.prettyprint()
def public_start(self, serviceName, runconfig=None):
self.__kernel__.launchService(serviceName, runconfig)
def public_stop(self, serviceName):
self.__kernel__.stopService(serviceName)
def public_noop(self):
self.__kernel__.domainManager.sendCommand('NOOP')
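# Illustrative sketch only (the service name is hypothetical): a
# management console built on this interface might run:
#
#   iface = PelotonManagementInterface(kernel)
#   iface.public_listNodes(pprint=True)   # one line per known PSC
#   iface.public_start('myService')       # launch a service by name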
from peloton.utils.transforms import *
class OutputTransform(object):
""" Initialises, manages and processes the transformation
of results from source to target. """
def __init__(self, overrideOpts, methodProperties):
self.transformChains = {}
# pull out all transforms, get instances
for k,v in methodProperties.items():
if not k.startswith('transform.'):
continue
target = k[10:]
transformChain = [eval(self.__clean(i)) for i in v]
self.transformChains[target] = transformChain
def __clean(self, method):
""" Takes an element of the transform chain as written in a
service. If no arguments are passed it may be written as, for example,
"defaultHTMLTransform", but for eval we need
"defaultHTMLTransform(overrideOpts)". This method effectively adds the
parentheses, inserting overrideOpts as the first argument.
"""
ix = method.find("(")
if ix == -1:
return "%s(overrideOpts)" % method
else:
# insert 'overrideOpts' as first argument
ix+=1
return "%soverrideOpts, %s" % (method[:ix], method[ix:])
def transform(self, target, value, opts):
""" Transform value through the transform chain for the specified
target. """
try:
chain = self.transformChains[target]
for fn in chain:
value = fn(value, opts)
return value
except KeyError:
raise ServiceError("Invalid target (%s) to transform value %s" % (target, str(value)))
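# A minimal sketch of the intended flow, assuming a method profile that
# declares a transform chain (defaultHTMLTransform is the name used in the
# __clean docstring and is expected to come from peloton.utils.transforms):
#
#   props = {'transform.html': ['defaultHTMLTransform']}
#   ot = OutputTransform({'resourceRoot': '/tmp', 'publishedName': 'demo'},
#                        props)
#   html = ot.transform('html', 'raw value', {'publishedName': 'demo'})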
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/coreio.py",
"copies": "1",
"size": "10604",
"license": "bsd-3-clause",
"hash": 4628732080276864000,
"line_mean": 41.2470119522,
"line_max": 103,
"alpha_frac": 0.6423990947,
"autogenerated": false,
"ratio": 4.298338062423997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5440737157123997,
"avg_score": null,
"num_lines": null
} |
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`. See `The Docutils Publisher`_.
.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
import pprint
from docutils import __version__, __version_details__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser
from docutils.transforms import Transformer
import docutils.readers.doctree
class Publisher:
"""
A facade encapsulating the high-level logic of a Docutils system.
"""
def __init__(self, reader=None, parser=None, writer=None,
source=None, source_class=io.FileInput,
destination=None, destination_class=io.FileOutput,
settings=None):
"""
Initial setup. If any of `reader`, `parser`, or `writer` are not
specified, the corresponding ``set_...`` method should be called with
a component name (`set_reader` sets the parser as well).
"""
self.document = None
"""The document tree (`docutils.nodes` objects)."""
self.reader = reader
"""A `docutils.readers.Reader` instance."""
self.parser = parser
"""A `docutils.parsers.Parser` instance."""
self.writer = writer
"""A `docutils.writers.Writer` instance."""
for component in 'reader', 'parser', 'writer':
assert not isinstance(getattr(self, component), str), (
'passed string "%s" as "%s" parameter; pass an instance, '
'or use the "%s_name" parameter instead (in '
'docutils.core.publish_* convenience functions).'
% (getattr(self, component), component, component))
self.source = source
"""The source of input data, a `docutils.io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
"""The destination for docutils output, a `docutils.io.Output`
instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
self.settings = settings
"""An object containing Docutils settings as instance attributes.
Set by `self.process_command_line()` or `self.get_settings()`."""
def set_reader(self, reader_name, parser, parser_name):
"""Set `self.reader` by name."""
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser
def set_writer(self, writer_name):
"""Set `self.writer` by name."""
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class()
def set_components(self, reader_name, parser_name, writer_name):
if self.reader is None:
self.set_reader(reader_name, self.parser, parser_name)
if self.parser is None:
if self.reader.parser is None:
self.reader.set_parser(parser_name)
self.parser = self.reader.parser
if self.writer is None:
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
if config_section:
if not settings_spec:
settings_spec = SettingsSpec()
settings_spec.config_section = config_section
parts = config_section.split()
if len(parts) > 1 and parts[-1] == 'application':
settings_spec.config_section_dependencies = ['applications']
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(self.parser, self.reader, self.writer, settings_spec),
defaults=defaults, read_config_files=1,
usage=usage, description=description)
return option_parser
def get_settings(self, usage=None, description=None,
settings_spec=None, config_section=None, **defaults):
"""
Set and return default settings (overrides in `defaults` dict).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
def process_programmatic_settings(self, settings_spec,
settings_overrides,
config_section):
if self.settings is None:
defaults = (settings_overrides or {}).copy()
# Propagate exceptions by default when used programmatically:
defaults.setdefault('traceback', 1)
self.get_settings(settings_spec=settings_spec,
config_section=config_section,
**defaults)
def process_command_line(self, argv=None, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
if argv is None:
argv = sys.argv[1:]
self.settings = option_parser.parse_args(argv)
def set_io(self, source_path=None, destination_path=None):
if self.source is None:
self.set_source(source_path=source_path)
if self.destination is None:
self.set_destination(destination_path=destination_path)
def set_source(self, source=None, source_path=None):
if source_path is None:
source_path = self.settings._source
else:
self.settings._source = source_path
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
def set_destination(self, destination=None, destination_path=None):
if destination_path is None:
destination_path = self.settings._destination
else:
self.settings._destination = destination_path
self.destination = self.destination_class(
destination=destination, destination_path=destination_path,
encoding=self.settings.output_encoding,
error_handler=self.settings.output_encoding_error_handler)
def apply_transforms(self):
self.document.transformer.populate_from_components(
(self.source, self.reader, self.reader.parser, self.writer,
self.destination))
self.document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=None):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit, error:
exit = 1
exit_status = error.code
except Exception, error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = 1
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output
def debugging_dumps(self):
if not self.document:
return
if self.settings.dump_settings:
print >>sys.stderr, '\n::: Runtime settings:'
print >>sys.stderr, pprint.pformat(self.settings.__dict__)
if self.settings.dump_internals:
print >>sys.stderr, '\n::: Document internals:'
print >>sys.stderr, pprint.pformat(self.document.__dict__)
if self.settings.dump_transforms:
print >>sys.stderr, '\n::: Transforms applied:'
print >>sys.stderr, (' (priority, transform class, '
'pending node details, keyword args)')
print >>sys.stderr, pprint.pformat(
[(priority, '%s.%s' % (xclass.__module__, xclass.__name__),
pending and pending.details, kwargs)
for priority, xclass, pending, kwargs
in self.document.transformer.applied])
if self.settings.dump_pseudo_xml:
print >>sys.stderr, '\n::: Pseudo-XML:'
print >>sys.stderr, self.document.pformat().encode(
'raw_unicode_escape')
def report_Exception(self, error):
if isinstance(error, utils.SystemMessage):
self.report_SystemMessage(error)
elif isinstance(error, UnicodeEncodeError):
self.report_UnicodeError(error)
else:
print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)
print >>sys.stderr, ("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <[email protected]>.
Include "--traceback" output, Docutils version (%s [%s]),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, __version_details__,
sys.version.split()[0]))
def report_SystemMessage(self, error):
print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level]))
def report_UnicodeError(self, error):
sys.stderr.write(
'%s: %s\n'
'\n'
'The specified output encoding (%s) cannot\n'
'handle all of the output.\n'
'Try setting "--output-encoding-error-handler" to\n'
'\n'
'* "xmlcharrefreplace" (for HTML & XML output);\n'
% (error.__class__.__name__, error,
self.settings.output_encoding))
try:
data = error.object[error.start:error.end]
sys.stderr.write(
' the output will contain "%s" and should be usable.\n'
'* "backslashreplace" (for other output formats, Python 2.3+);\n'
' look for "%s" in the output.\n'
% (data.encode('ascii', 'xmlcharrefreplace'),
data.encode('ascii', 'backslashreplace')))
except AttributeError:
sys.stderr.write(' the output should be usable as-is.\n')
sys.stderr.write(
'* "replace"; look for "?" in the output.\n'
'\n'
'"--output-encoding-error-handler" is currently set to "%s".\n'
'\n'
'Exiting due to error. Use "--traceback" to diagnose.\n'
'If the advice above doesn\'t eliminate the error,\n'
'please report it to <[email protected]>.\n'
'Include "--traceback" output, Docutils version (%s),\n'
'Python version (%s), your OS type & version, and the\n'
'command line used.\n'
% (self.settings.output_encoding_error_handler,
__version__, sys.version.split()[0]))
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sf.net/docs/user/config.html> for '
'the full reference.')
def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=1, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=None):
"""
Set up & run a `Publisher` for programmatic use with file-like I/O.
Return the encoded string output also.
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.FileInput, source=source, source_path=source_path,
destination_class=io.FileOutput,
destination=destination, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher` for programmatic use with string I/O. Return
the encoded string or Unicode string output.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
Similarly for Unicode string input (`source`)::
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
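# Example (illustrative): render a reStructuredText string with the HTML
# writer and get the encoded result back:
#
#   html = publish_string('*hello*', writer_name='html')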
def publish_parts(source, source_path=None, source_class=io.StringInput,
destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts
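# Example (illustrative): the HTML writer exposes named parts such as
# 'html_body':
#
#   parts = publish_parts('*hello*', writer_name='html')
#   body = parts['html_body']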
def publish_doctree(source, source_path=None,
source_class=io.StringInput,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher` for programmatic use with string I/O.
Return the document tree.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's one way::
publish_doctree(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
pub = Publisher(reader=reader, parser=parser, writer=None,
settings=settings,
source_class=source_class,
destination_class=io.NullOutput)
pub.set_components(reader_name, parser_name, 'null')
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(None, None)
output = pub.publish(enable_exit_status=enable_exit_status)
return pub.document
def publish_from_doctree(document, destination_path=None,
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
"""
Set up & run a `Publisher` to render from an existing document
tree data structure, for programmatic use with string I/O. Return
the encoded string output.
Note that document.settings is overridden; if you want to use the settings
of the original `document`, pass settings=document.settings.
Also, new document.transformer and document.reporter objects are
generated.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_from_doctree(
..., settings_overrides={'output_encoding': 'unicode'})
Parameters: `document` is a `docutils.nodes.document` object, an existing
document tree.
Other parameters: see `publish_programmatically`.
"""
reader = docutils.readers.doctree.Reader(parser_name='null')
pub = Publisher(reader, None, writer,
source=io.DocTreeInput(document),
destination_class=io.StringOutput, settings=settings)
if not writer and writer_name:
pub.set_writer(writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_destination(None, destination_path)
return pub.publish(enable_exit_status=enable_exit_status)
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=1, argv=None,
usage=default_usage, description=default_description,
destination=None, destination_class=io.BinaryFileOutput
):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
This is just like publish_cmdline, except that it uses
io.BinaryFileOutput instead of io.FileOutput.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_programmatically(source_class, source, source_path,
destination_class, destination, destination_path,
reader, reader_name,
parser, parser_name,
writer, writer_name,
settings, settings_spec,
settings_overrides, config_section,
enable_exit_status):
"""
Set up & run a `Publisher` for custom programmatic use. Return the
encoded string output and the Publisher object.
Applications should not need to call this function directly. If it does
seem to be necessary to call this function directly, please write to the
Docutils-develop mailing list
<http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>.
Parameters:
* `source_class` **required**: The class for dynamically created source
objects. Typically `io.FileInput` or `io.StringInput`.
* `source`: Type depends on `source_class`:
- If `source_class` is `io.FileInput`: Either a file-like object
(must have 'read' and 'close' methods), or ``None``
(`source_path` is opened). If neither `source` nor
`source_path` are supplied, `sys.stdin` is used.
- If `source_class` is `io.StringInput` **required**: The input
string, either an encoded 8-bit string (set the
'input_encoding' setting to the correct encoding) or a Unicode
string (set the 'input_encoding' setting to 'unicode').
* `source_path`: Type depends on `source_class`:
- `io.FileInput`: Path to the input file, opened if no `source`
supplied.
- `io.StringInput`: Optional. Path to the file or object that produced
`source`. Only used for diagnostic output.
* `destination_class` **required**: The class for dynamically created
destination objects. Typically `io.FileOutput` or `io.StringOutput`.
* `destination`: Type depends on `destination_class`:
- `io.FileOutput`: Either a file-like object (must have 'write' and
'close' methods), or ``None`` (`destination_path` is opened). If
neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `io.StringOutput`: Not used; pass ``None``.
* `destination_path`: Type depends on `destination_class`:
- `io.FileOutput`: Path to the output file. Opened if no `destination`
supplied.
- `io.StringOutput`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
* `reader`: A `docutils.readers.Reader` object.
* `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
* `parser`: A `docutils.parsers.Parser` object.
* `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
* `writer`: A `docutils.writers.Writer` object.
* `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
* `settings`: A runtime settings (`docutils.frontend.Values`) object, for
dotted-attribute access to runtime settings. It's the end result of the
`SettingsSpec`, config file, and option processing. If `settings` is
passed, it's assumed to be complete and no further setting/config/option
processing is done.
* `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
extra application-specific settings definitions independently of
components. In other words, the application becomes a component, and
its settings data is processed along with that of the other components.
Used only if no `settings` specified.
* `settings_overrides`: A dictionary containing application-specific
settings defaults that override the defaults of other components.
Used only if no `settings` specified.
* `config_section`: A string, the name of the configuration file section
for this application. Overrides the ``config_section`` attribute
defined by `settings_spec`. Used only if no `settings` specified.
* `enable_exit_status`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
output = pub.publish(enable_exit_status=enable_exit_status)
return output, pub
| {
"repo_name": "rimbalinux/LMD3",
"path": "docutils/core.py",
"copies": "2",
"size": "29126",
"license": "bsd-3-clause",
"hash": -8649670899984873000,
"line_mean": 43.0170015456,
"line_max": 81,
"alpha_frac": 0.6092838014,
"autogenerated": false,
"ratio": 4.424426553243202,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006009604331066999,
"num_lines": 647
} |
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`. See `The Docutils Publisher`_.
.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
import pprint
from docutils import __version__, __version_details__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser
from docutils.transforms import Transformer
from docutils.error_reporting import ErrorOutput, ErrorString
import docutils.readers.doctree
class Publisher:
"""
A facade encapsulating the high-level logic of a Docutils system.
"""
def __init__(self, reader=None, parser=None, writer=None,
source=None, source_class=io.FileInput,
destination=None, destination_class=io.FileOutput,
settings=None):
"""
Initial setup. If any of `reader`, `parser`, or `writer` are not
specified, the corresponding ``set_...`` method should be called with
a component name (`set_reader` sets the parser as well).
"""
self.document = None
"""The document tree (`docutils.nodes` objects)."""
self.reader = reader
"""A `docutils.readers.Reader` instance."""
self.parser = parser
"""A `docutils.parsers.Parser` instance."""
self.writer = writer
"""A `docutils.writers.Writer` instance."""
for component in 'reader', 'parser', 'writer':
assert not isinstance(getattr(self, component), str), (
'passed string "%s" as "%s" parameter; pass an instance, '
'or use the "%s_name" parameter instead (in '
'docutils.core.publish_* convenience functions).'
% (getattr(self, component), component, component))
self.source = source
"""The source of input data, a `docutils.io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
"""The destination for docutils output, a `docutils.io.Output`
instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
self.settings = settings
"""An object containing Docutils settings as instance attributes.
Set by `self.process_command_line()` or `self.get_settings()`."""
self._stderr = ErrorOutput()
def set_reader(self, reader_name, parser, parser_name):
"""Set `self.reader` by name."""
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser
def set_writer(self, writer_name):
"""Set `self.writer` by name."""
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class()
def set_components(self, reader_name, parser_name, writer_name):
if self.reader is None:
self.set_reader(reader_name, self.parser, parser_name)
if self.parser is None:
if self.reader.parser is None:
self.reader.set_parser(parser_name)
self.parser = self.reader.parser
if self.writer is None:
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
if config_section:
if not settings_spec:
settings_spec = SettingsSpec()
settings_spec.config_section = config_section
parts = config_section.split()
if len(parts) > 1 and parts[-1] == 'application':
settings_spec.config_section_dependencies = ['applications']
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(self.parser, self.reader, self.writer, settings_spec),
defaults=defaults, read_config_files=True,
usage=usage, description=description)
return option_parser
def get_settings(self, usage=None, description=None,
settings_spec=None, config_section=None, **defaults):
"""
Set and return default settings (overrides in `defaults` dict).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
def process_programmatic_settings(self, settings_spec,
settings_overrides,
config_section):
if self.settings is None:
defaults = (settings_overrides or {}).copy()
# Propagate exceptions by default when used programmatically:
defaults.setdefault('traceback', True)
self.get_settings(settings_spec=settings_spec,
config_section=config_section,
**defaults)
def process_command_line(self, argv=None, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
if argv is None:
argv = sys.argv[1:]
# converting to Unicode (Python 3 does this automatically):
if sys.version_info < (3,0):
# TODO: make this failsafe and reversible?
argv_encoding = (frontend.locale_encoding or 'ascii')
argv = [a.decode(argv_encoding) for a in argv]
self.settings = option_parser.parse_args(argv)
def set_io(self, source_path=None, destination_path=None):
if self.source is None:
self.set_source(source_path=source_path)
if self.destination is None:
self.set_destination(destination_path=destination_path)
def set_source(self, source=None, source_path=None):
if source_path is None:
source_path = self.settings._source
else:
self.settings._source = source_path
# Raise IOError instead of system exit with `traceback == True`
# TODO: change io.FileInput's default behaviour and remove this hack
try:
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding,
handle_io_errors=False)
except TypeError:
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
def set_destination(self, destination=None, destination_path=None):
if destination_path is None:
destination_path = self.settings._destination
else:
self.settings._destination = destination_path
self.destination = self.destination_class(
destination=destination, destination_path=destination_path,
encoding=self.settings.output_encoding,
error_handler=self.settings.output_encoding_error_handler)
# Raise IOError instead of system exit with `traceback == True`
# TODO: change io.FileOutput's default behaviour and remove this hack
self.destination.handle_io_errors = False
def apply_transforms(self):
self.document.transformer.populate_from_components(
(self.source, self.reader, self.reader.parser, self.writer,
self.destination))
self.document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit, error:
exit = 1
exit_status = error.code
except Exception, error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = True
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output
def debugging_dumps(self):
if not self.document:
return
if self.settings.dump_settings:
print >>self._stderr, '\n::: Runtime settings:'
print >>self._stderr, pprint.pformat(self.settings.__dict__)
if self.settings.dump_internals:
print >>self._stderr, '\n::: Document internals:'
print >>self._stderr, pprint.pformat(self.document.__dict__)
if self.settings.dump_transforms:
print >>self._stderr, '\n::: Transforms applied:'
print >>self._stderr, (' (priority, transform class, '
'pending node details, keyword args)')
print >>self._stderr, pprint.pformat(
[(priority, '%s.%s' % (xclass.__module__, xclass.__name__),
pending and pending.details, kwargs)
for priority, xclass, pending, kwargs
in self.document.transformer.applied])
if self.settings.dump_pseudo_xml:
print >>self._stderr, '\n::: Pseudo-XML:'
print >>self._stderr, self.document.pformat().encode(
'raw_unicode_escape')
def report_Exception(self, error):
if isinstance(error, utils.SystemMessage):
self.report_SystemMessage(error)
elif isinstance(error, UnicodeEncodeError):
self.report_UnicodeError(error)
elif isinstance(error, io.InputError):
self._stderr.write(u'Unable to open source file for reading:\n'
u' %s\n' % ErrorString(error))
elif isinstance(error, io.OutputError):
self._stderr.write(
u'Unable to open destination file for writing:\n'
u' %s\n' % ErrorString(error))
else:
print >>self._stderr, u'%s' % ErrorString(error)
print >>self._stderr, ("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <[email protected]>.
Include "--traceback" output, Docutils version (%s [%s]),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, __version_details__,
sys.version.split()[0]))
def report_SystemMessage(self, error):
print >>self._stderr, ('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level]))
def report_UnicodeError(self, error):
data = error.object[error.start:error.end]
self._stderr.write(
'%s\n'
'\n'
'The specified output encoding (%s) cannot\n'
'handle all of the output.\n'
'Try setting "--output-encoding-error-handler" to\n'
'\n'
'* "xmlcharrefreplace" (for HTML & XML output);\n'
' the output will contain "%s" and should be usable.\n'
'* "backslashreplace" (for other output formats);\n'
' look for "%s" in the output.\n'
'* "replace"; look for "?" in the output.\n'
'\n'
'"--output-encoding-error-handler" is currently set to "%s".\n'
'\n'
'Exiting due to error. Use "--traceback" to diagnose.\n'
'If the advice above doesn\'t eliminate the error,\n'
'please report it to <[email protected]>.\n'
'Include "--traceback" output, Docutils version (%s),\n'
'Python version (%s), your OS type & version, and the\n'
'command line used.\n'
% (ErrorString(error),
self.settings.output_encoding,
data.encode('ascii', 'xmlcharrefreplace'),
data.encode('ascii', 'backslashreplace'),
self.settings.output_encoding_error_handler,
__version__, sys.version.split()[0]))
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sf.net/docs/user/config.html> for '
'the full reference.')
def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with file-like I/O.
Return the encoded string output also.
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.FileInput, source=source, source_path=source_path,
destination_class=io.FileOutput,
destination=destination, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with string I/O. Return
the encoded string or Unicode string output.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
Similarly for Unicode string input (`source`)::
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
def publish_parts(source, source_path=None, source_class=io.StringInput,
destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts
def publish_doctree(source, source_path=None,
source_class=io.StringInput,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with string I/O.
Return the document tree.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's one way::
publish_doctree(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
pub = Publisher(reader=reader, parser=parser, writer=None,
settings=settings,
source_class=source_class,
destination_class=io.NullOutput)
pub.set_components(reader_name, parser_name, 'null')
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(None, None)
output = pub.publish(enable_exit_status=enable_exit_status)
return pub.document
def publish_from_doctree(document, destination_path=None,
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` to render from an existing document
tree data structure, for programmatic use with string I/O. Return
the encoded string output.
Note that document.settings is overridden; if you want to use the settings
of the original `document`, pass settings=document.settings.
Also, new document.transformer and document.reporter objects are
generated.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_from_doctree(
..., settings_overrides={'output_encoding': 'unicode'})
Parameters: `document` is a `docutils.nodes.document` object, an existing
document tree.
Other parameters: see `publish_programmatically`.
"""
reader = docutils.readers.doctree.Reader(parser_name='null')
pub = Publisher(reader, None, writer,
source=io.DocTreeInput(document),
destination_class=io.StringOutput, settings=settings)
if not writer and writer_name:
pub.set_writer(writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_destination(None, destination_path)
return pub.publish(enable_exit_status=enable_exit_status)
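# Illustrative round trip using the two doctree functions above:
#
#   doctree = publish_doctree('*hello*')
#   html = publish_from_doctree(doctree, writer_name='html')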
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description,
destination=None, destination_class=io.BinaryFileOutput
):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
This is just like publish_cmdline, except that it uses
io.BinaryFileOutput instead of io.FileOutput.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_programmatically(source_class, source, source_path,
destination_class, destination, destination_path,
reader, reader_name,
parser, parser_name,
writer, writer_name,
settings, settings_spec,
settings_overrides, config_section,
enable_exit_status):
"""
Set up & run a `Publisher` for custom programmatic use. Return the
encoded string output and the Publisher object.
Applications should not need to call this function directly. If it does
seem to be necessary to call this function directly, please write to the
Docutils-develop mailing list
<http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>.
Parameters:
* `source_class` **required**: The class for dynamically created source
objects. Typically `io.FileInput` or `io.StringInput`.
* `source`: Type depends on `source_class`:
- If `source_class` is `io.FileInput`: Either a file-like object
(must have 'read' and 'close' methods), or ``None``
(`source_path` is opened). If neither `source` nor
`source_path` are supplied, `sys.stdin` is used.
- If `source_class` is `io.StringInput` **required**: The input
string, either an encoded 8-bit string (set the
'input_encoding' setting to the correct encoding) or a Unicode
string (set the 'input_encoding' setting to 'unicode').
* `source_path`: Type depends on `source_class`:
- `io.FileInput`: Path to the input file, opened if no `source`
supplied.
- `io.StringInput`: Optional. Path to the file or object that produced
`source`. Only used for diagnostic output.
* `destination_class` **required**: The class for dynamically created
destination objects. Typically `io.FileOutput` or `io.StringOutput`.
* `destination`: Type depends on `destination_class`:
- `io.FileOutput`: Either a file-like object (must have 'write' and
'close' methods), or ``None`` (`destination_path` is opened). If
neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `io.StringOutput`: Not used; pass ``None``.
* `destination_path`: Type depends on `destination_class`:
- `io.FileOutput`: Path to the output file. Opened if no `destination`
supplied.
- `io.StringOutput`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
* `reader`: A `docutils.readers.Reader` object.
* `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
* `parser`: A `docutils.parsers.Parser` object.
* `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
* `writer`: A `docutils.writers.Writer` object.
* `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
* `settings`: A runtime settings (`docutils.frontend.Values`) object, for
dotted-attribute access to runtime settings. It's the end result of the
`SettingsSpec`, config file, and option processing. If `settings` is
passed, it's assumed to be complete and no further setting/config/option
processing is done.
* `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
extra application-specific settings definitions independently of
components. In other words, the application becomes a component, and
its settings data is processed along with that of the other components.
Used only if no `settings` specified.
* `settings_overrides`: A dictionary containing application-specific
settings defaults that override the defaults of other components.
Used only if no `settings` specified.
* `config_section`: A string, the name of the configuration file section
for this application. Overrides the ``config_section`` attribute
defined by `settings_spec`. Used only if no `settings` specified.
* `enable_exit_status`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
output = pub.publish(enable_exit_status=enable_exit_status)
return output, pub
| {
"repo_name": "neumerance/deploy",
"path": ".venv/lib/python2.7/site-packages/docutils/core.py",
"copies": "4",
"size": "29656",
"license": "apache-2.0",
"hash": 7813100253511853000,
"line_mean": 43.4617691154,
"line_max": 78,
"alpha_frac": 0.6234151605,
"autogenerated": false,
"ratio": 4.381796690307328,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7005211850807329,
"avg_score": null,
"num_lines": null
} |
# $Id: crc32c.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import array
# CRC-32C Checksum for SCTP
# http://tools.ietf.org/html/rfc3309
crc32c_table = (
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F,
0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, 0x8AD958CF, 0x78B2DBCC,
0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27,
0x5E133C24, 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, 0x9A879FA0,
0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC,
0xBC267848, 0x4E4DFB4B, 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29,
0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E,
0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, 0x30E349B1, 0xC288CAB2,
0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59,
0xE4292D5A, 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, 0x417B1DBC,
0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0,
0x67DAFA54, 0x95B17957, 0xCBA24573, 0x39C9C670, 0x2A993584,
0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC,
0x64D4CECF, 0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F,
0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4,
0x0F36E6F7, 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, 0xEB1FCBAD,
0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1,
0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E, 0x90A324FA,
0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD,
0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, 0x82F63B78, 0x709DB87B,
0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90,
0x563C5F93, 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, 0x92A8FC17,
0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B,
0xB4091BFF, 0x466298FC, 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F,
0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9,
0x97BAA1BA, 0x84EA524E, 0x7681D14D, 0x2892ED69, 0xDAF96E6A,
0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81,
0xFC588982, 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, 0x38CC2A06,
0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A,
0x1E6DCDEE, 0xEC064EED, 0xC38D26C4, 0x31E6A5C7, 0x22B65633,
0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914,
0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8,
0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643,
0x07198540, 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, 0xE330A81A,
0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06,
0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6, 0x88D28022,
0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A,
0xC69F7B69, 0xD5CF889D, 0x27A40B9E, 0x79B737BA, 0x8BDCB4B9,
0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052,
0xAD7D5351
)
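# Sanity-check helper (illustrative, not part of dpkt): the 256-entry table
# above is the reflected CRC-32C lookup table and can be regenerated from the
# reflected polynomial 0x82F63B78; _gen_crc32c_table() == crc32c_table.
def _gen_crc32c_table(poly=0x82F63B78):
    table = []
    for byte in range(256):
        crc = byte
        for _ in range(8):
            # reflected CRC step: shift right, fold in the polynomial on carry
            crc = (crc >> 1) ^ poly if crc & 1 else crc >> 1
        table.append(crc)
    return tuple(table)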
def add(crc, buf):
byte_buf = array.array('B', buf)
for b in byte_buf:
crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
return crc
def done(crc):
tmp = ~crc & 0xffffffff
b0 = tmp & 0xff
b1 = (tmp >> 8) & 0xff
b2 = (tmp >> 16) & 0xff
b3 = (tmp >> 24) & 0xff
crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
return crc
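# done() complements the running CRC and then mirrors the byte order; an
# equivalent formulation (illustrative, not part of dpkt) via struct:
#
#     from struct import pack, unpack
#     def done_alt(crc):
#         return unpack('<I', pack('>I', ~crc & 0xffffffff))[0]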
def cksum(buf):
"""Return computed CRC-32c checksum."""
return done(add(0xffffffff, buf))
def test_crc32c():
def bswap32(x):
from struct import pack, unpack
return unpack('<I', pack('>I', x))[0]
# reference test value from CRC catalogue
# http://reveng.sourceforge.net/crc-catalogue/17plus.htm#crc.cat.crc-32c
    # SCTP uses transport-level mirrored byte ordering, so we bswap32
assert cksum(b'') == 0
assert cksum(b'123456789') == bswap32(0xe3069283)
if __name__ == '__main__':
test_crc32c()
print('Tests Successful...')
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/crc32c.py",
"copies": "3",
"size": "4444",
"license": "bsd-3-clause",
"hash": 6715323622813542000,
"line_mean": 41.7307692308,
"line_max": 76,
"alpha_frac": 0.7304230423,
"autogenerated": false,
"ratio": 1.831067161104244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40614902034042444,
"avg_score": null,
"num_lines": null
} |
# $Id: crc32c.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
import array
# CRC-32C Checksum
# http://tools.ietf.org/html/rfc3309
crc32c_table = (
0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 0xC79A971FL,
0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 0x8AD958CFL, 0x78B2DBCCL,
0x6BE22838L, 0x9989AB3BL, 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L,
0x5E133C24L, 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, 0x9A879FA0L,
0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, 0x5D1D08BFL, 0xAF768BBCL,
0xBC267848L, 0x4E4DFB4BL, 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L,
0x33ED7D2AL, 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, 0x6DFE410EL,
0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, 0x30E349B1L, 0xC288CAB2L,
0xD1D83946L, 0x23B3BA45L, 0xF779DEAEL, 0x05125DADL, 0x1642AE59L,
0xE4292D5AL, 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, 0x417B1DBCL,
0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, 0x86E18AA3L, 0x748A09A0L,
0x67DAFA54L, 0x95B17957L, 0xCBA24573L, 0x39C9C670L, 0x2A993584L,
0xD8F2B687L, 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, 0x96BF4DCCL,
0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, 0xDBFC821CL, 0x2997011FL,
0x3AC7F2EBL, 0xC8AC71E8L, 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L,
0x0F36E6F7L, 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, 0xEB1FCBADL,
0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, 0x2C855CB2L, 0xDEEEDFB1L,
0xCDBE2C45L, 0x3FD5AF46L, 0x7198540DL, 0x83F3D70EL, 0x90A324FAL,
0x62C8A7F9L, 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, 0x3CDB9BDDL,
0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, 0x82F63B78L, 0x709DB87BL,
0x63CD4B8FL, 0x91A6C88CL, 0x456CAC67L, 0xB7072F64L, 0xA457DC90L,
0x563C5F93L, 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, 0x92A8FC17L,
0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, 0x55326B08L, 0xA759E80BL,
0xB4091BFFL, 0x466298FCL, 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL,
0x0B21572CL, 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, 0x65D122B9L,
0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, 0x2892ED69L, 0xDAF96E6AL,
0xC9A99D9EL, 0x3BC21E9DL, 0xEF087A76L, 0x1D63F975L, 0x0E330A81L,
0xFC588982L, 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, 0x38CC2A06L,
0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, 0xFF56BD19L, 0x0D3D3E1AL,
0x1E6DCDEEL, 0xEC064EEDL, 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L,
0xD0DDD530L, 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, 0x8ECEE914L,
0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, 0xD3D3E1ABL, 0x21B862A8L,
0x32E8915CL, 0xC083125FL, 0x144976B4L, 0xE622F5B7L, 0xF5720643L,
0x07198540L, 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, 0xE330A81AL,
0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, 0x24AA3F05L, 0xD6C1BC06L,
0xC5914FF2L, 0x37FACCF1L, 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L,
0x7AB90321L, 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, 0x34F4F86AL,
0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, 0x79B737BAL, 0x8BDCB4B9L,
0x988C474DL, 0x6AE7C44EL, 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L,
0xAD7D5351L
)
def add(crc, buf):
buf = array.array('B', buf)
for b in buf:
crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
return crc
def done(crc):
tmp = ~crc & 0xffffffffL
b0 = tmp & 0xff
b1 = (tmp >> 8) & 0xff
b2 = (tmp >> 16) & 0xff
b3 = (tmp >> 24) & 0xff
crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
return crc
def cksum(buf):
"""Return computed CRC-32c checksum."""
return done(add(0xffffffffL, buf))
| {
"repo_name": "hexcap/dpkt",
"path": "dpkt/crc32c.py",
"copies": "6",
"size": "4160",
"license": "bsd-3-clause",
"hash": -5374550263356379000,
"line_mean": 49.1204819277,
"line_max": 68,
"alpha_frac": 0.7584134615,
"autogenerated": false,
"ratio": 1.7028243962341383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5461237857734138,
"avg_score": null,
"num_lines": null
} |
# $Id: crc32c.py 23 2006-11-08 15:45:33Z dugsong $
import array
# CRC-32C Checksum
# http://tools.ietf.org/html/rfc3309
crc32c_table = (
0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 0xC79A971FL,
0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 0x8AD958CFL, 0x78B2DBCCL,
0x6BE22838L, 0x9989AB3BL, 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L,
0x5E133C24L, 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, 0x9A879FA0L,
0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, 0x5D1D08BFL, 0xAF768BBCL,
0xBC267848L, 0x4E4DFB4BL, 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L,
0x33ED7D2AL, 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, 0x6DFE410EL,
0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, 0x30E349B1L, 0xC288CAB2L,
0xD1D83946L, 0x23B3BA45L, 0xF779DEAEL, 0x05125DADL, 0x1642AE59L,
0xE4292D5AL, 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, 0x417B1DBCL,
0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, 0x86E18AA3L, 0x748A09A0L,
0x67DAFA54L, 0x95B17957L, 0xCBA24573L, 0x39C9C670L, 0x2A993584L,
0xD8F2B687L, 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, 0x96BF4DCCL,
0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, 0xDBFC821CL, 0x2997011FL,
0x3AC7F2EBL, 0xC8AC71E8L, 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L,
0x0F36E6F7L, 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, 0xEB1FCBADL,
0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, 0x2C855CB2L, 0xDEEEDFB1L,
0xCDBE2C45L, 0x3FD5AF46L, 0x7198540DL, 0x83F3D70EL, 0x90A324FAL,
0x62C8A7F9L, 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, 0x3CDB9BDDL,
0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, 0x82F63B78L, 0x709DB87BL,
0x63CD4B8FL, 0x91A6C88CL, 0x456CAC67L, 0xB7072F64L, 0xA457DC90L,
0x563C5F93L, 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, 0x92A8FC17L,
0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, 0x55326B08L, 0xA759E80BL,
0xB4091BFFL, 0x466298FCL, 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL,
0x0B21572CL, 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, 0x65D122B9L,
0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, 0x2892ED69L, 0xDAF96E6AL,
0xC9A99D9EL, 0x3BC21E9DL, 0xEF087A76L, 0x1D63F975L, 0x0E330A81L,
0xFC588982L, 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, 0x38CC2A06L,
0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, 0xFF56BD19L, 0x0D3D3E1AL,
0x1E6DCDEEL, 0xEC064EEDL, 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L,
0xD0DDD530L, 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, 0x8ECEE914L,
0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, 0xD3D3E1ABL, 0x21B862A8L,
0x32E8915CL, 0xC083125FL, 0x144976B4L, 0xE622F5B7L, 0xF5720643L,
0x07198540L, 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, 0xE330A81AL,
0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, 0x24AA3F05L, 0xD6C1BC06L,
0xC5914FF2L, 0x37FACCF1L, 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L,
0x7AB90321L, 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, 0x34F4F86AL,
0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, 0x79B737BAL, 0x8BDCB4B9L,
0x988C474DL, 0x6AE7C44EL, 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L,
0xAD7D5351L
)
def add(crc, buf):
buf = array.array('B', buf)
for b in buf:
crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
return crc
def done(crc):
tmp = ~crc & 0xffffffffL
b0 = tmp & 0xff
b1 = (tmp >> 8) & 0xff
b2 = (tmp >> 16) & 0xff
b3 = (tmp >> 24) & 0xff
crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
return crc
def cksum(buf):
"""Return computed CRC-32c checksum."""
return done(add(0xffffffffL, buf))
| {
"repo_name": "dproc/trex_odp_porting_integration",
"path": "scripts/external_libs/dpkt-1.8.6/dpkt/crc32c.py",
"copies": "15",
"size": "4138",
"license": "apache-2.0",
"hash": -1433405102775106800,
"line_mean": 50.725,
"line_max": 68,
"alpha_frac": 0.7600289995,
"autogenerated": false,
"ratio": 1.6993839835728952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022724780701754385,
"num_lines": 80
} |
# $Id: crossdomain.py 671 2011-02-02 23:59:24Z shreyas $
from django import http
import logging
from django.conf import settings
from urlparse import urlparse
from netaddr import all_matching_cidrs
from socket import gethostbyname
logger = logging.getLogger(__name__)
logger.setLevel(getattr(settings, 'LOG_LEVEL', logging.DEBUG))
try:
XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS
XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS
except:
XS_SHARING_ALLOWED_ORIGINS = '*'
XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE', 'HEAD']
def check_xs_allowed(url):
"""
Returns True if cross-domain requests are permitted
for url and False if not
Checks against settings.ALLOWED_HOSTS and settings.ALLOWED_CIDRS
for whitelisted hosts and networks
"""
try:
hostname = urlparse(url).hostname
        # Allow whitelisted hosts (to avoid network lookups if not needed)
if hostname in settings.ALLOWED_HOSTS:
return True
if all_matching_cidrs(gethostbyname(hostname), settings.ALLOWED_CIDRS):
return True
else:
return False
except Exception, e:
        logger.warn("Failed lookup on %s: %s" % (url, e))
return False
class CORSMiddleware(object):
"""
    Enables cross-origin resource sharing (CORS): answers preflight OPTIONS
    requests and stamps Access-Control-* headers onto every response, subject
    to the whitelist checks above.
"""
def process_request(self, request):
logger.debug("Processing request")
setattr(request, '_dont_enforce_csrf_checks', True)
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
response = http.HttpResponse()
if ('HTTP_ORIGIN' in request.META) and check_xs_allowed(request.META['HTTP_ORIGIN']):
response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']
else:
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join( XS_SHARING_ALLOWED_METHODS )
response['Access-Control-Allow-Credentials'] = 'true'
response['Access-Control-Allow-Headers'] = 'x-requested-with, content-type'
return response
return None
def process_response(self, request, response):
logger.debug("Processing response")
if ('HTTP_ORIGIN' in request.META) and check_xs_allowed(request.META['HTTP_ORIGIN']):
response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']
else:
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join( XS_SHARING_ALLOWED_METHODS )
response['Access-Control-Allow-Credentials'] = 'true'
response['Access-Control-Allow-Headers'] = 'x-requested-with, content-type'
return response
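# Illustrative settings (assumed names; the middleware path follows this
# module's location in the project):
#
#     # settings.py
#     XS_SHARING_ALLOWED_ORIGINS = '*'
#     XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE', 'HEAD']
#     ALLOWED_HOSTS = ['api.example.com']    # allowed without a DNS lookup
#     ALLOWED_CIDRS = ['10.0.0.0/8']         # matched via netaddr after gethostbyname
#     MIDDLEWARE_CLASSES += ('newt.crossdomain.CORSMiddleware',)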
| {
"repo_name": "wentixiaogege/newt-2.0",
"path": "newt/crossdomain.py",
"copies": "3",
"size": "2858",
"license": "bsd-2-clause",
"hash": 2447988337192249000,
"line_mean": 36.1168831169,
"line_max": 97,
"alpha_frac": 0.6501049685,
"autogenerated": false,
"ratio": 3.9972027972027973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009444945649665944,
"num_lines": 77
} |
""" Provides cryptographic services """
import random
import base64
import cPickle as pickle
from peloton.exceptions import PelotonError
from Crypto.PublicKey import RSA
from Crypto.Util.randpool import RandomPool
# default character set on which to draw to make cookies
tokenspace = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
_Pub_Alg_ = RSA
randpool = RandomPool()
def makeCookie(tokenlength, tokenspace=tokenspace):
""" Create a cookie tokenlength characters long made from
characters randomly chosen from the provided tokenspace (for which
a suitable default is provided.)
"""
tchars = len(tokenspace)
cookie = "".join([tokenspace[random.randrange(0,tchars)]
for _ in xrange(tokenlength)])
return cookie
def newKey(lenbits=512):
""" Return a new key pair lenbits long. """
randpool.stir()
key = _Pub_Alg_.generate(lenbits, randpool.get_bytes)
return key
def importKey(keyStr):
""" Takes keyStr and returns a key. """
return pickle.loads(keyStr)
def exportKey(key):
""" Returns serialization of this key. """
return pickle.dumps(key)
def encrypt(data, key):
""" Takes data, pickles to string and encrypts into ASCII for
safe transmission over unknown wire protocols.
Beware: the encryption is strong but the computation is slow!
"""
pt = pickle.dumps(data)
blocksize = key.size()/8
ct = []
while pt:
if len(pt) <= blocksize:
chunk = pt
pt=''
else:
chunk=pt[:blocksize]
pt=pt[blocksize:]
ct.append(key.encrypt(chunk,'')[0])
return "".join(ct)
def decrypt(ciphertext, key):
""" Takes ciphertext made by encrypt, decrypts and de-pickles. """
blocksize = key.size()/8 + 1
pt = []
while ciphertext:
if len(ciphertext) <= blocksize:
chunk = ciphertext
ciphertext=''
else:
chunk = ciphertext[:blocksize]
ciphertext = ciphertext[blocksize:]
pt.append(key.decrypt(chunk))
pt = ''.join(pt)
try:
v = pickle.loads(pt)
return v
except:
raise PelotonError("Invalid ciphertext given to 'decode'")
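# Round-trip sketch (illustrative): any picklable object survives
# encrypt()/decrypt(), since encrypt() writes key-sized plaintext blocks and
# decrypt() reads them back one byte larger (the RSA output block size):
#
#     key = newKey(512)
#     token = encrypt({'user': 'alice', 'id': 42}, key)
#     assert decrypt(token, key) == {'user': 'alice', 'id': 42}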
def makeKeyAndCookieFile(keyfile = None, keylength=512, tokenlength=50):
""" Creates a Peloton key and cookie file and writes to keyfile if
specified. Returns the contents as a string. """
cookie = makeCookie(tokenlength)
key = newKey(keylength)
contents = (cookie, key)
asciiForm = base64.encodestring(pickle.dumps(contents))
if keyfile:
f = open(keyfile,'wt')
f.writelines(asciiForm)
f.close()
return asciiForm
def loadKeyAndCookieFile(keyfile):
""" Loads a key and cookie file returning a tuple of (cookie, key). """
f = open(keyfile, 'rt')
asciiForm = f.readlines()
pkle = base64.decodestring("".join(asciiForm))
    return pickle.loads(pkle)
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/utils/crypto.py",
"copies": "1",
"size": "3093",
"license": "bsd-3-clause",
"hash": 4804750385254920000,
"line_mean": 30.2525252525,
"line_max": 77,
"alpha_frac": 0.6618170061,
"autogenerated": false,
"ratio": 3.94515306122449,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510697006732449,
"avg_score": null,
"num_lines": null
} |
# $Id: CycleCreators.py 344 2012-12-13 13:10:53Z krasznaa $
##########################################################################
# Project: SFrame - ROOT-based analysis framework for ATLAS #
# #
# author Stefan Ask <[email protected]> - Manchester #
# author David Berge <[email protected]> - CERN #
# author Johannes Haller <[email protected]> - Hamburg #
# author A. Krasznahorkay <[email protected]> - NYU/Debrecen #
# #
##########################################################################
## @package CycleCreators
# @short Functions for creating a new analysis cycle torso
#
# This package collects the functions used by sframe_create_cycle.py
# to create the torso of a new analysis cycle. Apart from using
# sframe_create_cycle.py, the functions can be used in an interactive
# python session by executing:
#
# <code>
# >>> import CycleCreators
# </code>
## @short Class creating analysis cycle templates
#
# This class can be used to create a template cycle inheriting from
# SCycleBase. It is quite smart actually. If you call CycleCreator.CreateCycle
# from inside an "SFrame package", it will find the right locations for the
# created files and extend an already existing LinkDef.h file with the
# line for the new cycle.
class CycleCreator:
_headerFile = ""
_sourceFile = ""
def __init__( self ):
self._headerFile = ""
self._sourceFile = ""
## @short Template for cycle outside of a namespace
#
# This string is used by CreateHeader to create a header file that
# holds a cycle which is not in a namespace.
_header = """// Dear emacs, this is -*- c++ -*-
// $Id: CycleCreators.py 344 2012-12-13 13:10:53Z krasznaa $
#ifndef %(class)-s_H
#define %(class)-s_H
// SFrame include(s):
#include \"core/include/SCycleBase.h\"
/**
* @short Put short description of class here
*
* Put a longer description over here...
*
* @author Put your name here
* @version $Revision: 344 $
*/
class %(class)-s : public SCycleBase {
public:
/// Default constructor
%(class)-s();
/// Default destructor
~%(class)-s();
/// Function called at the beginning of the cycle
virtual void BeginCycle() throw( SError );
/// Function called at the end of the cycle
virtual void EndCycle() throw( SError );
/// Function called at the beginning of a new input data
virtual void BeginInputData( const SInputData& ) throw( SError );
/// Function called after finishing to process an input data
virtual void EndInputData ( const SInputData& ) throw( SError );
/// Function called after opening each new input file
virtual void BeginInputFile( const SInputData& ) throw( SError );
/// Function called for every event
virtual void ExecuteEvent( const SInputData&, Double_t ) throw( SError );
private:
//
// Put all your private variables here
//
// Macro adding the functions for dictionary generation
ClassDef( %(class)-s, 0 );
}; // class %(class)-s
#endif // %(class)-s_H
"""
## @short Template for cycle in a namespace
#
# This string is used by CreateHeader to create a header file that
# holds a cycle which is in a namespace.
_headerNs = """// Dear emacs, this is -*- c++ -*-
// $Id: CycleCreators.py 344 2012-12-13 13:10:53Z krasznaa $
#ifndef %(ns)-s_%(class)-s_H
#define %(ns)-s_%(class)-s_H
// SFrame include(s):
#include \"core/include/SCycleBase.h\"
namespace %(ns)-s {
/**
* @short Put short description of class here
*
* Put a longer description over here...
*
* @author Put your name here
* @version $Revision: 344 $
*/
class %(class)-s : public SCycleBase {
public:
/// Default constructor
%(class)-s();
/// Default destructor
~%(class)-s();
/// Function called at the beginning of the cycle
virtual void BeginCycle() throw( SError );
/// Function called at the end of the cycle
virtual void EndCycle() throw( SError );
/// Function called at the beginning of a new input data
virtual void BeginInputData( const SInputData& ) throw( SError );
/// Function called after finishing to process an input data
virtual void EndInputData ( const SInputData& ) throw( SError );
/// Function called after opening each new input file
virtual void BeginInputFile( const SInputData& ) throw( SError );
/// Function called for every event
virtual void ExecuteEvent( const SInputData&, Double_t ) throw( SError );
private:
//
// Put all your private variables here
//
// Macro adding the functions for dictionary generation
ClassDef( %(ns)-s::%(class)-s, 0 );
}; // class %(class)-s
} // namespace %(ns)-s
#endif // %(ns)-s_%(class)-s_H
"""
## @short Function creating an analysis cycle header
#
# This function can be used to create the header file for a new analysis
# cycle. It can correctly create the header file if the cycle name is
# defined like "Ana::AnalysisCycle". In this case it creates a cycle
# called "AnalysisCycle" that is in the C++ namespace "Ana". Multiple
# namespaces such as "Ana::MyAna::AnalysisCycle" are not supported!
#
# @param cycleName Name of the analysis cycle. Can contain the namespace name.
# @param fileName Optional parameter with the output header file name
def CreateHeader( self, cycleName, fileName = "" ):
# Extract the namespace name if it has been specified:
namespace = ""
import re
if re.search( "::", cycleName ):
print "CreateHeader:: We're creating a cycle in a namespace"
m = re.match( "(.*)::(.*)", cycleName )
namespace = m.group( 1 )
cycleName = m.group( 2 )
print "CreateHeader:: Namespace name = " + namespace
# Construct the file name if it has not been specified:
if fileName == "":
fileName = cycleName + ".h"
# Some printouts:
print "CreateHeader:: Cycle name = " + cycleName
print "CreateHeader:: File name = " + fileName
self._headerFile = fileName
# Create a backup of an already existing header file:
import os.path
if os.path.exists( fileName ):
print "CreateHeader:: File \"" + fileName + "\" already exists"
print "CreateHeader:: Moving \"" + fileName + "\" to \"" + \
fileName + ".backup\""
import shutil
shutil.move( fileName, fileName + ".backup" )
# Write the header file:
output = open( fileName, "w" )
if namespace == "":
output.write( self._header % { 'class' : cycleName } )
else:
output.write( self._headerNs % { 'class' : cycleName,
'ns' : namespace } )
return
## @short Template for cycle outside of a namespace
#
# This string is used by CreateSource to create a source file that
# holds a cycle which is not in a namespace.
_source = """// $Id: CycleCreators.py 344 2012-12-13 13:10:53Z krasznaa $
// Local include(s):
#include \"%(dir)-s/%(class)-s.h\"
ClassImp( %(class)-s );
%(class)-s::%(class)-s()
: SCycleBase() {
SetLogName( GetName() );
}
%(class)-s::~%(class)-s() {
}
void %(class)-s::BeginCycle() throw( SError ) {
return;
}
void %(class)-s::EndCycle() throw( SError ) {
return;
}
void %(class)-s::BeginInputData( const SInputData& ) throw( SError ) {
return;
}
void %(class)-s::EndInputData( const SInputData& ) throw( SError ) {
return;
}
void %(class)-s::BeginInputFile( const SInputData& ) throw( SError ) {
return;
}
void %(class)-s::ExecuteEvent( const SInputData&, Double_t ) throw( SError ) {
return;
}
"""
## @short Template for cycle in a namespace
#
# This string is used by CreateSource to create a source file that
# holds a cycle which is in a namespace.
_sourceNs = """// $Id: CycleCreators.py 344 2012-12-13 13:10:53Z krasznaa $
// Local include(s):
#include \"%(dir)-s/%(class)-s.h\"
ClassImp( %(ns)-s::%(class)-s );
namespace %(ns)-s {
%(class)-s::%(class)-s()
: SCycleBase() {
SetLogName( GetName() );
}
%(class)-s::~%(class)-s() {
}
void %(class)-s::BeginCycle() throw( SError ) {
return;
}
void %(class)-s::EndCycle() throw( SError ) {
return;
}
void %(class)-s::BeginInputData( const SInputData& ) throw( SError ) {
return;
}
void %(class)-s::EndInputData( const SInputData& ) throw( SError ) {
return;
}
void %(class)-s::BeginInputFile( const SInputData& ) throw( SError ) {
return;
}
void %(class)-s::ExecuteEvent( const SInputData&, Double_t ) throw( SError ) {
return;
}
} // namespace %(ns)-s
"""
## @short Function creating the analysis cycle source file
#
# This function creates the source file that works with the header created
# by CreateHeader. It is important that CreateHeader is executed before
# this function, as it depends on knowing where the header file is
# physically. (To include it correctly in the source file.) It can
# handle cycles in namespaces, just like CreateHeader. (The same
# rules apply.)
#
# @param cycleName Name of the analysis cycle. Can contain the namespace name.
# @param fileName Optional parameter with the output source file name
def CreateSource( self, cycleName, fileName = "" ):
# Extract the namespace name if it has been specified:
namespace = ""
import re
if re.search( "::", cycleName ):
print "CreateSource:: We're creating a cycle in a namespace"
m = re.match( "(.*)::(.*)", cycleName )
namespace = m.group( 1 )
cycleName = m.group( 2 )
print "CreateSource:: Namespace name = " + namespace
# Construct the file name if it has not been specified:
if fileName == "":
fileName = cycleName + ".cxx"
# Some printouts:
print "CreateSource:: Cycle name = " + cycleName
print "CreateSource:: File name = " + fileName
self._sourceFile = fileName
# The following is a tricky part. Here I evaluate how the source file
# will be able to include the previously created header file.
# Probably a Python guru could've done it in a shorter way, but
# at least it works.
import os.path
hdir = os.path.dirname( self._headerFile )
sdir = os.path.dirname( self._sourceFile )
prefix = os.path.commonprefix( [ self._headerFile, self._sourceFile ] )
hdir = hdir.replace( prefix, "" )
sdir = sdir.replace( prefix, "" )
nup = sdir.count( "/" );
nup = nup + 1
dir = ""
for i in range( 0, nup ):
dir = dir.join( [ "../", hdir ] )
# Create a backup of an already existing header file:
if os.path.exists( fileName ):
print "CreateHeader:: File \"" + fileName + "\" already exists"
print "CreateHeader:: Moving \"" + fileName + "\" to \"" + \
fileName + ".backup\""
import shutil
shutil.move( fileName, fileName + ".backup" )
# Write the source file:
output = open( fileName, "w" )
if namespace == "":
output.write( self._source % { 'dir' : dir,
'class' : cycleName } )
else:
output.write( self._sourceNs % { 'dir' : dir,
'class' : cycleName,
'ns' : namespace } )
return
## @short Function adding link definitions for rootcint
#
# Each new analysis cycle has to declare itself in a so called "LinkDef
# file". This makes sure that rootcint knows that a dictionary should
# be generated for this C++ class.
#
# This function is also quite smart. If the file name specified does
    # not yet exist, it creates a fully functional LinkDef file. If the
# file already exists, it just inserts one line declaring the new
# cycle into this file.
#
# @param cycleName Name of the analysis cycle. Can contain the namespace name.
# @param fileName Optional parameter with the LinkDef file name
def AddLinkDef( self, cycleName, fileName = "LinkDef.h" ):
import os.path
if os.path.exists( fileName ):
print "AddLinkDef:: Extending already existing file \"" + fileName + "\""
# Read in the already existing file:
output = open( fileName, "r" )
lines = output.readlines()
output.close()
# Find the "#endif" line:
endif_line = ""
import re
for line in lines:
if re.search( "#endif", line ):
endif_line = line
if endif_line == "":
print "AddLinkDef:: ERROR File \"" + file + "\" is not in the right format!"
print "AddLinkDef:: ERROR Not adding link definitions!"
return
index = lines.index( endif_line )
# Add the line defining the current analysis cycle:
lines.insert( index, "#pragma link C++ class %s+;\n" % cycleName )
lines.insert( index + 1, "\n" )
# Overwrite the file with the new contents:
output = open( fileName, "w" )
for line in lines:
output.write( line )
output.close()
else:
# Create a new file and fill it with all the necessary lines:
print "AddLinkDef:: Creating new file called \"" + fileName + "\""
output = open( fileName, "w" )
output.write( "// Dear emacs, this is -*- c++ -*-\n" )
output.write( "// $Id: CycleCreators.py 344 2012-12-13 13:10:53Z krasznaa $\n\n" )
output.write( "#ifdef __CINT__\n\n" )
output.write( "#pragma link off all globals;\n" )
output.write( "#pragma link off all classes;\n" )
output.write( "#pragma link off all functions;\n\n" )
output.write( "#pragma link C++ nestedclass;\n\n" )
output.write( "#pragma link C++ class %s+;\n\n" % cycleName )
output.write( "#endif // __CINT__\n" )
return
## @short Function creating a configuration file for the new cycle
#
# This function is supposed to create an example configuration file
# for the new cycle. It uses PyXML to write the configuration, and
# exactly this causes a bit of trouble. PyXML is about the worst
    # XML implementation I ever came across... There are tons of things
# that it can't do. Not to mention the lack of any proper documentation.
#
# All in all, the resulting XML file is not too usable at the moment,
# it's probably easier just copying one of the example cycles from
# SFrame/user/config and adjusting it to the user's needs...
#
# @param cycleName Name of the analysis cycle. Can contain the namespace name.
# @param fileName Optional parameter with the configuration file name
def CreateConfig( self, cycleName, fileName = "" ):
# Extract the namespace name if it has been specified:
namespace = ""
import re
if re.search( "::", cycleName ):
print "CreateConfig:: We're creating a cycle in a namespace"
m = re.match( "(.*)::(.*)", cycleName )
namespace = m.group( 1 )
cycleName = m.group( 2 )
print "CreateConfig:: Namespace name = " + namespace
# Construct the file name if it has not been specified:
if fileName == "":
fileName = cycleName + "_config.xml"
# Some printouts:
print "CreateConfig:: Cycle name = " + cycleName
print "CreateConfig:: File name = " + fileName
# Use PyXML for the configuration creation:
import xml.dom.minidom
doc = xml.dom.minidom.Document()
doctype = xml.dom.minidom.DocumentType( "JobConfiguration" )
doctype.publicId = ""
doctype.systemId = "JobConfig.dtd"
doc.doctype = doctype
jobconfig = doc.createElement( "JobConfiguration" )
doc.appendChild( jobconfig )
jobconfig.setAttribute( "JobName", cycleName + "Job" )
jobconfig.setAttribute( "OutputLevel", "INFO" )
userlib = doc.createElement( "Library" )
jobconfig.appendChild( userlib )
userlib.setAttribute( "Name", "YourLibraryNameComesHere" )
cycle = doc.createElement( "Cycle" )
jobconfig.appendChild( cycle )
cycle.setAttribute( "Name", cycleName )
cycle.setAttribute( "OutputDirectory", "./" )
cycle.setAttribute( "PostFix", "" )
cycle.setAttribute( "TargetLumi", "1.0" )
inputdata = doc.createElement( "InputData" )
cycle.appendChild( inputdata )
inputdata.setAttribute( "Type", "Data1" )
inputdata.setAttribute( "Version", "Reco" )
inputdata.setAttribute( "Lumi", "0.0" )
inputdata.setAttribute( "NEventsMax", "-1" )
infile = doc.createElement( "In" )
inputdata.appendChild( infile )
infile.setAttribute( "FileName", "YourInputFileComesHere" )
userconf = doc.createElement( "UserConfig" )
cycle.appendChild( userconf )
confitem = doc.createElement( "Item" )
userconf.appendChild( confitem )
confitem.setAttribute( "Name", "NameOfUserProperty" )
confitem.setAttribute( "Value", "ValueOfUserProperty" )
output = open( fileName, "w" )
output.write( doc.toprettyxml( encoding="UTF-8" ) )
return
## @short Main analysis cycle creator function
#
# The users of this class should normally just use this function
# to create a new analysis cycle.
#
# It only really needs to receive the name of the new cycle, it can guess
# the name of the LinkDef file by itself if it has to. It calls all the
# other functions of this class to create all the files for the new
# cycle.
#
# @param cycleName Name of the analysis cycle. Can contain the namespace name.
# @param linkdef Optional parameter with the name of the LinkDef file
def CreateCycle( self, cycleName, linkdef = "" ):
# If the specified name contains a namespace, get just the class name:
className = cycleName
import re
if re.search( "::", cycleName ):
m = re.match( ".*::(.*)", cycleName )
className = m.group( 1 )
# Check if a directory called "include" exists in the current directory.
# If it does, put the new header in that directory. Otherwise leave it up
# to the CreateHeader function to put it where it wants.
import os.path
if os.path.exists( "./include" ):
self.CreateHeader( cycleName, "./include/" + className + ".h" )
if linkdef == "":
import glob
filelist = glob.glob( "./include/*LinkDef.h" )
if len( filelist ) == 0:
print "CreateCycle:: WARNING There is no LinkDef file under ./include"
print "CreateCycle:: WARNING Creating one with the name ./include/LinkDef.h"
linkdef = "./include/LinkDef.h"
elif len( filelist ) == 1:
linkdef = filelist[ 0 ]
else:
print "CreateCycle:: ERROR Multiple header files ending in LinkDef.h"
print "CreateCycle:: ERROR I don't know which one to use..."
return
self.AddLinkDef( cycleName, linkdef )
else:
self.CreateHeader( cycleName )
if linkdef == "":
import glob
filelist = glob.glob( "*LinkDef.h" )
if len( filelist ) == 0:
print "CreateCycle:: Creating new LinkDef file: LinkDef.h"
linkdef = "LinkDef.h"
elif len( filelist ) == 1:
linkdef = filelist[ 0 ]
else:
print "CreateCycle:: ERROR Multiple header files ending in LinkDef.h"
print "CreateCycle:: ERROR I don't know which one to use..."
return
self.AddLinkDef( cycleName, linkdef )
# Check if a directory called "src" exists in the current directory.
# If it does, put the new source in that directory. Otherwise leave it up
# to the CreateSource function to put it where it wants.
if os.path.exists( "./src" ):
self.CreateSource( cycleName, "./src/" + className + ".cxx" )
else:
self.CreateSource( cycleName )
# Check if a directory called "config" exists in the current directory.
# If it does, put the new configuration in that directory. Otherwise leave it up
# to the CreateConfig function to put it where it wants.
if os.path.exists( "./config" ):
self.CreateConfig( cycleName, "./config/" + className + "_config.xml" )
else:
self.CreateConfig( cycleName )
return
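## Typical interactive use, following the package docstring above (sketch):
#
#     >>> import CycleCreators
#     >>> cc = CycleCreators.CycleCreator()
#     >>> cc.CreateCycle( "Ana::MyAnalysisCycle" )
#
# Run it from the root of an SFrame package so the include/, src/ and config/
# directories (and an existing LinkDef.h) are found automatically.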
| {
"repo_name": "jpavel/cms-ucl-tau",
"path": "SFrame/python/CycleCreators.py",
"copies": "1",
"size": "21701",
"license": "mit",
"hash": -1106553094682084900,
"line_mean": 33.7216,
"line_max": 96,
"alpha_frac": 0.5840744666,
"autogenerated": false,
"ratio": 4.086047825268311,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5170122291868311,
"avg_score": null,
"num_lines": null
} |
# $Id: data.py,v 1.1 2011/03/19 01:52:27 paus Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILLER')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_014'),
annotation = cms.untracked.string('RECO'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/data/Run2010A/EG/RECO/v2/000/136/100/A687F4BC-E667-DF11-947D-001617E30E2C.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'GR10_P_V11::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillRECO_cfi")
# set the name for the output file
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.MitTreeFiller.TreeWriter.maxSize = cms.untracked.uint32(1790)
process.bambu_step = cms.Path(process.BambuFillRECO)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
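# To use this configuration (illustrative), substitute the XX-MITDATASET-XX
# placeholder above and hand the file to cmsRun inside a CMSSW environment:
#
#     cmsRun data.py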
| {
"repo_name": "cpausmit/Kraken",
"path": "filefi/016/data.py",
"copies": "1",
"size": "1870",
"license": "mit",
"hash": 1876015633848288500,
"line_mean": 35.6666666667,
"line_max": 154,
"alpha_frac": 0.7834224599,
"autogenerated": false,
"ratio": 2.992,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42754224599,
"avg_score": null,
"num_lines": null
} |
# $Id: data.py,v 1.1 2013/07/10 02:25:44 paus Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_029'),
annotation = cms.untracked.string('AOD'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/tmp/FAB36B02-36D4-E111-92D6-0025B32036E2.root')
# fileNames = cms.untracked.vstring('file:/tmp/F853EAC9-44C8-E111-9778-003048F110BE.root')
# fileNames = cms.untracked.vstring('file:/tmp/4EA92226-F2C6-E111-A390-001D09F23A20.root')
fileNames = cms.untracked.vstring('file:/tmp/1C19C50D-AED9-E111-9DDF-E0CB4E553651.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *",
"drop *_MEtoEDMConverter_*_*",
"drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'GR_P_V40::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillAOD_cfi")
#process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.MitTreeFiller.TreeWriter.fileName = 'bambu-output-file-tmp'
process.bambu_step = cms.Path(process.BambuFillAOD)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
| {
"repo_name": "cpausmit/Kraken",
"path": "filefi/032/data.py",
"copies": "1",
"size": "2092",
"license": "mit",
"hash": -8401774625456348000,
"line_mean": 37.7407407407,
"line_max": 113,
"alpha_frac": 0.7385277247,
"autogenerated": false,
"ratio": 2.913649025069638,
"config_test": true,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.41521767497696377,
"avg_score": null,
"num_lines": null
} |
# $Id: data.py,v 1.2 2013/06/29 03:15:04 paus Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_029'),
annotation = cms.untracked.string('AOD'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('file:/tmp/FAB36B02-36D4-E111-92D6-0025B32036E2.root')
# fileNames = cms.untracked.vstring('file:/tmp/F853EAC9-44C8-E111-9778-003048F110BE.root')
# fileNames = cms.untracked.vstring('file:/tmp/4EA92226-F2C6-E111-A390-001D09F23A20.root')
fileNames = cms.untracked.vstring('file:/tmp/1C19C50D-AED9-E111-9DDF-E0CB4E553651.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *",
"drop *_MEtoEDMConverter_*_*",
"drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'GR_P_V40::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillAOD_cfi")
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.bambu_step = cms.Path(process.BambuFillAOD)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
| {
"repo_name": "cpausmit/Kraken",
"path": "filefi/031/data.py",
"copies": "1",
"size": "2023",
"license": "mit",
"hash": 7404146062349658000,
"line_mean": 37.1698113208,
"line_max": 113,
"alpha_frac": 0.7360355907,
"autogenerated": false,
"ratio": 2.919191919191919,
"config_test": true,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4155227509891919,
"avg_score": null,
"num_lines": null
} |
# $Id: data.py,v 1.3 2010/10/23 12:43:55 ceballos Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILLER')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_014a'),
annotation = cms.untracked.string('RECO'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/data/Run2010A/EG/RECO/v2/000/136/100/A687F4BC-E667-DF11-947D-001617E30E2C.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'GR10_P_V10::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillRECO_cfi")
# set the name for the output file
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.MitTreeFiller.TreeWriter.maxSize = cms.untracked.uint32(1790)
process.bambu_step = cms.Path(process.BambuFillRECO)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
| {
"repo_name": "cpausmit/Kraken",
"path": "filefi/014/data.py",
"copies": "1",
"size": "1820",
"license": "mit",
"hash": 9082743460944804000,
"line_mean": 35.4,
"line_max": 154,
"alpha_frac": 0.7774725275,
"autogenerated": false,
"ratio": 2.964169381107492,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153313726044464,
"avg_score": 0.017665636512605595,
"num_lines": 50
} |
# $Id: dbexts.py 4185 2008-02-28 16:55:33Z cgroves $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file is named dbexts.ini and follows this format:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, re
from types import StringType
__author__ = "brian zimmer ([email protected])"
__version__ = "$Revision: 4185 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
headers may be given as list of strings.
Columns are separated by colsep; the header is separated from
the result set by a line of headersep characters.
The function calls stringify to format the value data into a string.
    It defaults to calling str() and stripping leading and trailing whitespace.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(lambda header: header.upper(), list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = " | ".join(l)
# Insert header separator
totallen = len(output[0])
    output[1:1] = ["-" * totallen]
output.append("\n" + affected)
return output
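# Worked example (illustrative) of the layout built above -- headers are
# upper-cased and each column is left-justified to its widest entry:
#
#     >>> for line in console([(13569.0,)], ('count',)):
#     ...     print line
#     COUNT
#     -------
#     13569.0
#
#     1 row affected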
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
class mxODBCProxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=0, jndiname=None, out=None):
self.verbose = 1
self.results = []
self.headers = []
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
if cfg == None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
if dbname == None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key('datahandler'):
self.datahandler = []
for dh in t['datahandler'].split(','):
classname = dh.split(".")[-1]
datahandlerclass = __import__(dh, globals(), locals(), classname)
self.datahandler.append(datahandlerclass)
keys = [x for x in t.keys() if x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name']]
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = self.autocommit
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
self.dbname = dbname
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
for a in dir(database):
try:
p = getattr(database, a)
if issubclass(p, Exception):
setattr(self, a, p)
except:
continue
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
raise AttributeError("'dbexts' object has no attribute '%s'" % (name))
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self, style=None):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = [], []
if style:
c = self.db.cursor(style)
else:
c = self.db.cursor()
if __OS__ == 'java':
if hasattr(self, 'datahandler'):
for dh in self.datahandler:
c.datahandler = dh(c.datahandler)
else:
c = mxODBCProxy(c)
return c
def commit(self, cursor=None, close=1):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
self.results += cursor.fetchall()
s = cursor.nextset()
if hasattr(cursor, "lastrowid"):
self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"):
self.updatecount = cursor.updatecount
if not self.autocommit or cursor is None:
if not self.db.autocommit:
self.db.commit()
if cursor and close: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def prepare(self, sql):
""" prepare the sql statement """
cur = self.begin()
try:
return cur.prepare(sql)
finally:
self.commit(cur)
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur, close=isinstance(sql, StringType))
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
            if isinstance(sql, StringType):
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0,
map(lambda statement: statement.strip(), sql.split(delim)))
else:
statements = [sql]
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
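	# Cross-database copy sketch ('src' is a hypothetical entry in the config
	# file; both databases must define the table):
	#
	#   d = dbexts()
	#   copied = d.bcp('src', 'player', where='(team_id = ?)', params=[7])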
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
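	# Sketch (path is illustrative): d.unload('/tmp/players.csv',
	# 'select * from player') writes the delimited rows plus a header line.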
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: x.lower(), include)
exclude = map(lambda x: x.lower(), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
	def __getattr__(self, name):
		if name == 'columns':
			return self.executor.cols
		raise AttributeError("'Bulkcopy' object has no attribute '%s'" % (name))
def __filter__(self, values, include, exclude):
cols = map(lambda col: col.lower(), values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (", ".join(self.columns), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
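# Manual Bulkcopy sketch (hypothetical table): rows are buffered in memory and
# flushed by batch()/done() unless autobatch is set; tuples must follow the
# column order reported by bcp.columns.
#
#   d = dbexts()
#   bcp = d.bulkcopy(d, 'player')
#   bcp.rowxfer((1, 'smith'))
#   total = bcp.done()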
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
def format(self, o):
if not o:
return ""
o = str(o)
if o.find(",") != -1:
o = "\"\"%s\"\"" % (o)
return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (self.delimiter.join(map(lambda x: x[0], headers))))
if results:
for a in results:
w.write("%s\n" % (self.delimiter.join(map(self.format, a))))
w.flush()
w.close()
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
try:
self.indices = None
self.db.stat(self.table)
self.indices = []
# (non-unique, name, type, pos, column name, asc)
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], x[5].strip(), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
				# sort this regardless; this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
except:
pass
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
if self.indices is None:
d.append(" (failed)")
else:
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = ", ".join(map(lambda x: x[4], a))
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return "\n".join(d)
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
		self.ctypeRE = re.compile(r"\[(jdbc|odbc|default)\]")
		self.entryRE = re.compile(r"([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(lambda x: x.strip(), data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
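# IniParser lookup sketch: records are grouped by section type and located by
# their 'name' entry (values shown are hypothetical):
#
#   ini = IniParser('dbexts.ini')
#   ini[('default', 'name')]    # -> 'mysql'
#   ini[('jdbc', 'mysql')]      # -> {'name': 'mysql', 'url': '...', ...}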
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return "".join(d)
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(i.upper())
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))
| {
"repo_name": "babble/babble",
"path": "include/jython/Lib/dbexts.py",
"copies": "1",
"size": "31053",
"license": "apache-2.0",
"hash": -1132955567386362900,
"line_mean": 41.7727272727,
"line_max": 134,
"alpha_frac": 0.4419540785,
"autogenerated": false,
"ratio": 4.534608644859813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5476562723359814,
"avg_score": null,
"num_lines": null
} |
# $Id: dbexts.py 6638 2009-08-10 17:05:49Z fwierzbicki $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file uses the following format, in a file named dbexts.ini:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, re
from types import StringType
__author__ = "brian zimmer ([email protected])"
__version__ = "$Revision: 6638 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
headers may be given as list of strings.
	Columns are separated by " | "; the header is separated from
	the result set by a line of "-" characters.
	Each value is formatted with str() and padded to its column width.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(lambda header: header.upper(), list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = " | ".join(l)
# Insert header separator
totallen = len(output[0])
	output[1:1] = ["-" * totallen]
output.append("\n" + affected)
return output
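# console() sketch: returns one preformatted string per output line, roughly
#
#   console([(1, 'smith')], ('id', 'name'))
#   # -> ['ID | NAME ', '----------', '1  | smith', '\n1 row affected']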
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
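# html() sketch: the same tabular data rendered as table markup; join the
# pieces for a single fragment:
#
#   "".join(html([(1, 'smith')], ('id', 'name')))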
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
class mxODBCProxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
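# executor sketch: with an explicit column list the insert statement is fully
# parameterized up front, e.g.
#
#   executor('player', ['id', 'name']).sql
#   # -> 'insert into player (id,name) values (?,?)'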
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=0, jndiname=None, out=None):
self.verbose = 1
self.results = []
self.headers = []
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
			if cfg is None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
		if dbname is None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key('datahandler'):
self.datahandler = []
for dh in t['datahandler'].split(','):
classname = dh.split(".")[-1]
datahandlerclass = __import__(dh, globals(), locals(), classname)
self.datahandler.append(datahandlerclass)
keys = [x for x in t.keys() if x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name']]
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = self.autocommit
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
self.dbname = dbname
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
for a in dir(database):
try:
p = getattr(database, a)
if issubclass(p, Exception):
setattr(self, a, p)
except:
continue
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
raise AttributeError("'dbexts' object has no attribute '%s'" % (name))
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self, style=None):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = [], []
if style:
c = self.db.cursor(style)
else:
c = self.db.cursor()
if __OS__ == 'java':
if hasattr(self, 'datahandler'):
for dh in self.datahandler:
c.datahandler = dh(c.datahandler)
else:
c = mxODBCProxy(c)
return c
def commit(self, cursor=None, close=1):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
self.results += cursor.fetchall()
s = cursor.nextset()
if hasattr(cursor, "lastrowid"):
self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"):
self.updatecount = cursor.updatecount
if not self.autocommit or cursor is None:
if not self.db.autocommit:
self.db.commit()
if cursor and close: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def prepare(self, sql):
""" prepare the sql statement """
cur = self.begin()
try:
return cur.prepare(sql)
finally:
self.commit(cur)
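	# prepare() sketch (zxJDBC; statement text is illustrative): the returned
	# prepared statement can be passed to isql()/raw() in place of a string
	# and, assuming the cursor accepts it, is reusable across calls:
	#
	#   p = d.prepare('insert into player (id, name) values (?, ?)')
	#   d.isql(p, [1, 'smith'])
	#   d.isql(p, [2, 'jones'])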
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur, close=isinstance(sql, StringType))
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
			if isinstance(sql, StringType):
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0,
map(lambda statement: statement.strip(), sql.split(delim)))
else:
statements = [sql]
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: x.lower(), include)
exclude = map(lambda x: x.lower(), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
	def __getattr__(self, name):
		if name == 'columns':
			return self.executor.cols
		raise AttributeError("'Bulkcopy' object has no attribute '%s'" % (name))
def __filter__(self, values, include, exclude):
cols = map(lambda col: col.lower(), values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (", ".join(self.columns), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
def format(self, o):
if not o:
return ""
o = str(o)
if o.find(",") != -1:
o = "\"\"%s\"\"" % (o)
return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (self.delimiter.join(map(lambda x: x[0], headers))))
if results:
for a in results:
w.write("%s\n" % (self.delimiter.join(map(self.format, a))))
w.flush()
w.close()
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
try:
self.indices = None
self.db.stat(self.table)
self.indices = []
# (non-unique, name, type, pos, column name, asc)
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], x[5].strip(), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
				# sort this regardless; this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
except:
pass
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
if self.indices is None:
d.append(" (failed)")
else:
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = ", ".join(map(lambda x: x[4], a))
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return "\n".join(d)
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
		self.ctypeRE = re.compile(r"\[(jdbc|odbc|default)\]")
		self.entryRE = re.compile(r"([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(lambda x: x.strip(), data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return "".join(d)
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(i.upper())
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))
| {
"repo_name": "ftomassetti/intellij-community",
"path": "python/lib/Lib/dbexts.py",
"copies": "74",
"size": "25541",
"license": "apache-2.0",
"hash": 834296514198689000,
"line_mean": 34.1804407713,
"line_max": 126,
"alpha_frac": 0.5374887436,
"autogenerated": false,
"ratio": 3.728613138686131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: dbexts.py,v 1.4 2001/12/29 18:00:15 bzimmer Exp $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file uses the following format, in a file named dbexts.ini:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, string, re
__author__ = "brian zimmer ([email protected])"
__version__ = "$Revision: 1.4 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
headers may be given as list of strings.
	Columns are separated by " | "; the header is separated from
	the result set by a line of "-" characters.
	Each value is formatted with str() and padded to its column width.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(string.upper, list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = string.join(l, " | ")
# Insert header separator
totallen = len(output[0])
	output[1:1] = ["-" * totallen]
output.append("\n" + affected)
return output
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
class ex_proxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=1, jndiname=None, out=None):
self.verbose = 1
self.results = None
self.headers = None
self.datahandler = None
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
if cfg == None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
if dbname == None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key("datahandler"):
try:
datahandlerclass = string.split(t['datahandler'], ".")[-1]
self.datahandler = __import__(t['datahandler'], globals(), locals(), datahandlerclass)
except:
pass
keys = filter(lambda x: x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name'], t.keys())
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = 0
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = None, None
c = self.db.cursor()
if __OS__ == 'java':
if self.datahandler: c.datahandler = self.datahandler(c.datahandler)
else:
c = ex_proxy(c)
return c
def commit(self, cursor=None):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
f = cursor.fetchall()
if f: self.results = choose(self.results is None, [], self.results) + f
s = cursor.nextset()
if hasattr(cursor, "lastrowid"): self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"): self.updatecount = cursor.updatecount
if self.autocommit or cursor is None: self.db.commit()
if cursor: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur)
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0, map(string.strip, string.split(sql, delim)))
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: string.lower(x), include)
exclude = map(lambda x: string.lower(x), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __getattr__(self, name):
if name == 'columns':
return self.executor.cols
def __filter__(self, values, include, exclude):
cols = map(string.lower, values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (string.join(self.columns, ", "), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
def format(self, o):
if not o:
return ""
o = str(o)
if o.find(",") != -1:
o = "\"\"%s\"\"" % (o)
return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (string.join(map(lambda x: x[0], headers), self.delimiter)))
if results:
for a in results:
w.write("%s\n" % (string.join(map(self.format, a), self.delimiter)))
w.flush()
w.close()
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
self.db.stat(self.table)
# (non-unique, name, type, pos, column name, asc)
self.indices = []
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], string.strip(x[5]), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
			# sort this regardless; this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = string.join(map(lambda x: x[4], a), ", ")
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return string.join(d, "\n")
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
self.ctypeRE = re.compile("\[(jdbc|odbc|default)\]")
self.entryRE = re.compile("([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(string.strip, data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return string.join(d, "")
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(string.upper(i))
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))
| {
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"path": "Lib/dbexts.py",
"copies": "2",
"size": "20653",
"license": "mit",
"hash": -7604733996391548000,
"line_mean": 28.931884058,
"line_max": 120,
"alpha_frac": 0.6440710793,
"autogenerated": false,
"ratio": 2.867675645653985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9323855795838787,
"avg_score": 0.0375781858230396,
"num_lines": 690
} |
# $Id: dbexts.py,v 1.5 2002/01/07 04:59:50 bzimmer Exp $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file uses the following format, in a file named dbexts.ini:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, re
__author__ = "brian zimmer ([email protected])"
__version__ = "$Revision: 1.5 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
headers may be given as list of strings.
	Columns are separated by " | "; the header is separated from
	the result set by a line of "-" characters.
	Each value is formatted with str() and padded to its column width.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(lambda header: header.upper(), list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = " | ".join(l)
# Insert header separator
totallen = len(output[0])
	output[1:1] = ["-" * totallen]
output.append("\n" + affected)
return output
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
class mxODBCProxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=0, jndiname=None, out=None):
self.verbose = 1
self.results = None
self.headers = None
self.datahandler = None
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
if cfg == None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
if dbname == None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key("datahandler"):
try:
datahandlerclass = t['datahandler'].split(".")[-1]
self.datahandler = __import__(t['datahandler'], globals(), locals(), datahandlerclass)
except:
pass
keys = filter(lambda x: x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name'], t.keys())
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = self.autocommit
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
self.dbname = dbname
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = None, None
c = self.db.cursor()
if __OS__ == 'java':
if self.datahandler: c.datahandler = self.datahandler(c.datahandler)
else:
c = mxODBCProxy(c)
return c
def commit(self, cursor=None):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
f = cursor.fetchall()
if f: self.results = choose(self.results is None, [], self.results) + f
s = cursor.nextset()
if hasattr(cursor, "lastrowid"):
self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"):
self.updatecount = cursor.updatecount
if not self.autocommit or cursor is None: self.db.commit()
if cursor: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur)
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0,
map(lambda statement: statement.strip(), sql.split(delim)))
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: x.lower(), include)
exclude = map(lambda x: x.lower(), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
    def __getattr__(self, name):
        if name == 'columns':
            return self.executor.cols
        # any other unset attribute deliberately resolves to None
        return None
def __filter__(self, values, include, exclude):
cols = map(lambda col: col.lower(), values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (", ".join(self.columns), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
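# Illustrative sketch (not executed): copying a table between two configured
# databases with Bulkcopy via dbexts.bcp(). The section names 'src' and 'dst'
# are assumptions and must exist in the active dbexts configuration:
#
#   dst = dbexts('dst')
#   copied = dst.bcp('src', 'employees', where="(dept = ?)", params=[42])
#   print "%d rows copied" % copied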
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
    def format(self, o):
        # None becomes an empty field; other falsy values (e.g. 0) are kept
        if o is None:
            return ""
        o = str(o)
        # quote values that contain the delimiter character
        if o.find(self.delimiter) != -1:
            o = "\"%s\"" % (o)
        return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (self.delimiter.join(map(lambda x: x[0], headers))))
if results:
for a in results:
w.write("%s\n" % (self.delimiter.join(map(self.format, a))))
w.flush()
w.close()
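# Illustrative sketch (not executed): dumping a query to a delimited file
# through dbexts.unload(), which drives the Unload class above; 'db' is
# assumed to be a connected dbexts instance:
#
#   db.unload('employees.csv', 'select * from employees')
#   db.unload('employees.txt', 'select * from employees',
#             delimiter='|', includeheaders=0)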
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
self.db.stat(self.table)
# (non-unique, name, type, pos, column name, asc)
self.indices = []
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], x[5].strip(), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
            # sort this regardless; this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = ", ".join(map(lambda x: x[4], a))
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return "\n".join(d)
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
        self.ctypeRE = re.compile(r"\[(jdbc|odbc|default)\]")
        self.entryRE = re.compile(r"([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(lambda x: x.strip(), data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
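# IniParser expects a minimal INI-like file whose section headers name the
# connection type. A hypothetical example matching the regular expressions
# above:
#
#   [odbc]
#   name=mydb
#   url=myodbc_datasource
#   user=scott
#   pwd=tiger
#
# Entries are then addressed by (type, name) tuples, e.g.
# parser[("odbc", "mydb")].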
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return "".join(d)
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(i.upper())
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))
| {
"repo_name": "ai-ku/langvis",
"path": "dependencies/jython-2.1/Lib/dbexts.py",
"copies": "2",
"size": "20664",
"license": "mit",
"hash": 3432014883960493000,
"line_mean": 28.7752161383,
"line_max": 120,
"alpha_frac": 0.643437863,
"autogenerated": false,
"ratio": 2.869202999166898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9324657853994018,
"avg_score": 0.03759660163457606,
"num_lines": 694
} |
# $Id: debug.py,v 1.3 2010/10/23 12:43:55 ceballos Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILLER')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_014a'),
annotation = cms.untracked.string('RECODEBUG'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE')
)
# input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/build/bendavid/RECODEBUG/Zee7TeVTrackingParticles/D87A5BD6-40A0-DE11-A3A9-00E08178C0B1.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'START38_V12::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillRECODEBUG_cfi")
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.MitTreeFiller.TreeWriter.maxSize = cms.untracked.uint32(1790)
# hack: re-enable pixelLess tracking (present in special startup MC samples)
#process.MitTreeFiller.PixelLessTracks.active = True
process.bambu_step = cms.Path(process.BambuFillRECODEBUG)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
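# This configuration is meant to be executed with cmsRun from within a
# CMSSW environment, e.g.:
#   cmsRun debug.py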
| {
"repo_name": "cpausmit/Kraken",
"path": "filefi/014/debug.py",
"copies": "1",
"size": "1936",
"license": "mit",
"hash": 5936773567815664000,
"line_mean": 36.2307692308,
"line_max": 154,
"alpha_frac": 0.7804752066,
"autogenerated": false,
"ratio": 3.010886469673406,
"config_test": true,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9186136986266611,
"avg_score": 0.021044938001359068,
"num_lines": 52
} |
"""Default drawing engine."""
import os.path
import pygame, pygame.transform
from ocempgui.draw import Draw, String, Image
from ocempgui.widgets.StyleInformation import StyleInformation
from ocempgui.widgets.Constants import *
# Function cache
array3d = pygame.surfarray.array3d
blit_array = pygame.surfarray.blit_array
class DefaultEngine (object):
"""DefaultEngine (style) -> DefaultEngine
Default drawing engine for OcempGUI's widget system.
Drawing engines are used by the widgets of OcempGUI to draw certain
parts of widgets, such as the background, borders and shadows on a
surface.
Attributes:
style - The Style instance, which makes use of the engine.
"""
__slots__ = ["style"]
def __init__ (self, style):
self.style = style
def get_icon_path (self, size):
"""D.get_icon_path (size) -> path
Gets the absolute path to the icons for a specific size.
"""
return os.path.join (os.path.dirname (__file__), "icons", size)
def draw_rect (self, width, height, state, cls=None, style=None):
"""D.draw_rect (...) -> Surface
Creates a rectangle surface based on the style information.
The rectangle will have the passed width and height and will be
filled with the 'bgcolor'[state] value of the passed style.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
self_style = self.style
if not style:
style = self_style.get_style (cls)
image = self_style.get_style_entry (cls, style, "image", state)
color = self_style.get_style_entry (cls, style, "bgcolor", state)
surface = Draw.draw_rect (width, height, color)
if image:
img = pygame.transform.scale (Image.load_image (image),
(width, height))
surface.blit (img, (0, 0))
return surface
def draw_border (self, surface, state, cls=None, style=None,
bordertype=BORDER_FLAT, rect=None, space=0):
"""D.draw_border (...) -> None
    Draws a border of a specific type on the passed surface.
    Dependent on the border type, this method will draw a rectangle
    border on the surface.
    The 'rect' argument indicates the drawing area at whose edges
    the border will be drawn. This can differ from the real surface
rect. A None value (the default) will draw the border around the
surface.
    'space' denotes how many pixels will be left between each
border pixel before drawing the next one. A value of 0 thus
causes the method to draw a solid border while other values will
draw dashed borders.
The BORDER_RAISED, BORDER_SUNKEN, BORDER_ETCHED_IN and
BORDER_ETCHED_OUT border types use the 'lightcolor', 'darkcolor'
and 'shadow' style entries for drawing, BORDER_FLAT uses the
'bordercolor' entry.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
Raises a ValueError, if the passed bordertype argument is
not a value of the BORDER_TYPES tuple.
"""
if bordertype not in BORDER_TYPES:
raise ValueError ("bordertype must be a value from BORDER_TYPES")
        # Do nothing in case the border type is BORDER_NONE.
if bordertype == BORDER_NONE:
return surface
self_style = self.style
if not style:
style = self_style.get_style (cls)
# The spacing for the lines. space == 0 actually means a solid line,
        # space == 1 a dashed one with 1px spaces, etc.
# Add one pixel for the slicing later on.
space += 1
# Maybe pixel3d should be used here, but it does not support 24
# bit color depths.
array = array3d (surface)
        # Create the area the border should surround.
        # We will use the passed rect argument for it, if any.
r = rect
        if r is None:
r = surface.get_rect ()
        # Dependent on the border style, we pick the colors to use. 3D
        # effects such as sunken or raised make use of the light/darkcolor
        # style keys, the flat one uses the bordercolor key. If it is a
        # 3D effect, the shadow key determines how many lines the bevel has.
        #
        # The drawing is done as follows:
        # * First fill the top row of the (n,m) matrix with the given
        #   color, coloring only every wanted pixel.
        # * Then color the bottom row of the matrix in the same way.
        # * The left column will be colored as well.
        # * The same again with the right column.
if bordertype == BORDER_FLAT:
color = self_style.get_style_entry (cls, style, "bordercolor",
state)
array[r.left:r.right:space, r.top] = color
array[r.left:r.right:space, r.bottom - 1] = color
array[r.left, r.top:r.bottom:space] = color
array[r.right - 1, r.top:r.bottom:space] = color
elif bordertype in (BORDER_SUNKEN, BORDER_RAISED):
shadow = self_style.get_style_entry (cls, style, "shadow")
if shadow < 1:
return surface # No shadow wanted.
# Create the colors.
color1 = self_style.get_style_entry (cls, style, "lightcolor",
state)
color2 = self_style.get_style_entry (cls, style, "darkcolor",
state)
if bordertype == BORDER_SUNKEN:
color1, color2 = color2, color1
            # By default we will create bevel edges, for which the
            # topleft colors take up the most space. Thus the bottom and
            # right array slices will be reduced continuously.
for i in xrange (shadow):
array[r.left + i:r.right - i:space, r.top + i] = color1
array[r.left + i:r.right - i:space,
r.bottom - (i + 1)] = color2
array[r.left + i, r.top + i:r.bottom - i:space] = color1
array[r.right - (i + 1),
r.top + i + 1:r.bottom - i:space] = color2
elif bordertype in (BORDER_ETCHED_IN, BORDER_ETCHED_OUT):
shadow = self_style.get_style_entry (cls, style, "shadow")
if shadow < 1:
return surface # No shadow wanted.
color1 = self_style.get_style_entry (cls, style, "lightcolor",
state)
color2 = self_style.get_style_entry (cls, style, "darkcolor",
state)
if bordertype == BORDER_ETCHED_OUT:
color1, color2 = color2, color1
s = shadow
# First (inner) rectangle.
array[r.left + s:r.right:space, r.top + s:r.top + 2 * s] = color1
array[r.left + s:r.right:space,r.bottom - s:r.bottom] = color1
array[r.left + s:r.left + 2 * s, r.top + s:r.bottom:space] = color1
array[r.right - s:r.right, r.top + s:r.bottom:space] = color1
# Second (outer) rectangle.
array[r.left:r.right - s:space, r.top:r.top + s] = color2
array[r.left:r.right - s:space,
r.bottom - 2*s:r.bottom - s] = color2
array[r.left:r.left + s, r.top:r.bottom - s:space] = color2
array[r.right - 2 * s:r.right - s,
r.top:r.bottom - s:space] = color2
# Blit the new surface.
blit_array (surface, array)
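    # A small usage sketch for draw_rect/draw_border (assumes an initialized
    # Style instance 'style_obj'; sizes and states are illustrative):
    #
    #   engine = DefaultEngine (style_obj)
    #   sf = engine.draw_rect (100, 40, STATE_NORMAL)
    #   engine.draw_border (sf, STATE_NORMAL, bordertype=BORDER_SUNKEN)
    #   engine.draw_border (sf, STATE_NORMAL, bordertype=BORDER_FLAT, space=1)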
def draw_dropshadow (self, surface, cls=None, style=None):
"""D.draw_dropshadow (...) -> None
Draws a drop shadow on the surface.
The drop shadow will be drawn on the right and bottom side of
the surface and usually uses a black and grey color.
"""
if not style:
style = self.style.get_style (cls)
shadow = self.style.get_style_entry (cls, style, "shadow")
color = self.style.get_style_entry (cls, style, "shadowcolor")
if shadow < 2:
shadow = 2
half = shadow / 2
start = max (half, 3)
rect = surface.get_rect ()
array = array3d (surface)
# Right and bottom inner shadow.
array[rect.left + start:rect.right - half,
rect.bottom - shadow:rect.bottom - half] = color[0]
array[rect.right - shadow:rect.right - half,
rect.top + start:rect.bottom - half] = color[0]
# Right and bottom outer shadow.
array[rect.left + start:rect.right - half,
rect.bottom - half:rect.bottom] = color[1]
array[rect.right - half:rect.right,
rect.top + start:rect.bottom] = color[1]
blit_array (surface, array)
def draw_slider (self, width, height, state, cls=None, style=None):
"""D.draw_slider (...) -> Surface
Creates a rectangle surface with a grip look.
TODO: At the moment, this method creates a simple rectangle
surface with raised border. In future versions it will create a
surface with a grip look.
"""
if not style:
style = self.style.get_style (cls)
# Create the surface.
surface = self.draw_rect (width, height, state, cls, style)
self.draw_border (surface, state, cls, style, BORDER_RAISED)
        return surface
def draw_string (self, text, state, cls=None, style=None):
"""D.draw_string (...) -> Surface
Creates a string surface based on the style information.
Creates a transparent string surface from the provided text
based on the style information. The method makes use of the
'font' style entry to determine the font and size and uses the
'fgcolor' style entry for the color.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
self_style = self.style
if not style:
style = self_style.get_style (cls)
name = self_style.get_style_entry (cls, style, "font", "name")
size = self_style.get_style_entry (cls, style, "font", "size")
alias = self_style.get_style_entry (cls, style, "font", "alias")
st = self_style.get_style_entry (cls, style, "font", "style")
color = self_style.get_style_entry (cls, style, "fgcolor", state)
return String.draw_string (text, name, size, alias, color, st)
def draw_string_with_mnemonic (self, text, state, mnemonic, cls=None,
style=None):
"""D.draw_string_with_mnemonic (...) -> Surface
Creates a string surface with an additional underline.
This method basically does the same as the draw_string()
method, but additionally underlines the character specified with
        the 'mnemonic' index argument using the 'fgcolor' style entry.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
self_style = self.style
if not style:
style = self_style.get_style (cls)
name = self_style.get_style_entry (cls, style, "font", "name")
size = self_style.get_style_entry (cls, style, "font", "size")
alias = self_style.get_style_entry (cls, style, "font", "alias")
st = self_style.get_style_entry (cls, style, "font", "style")
fgcolor = self_style.get_style_entry (cls, style, "fgcolor", state)
font = String.create_font (name, size, st)
surface = String.draw_string (text, name, size, alias, fgcolor)
left = font.size (text[:mnemonic])
right = font.size (text[mnemonic + 1:])
height = surface.get_rect ().height - 2
width = surface.get_rect ().width
Draw.draw_line (surface, fgcolor, (left[0], height),
(width - right[0], height), 1)
return surface
def draw_arrow (self, surface, arrowtype, state, cls=None, style=None):
"""D.draw_arrow (...) -> None
Draws an arrow on a surface.
        Draws an arrow on a surface using the passed arrowtype as
        arrow direction. The method uses a third of the surface width
        (or height for ARROW_UP/ARROW_DOWN) as arrow width and places
it on the center of the surface. It also uses the 'fgcolor'
style entry as arrow color.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
Raises a ValueError, if the passed arrowtype argument is not a
value of the ARROW_TYPES tuple.
"""
        if arrowtype not in ARROW_TYPES:
            raise ValueError ("arrowtype must be a value of ARROW_TYPES")
if not style:
style = self.style.get_style (cls)
color = self.style.get_style_entry (cls, style, "fgcolor", state)
rect = surface.get_rect ()
array = array3d (surface)
if arrowtype in (ARROW_LEFT, ARROW_RIGHT):
arrow_width = rect.width / 3
center = rect.centery
if center % 2 == 0:
center -= 1
if arrowtype == ARROW_LEFT:
for i in xrange (arrow_width):
col = arrow_width + i
array[col:col + arrow_width - i:1, center + i] = color
array[col:col + arrow_width - i:1, center - i] = color
elif arrowtype == ARROW_RIGHT:
for i in xrange (arrow_width):
col = rect.width - arrow_width - i - 1
array[col:col - arrow_width + i:-1, center + i] = color
array[col:col - arrow_width + i:-1, center - i] = color
elif arrowtype in (ARROW_UP, ARROW_DOWN):
arrow_height = rect.height / 3
center = rect.centerx
if center % 2 == 0:
center -= 1
if arrowtype == ARROW_UP:
for i in xrange (arrow_height):
row = arrow_height + i
array[center + i, row:row + arrow_height - i:1] = color
array[center - i, row:row + arrow_height - i:1] = color
elif arrowtype == ARROW_DOWN:
for i in xrange (arrow_height):
row = rect.height - arrow_height - i - 1
array[center + i, row:row - arrow_height + i:-1] = color
array[center - i, row:row - arrow_height + i:-1] = color
# Blit the new surface.
blit_array (surface, array)
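    # Sketch: drawing an upward arrow onto a small widget surface, reusing
    # the engine instance from the sketch above (values illustrative):
    #
    #   sf = engine.draw_rect (20, 20, STATE_NORMAL)
    #   engine.draw_arrow (sf, ARROW_UP, STATE_NORMAL)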
def draw_check (self, surface, rect, checked, state, cls=None, style=None):
"""D.draw_check (...) -> None
        Draws a check box onto the passed surface.
        Draws a check box of 14x14 pixels at the given rect position on
        the surface. The method uses a sunken border effect and makes use of
the 'lightcolor' and 'darkcolor' style entries for the border.
        Dependent on the passed 'state' argument, the method will either
use fixed color values of
(255, 255, 255) for the background and
(0, 0, 0) for the check,
which is only drawn, if the 'checked' argument evaluates to
True. If the 'state' argument is set to STATE_INSENSITIVE the
'bgcolor' and 'fgcolor' style entries are used instead.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
self_style = self.style
if not style:
style = self_style.get_style (cls)
# Some colors we need.
bg = (255, 255, 255) # Default background color.
check = (0, 0, 0) # Check color.
sh = (150, 150, 150) # Check shadow to make it look smooth.
array = array3d (surface)
# Draw the borders and fill the rest.
dark = self_style.get_style_entry (cls, style, "darkcolor",state)
light = self_style.get_style_entry (cls, style, "lightcolor", state)
array[rect.left:rect.left + 2, rect.top:rect.bottom] = dark
array[rect.left:rect.right, rect.top:rect.top + 2] = dark
array[rect.right - 2:rect.right, rect.top:rect.bottom] = light
array[rect.left:rect.right, rect.bottom - 2:rect.bottom] = light
array[rect.left, rect.bottom - 2] = dark
array[rect.right - 2, rect.top] = dark
if state == STATE_INSENSITIVE:
array[rect.left + 2:rect.right - 2,
rect.top + 2:rect.bottom - 2] = \
self_style.get_style_entry (cls, style, "bgcolor", state)
check = self_style.get_style_entry (cls, style, "fgcolor", state)
sh = check
else:
array[rect.left + 2:rect.right - 2,
rect.top + 2:rect.bottom - 2] = bg
if checked:
# Place a check into the drawn box by direct pixel
# manipulation.
# TODO: provide a handy matrix for this, so it can be merged
# and changed quickly and vary in size.
#
# 11 13
# 0 1 2 3 4 5 6 7 8 9 10 12
# -----------------------------
# 0 |* * * * * * * * * * * * * *|
# 1 |* * * * * * * * * * * * * *|
# 2 |* * 0 0 0 0 0 0 0 0 0 0 * *|
# 3 |* * 0 0 0 0 0 0 0 0 # # * *|
# 4 |* * 0 0 0 0 0 0 0 # # # * *|
# 5 |* * 0 0 0 0 0 0 # # # 0 * *|
# 6 |* * 0 0 # # 0 # # # 0 0 * *|
# 7 |* * 0 # # # 0 # # 0 0 0 * *|
# 8 |* * 0 0 # # # # # 0 0 0 * *|
# 9 |* * 0 0 0 # # # 0 0 0 0 * *|
# 10 |* * 0 0 0 0 0 # 0 0 0 0 * *|
# 11 |* * 0 0 0 0 0 0 0 0 0 0 * *|
# 12 |* * * * * * * * * * * * * *|
# 13 |* * * * * * * * * * * * * *|
# -----------------------------
# * = border shadow
# 0 = unset
# # = set with a specific color.
#
array[rect.left + 3, rect.top + 7] = sh
array[rect.left + 4, rect.top + 6] = sh
array[rect.left + 4, rect.top + 7:rect.top + 9] = check
array[rect.left + 5, rect.top + 6] = sh
array[rect.left + 5, rect.top + 7:rect.top + 9] = check
array[rect.left + 5, rect.top + 9] = sh
array[rect.left + 6, rect.top + 7] = sh
array[rect.left + 6, rect.top + 8:rect.top + 10] = check
array[rect.left + 7, rect.top + 6] = sh
array[rect.left + 7, rect.top + 7:rect.top + 11] = check
array[rect.left + 8, rect.top + 5:rect.top + 9] = check
array[rect.left + 9, rect.top + 4:rect.top + 7] = check
array[rect.left + 10, rect.top + 3:rect.top + 6] = check
array[rect.left + 11, rect.top + 3:rect.top + 5] = sh
#self.__checks[style] =
blit_array (surface, array)
def draw_radio (self, surface, rect, checked, state, cls=None, style=None):
"""D.draw_radio (...) -> None
        Draws a radio check box onto the passed surface.
        Draws a radio check box of 14x14 pixels at the given rect position
        on the surface. The method uses a sunken border effect and makes
use of the 'lightcolor' and 'darkcolor' style entries for the
        border. Dependent on the passed 'state' argument, the method
will either use fixed color values of
(255, 255, 255) for the background and
(0, 0, 0) for the check,
which is only drawn, if the 'checked' argument evaluates to
True. If the 'state' argument is set to STATE_INSENSITIVE the
'bgcolor' and 'fgcolor' style entries are used instead.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
if not style:
style = self.style.get_style (cls)
# We need some colors for the radio check.
sh1 = (0, 0, 0) # Border topleft.
sh2 = (150, 150, 150) # Border shadow top- and bottomleft.
sh3 = (255, 255, 255) # Outer border shadow bottomleft.
bg = (255, 255, 255) # Background color for the check.
check = (0, 0, 0) # Color of the radio check.
if state == STATE_INSENSITIVE:
bg = self.style.get_style_entry (cls, style, "bgcolor", state)
check = self.style.get_style_entry (cls, style, "fgcolor", state)
sh1 = check
sh2 = self.style.get_style_entry (cls, style, "fgcolor", state)
sh3 = (240, 240, 240)
# The complete radio check will be drawn by manipulating pixels
# of the box.
# TODO: provide a handy matrix for this, so it can be merged
# and changed quickly and vary in size.
# 11 13
# 0 1 2 3 4 5 6 7 8 9 10 12
# -----------------------------
# 0 |x x x x x x x x x x x x x x|
# 1 |x x x x x * * * * x x x x x|
# 2 |x x x * * s s s s s * x x x|
# 3 |x x * s s 0 0 0 0 0 s * x x|
# 4 |x x * s 0 0 0 0 0 0 0 * x x|
# 5 |x * s 0 0 0 # # # 0 0 0 * x|
# 6 |x * s 0 0 # # # # # 0 0 * 8|
# 7 |x * s 0 0 # # # # # 0 0 * 8|
# 8 |x * s 0 0 # # # # # 0 0 * 8|
# 9 |x x s 0 0 0 # # # 0 0 0 * 8|
# 10 |x x * s 0 0 0 0 0 0 0 * 8 x|
# 11 |x x x * * 0 0 0 0 0 * * 8 x|
# 12 |x x x x x * * * * * 8 8 x x|
# 13 |x x x x x x 8 8 8 8 x x x x|
# -----------------------------
# x = default background color
# * = border shadow (sh2)
# s = topleft border (sh1)
# 0 = background color (bg)
# 8 = border shadow 2 (sh3)
# # = check color (check)
#
array = array3d (surface)
array[rect.left + 1, rect.top + 5:rect.top + 9] = sh2
array[rect.left + 2, rect.top + 3:rect.top + 5] = sh2
array[rect.left + 2, rect.top + 5:rect.top + 10] = sh1
array[rect.left + 2, rect.top + 10] = sh2
array[rect.left + 3:rect.left + 5, rect.top + 2] = sh2
array[rect.left + 3, rect.top + 3:rect.top + 5] = sh1
array[rect.left + 3:rect.left + 12, rect.top + 5:rect.top + 10] = bg
array[rect.left + 3, rect.top + 10] = sh1
array[rect.left + 3:rect.left + 5, rect.top + 11] = sh2
array[rect.left + 4, rect.top + 3] = sh1
array[rect.left + 4:rect.left + 11, rect.top + 4] = bg
array[rect.left + 4:rect.left + 11, rect.top + 10] = bg
array[rect.left + 5:rect.left + 9, rect.top + 1] = sh2
array[rect.left + 5:rect.left + 10, rect.top + 2] = sh1
array[rect.left + 5:rect.left + 10, rect.top + 3] = bg
array[rect.left + 5:rect.left + 10, rect.top + 11] = bg
array[rect.left + 5:rect.left + 10, rect.top + 12] = sh2
array[rect.left + 6:rect.left + 10, rect.top + 13] = sh3
array[rect.left + 10, rect.top + 2] = sh2
array[rect.left + 10, rect.top + 3] = sh1
array[rect.left + 10:rect.left + 12, rect.top + 11] = sh2
array[rect.left + 10:rect.left + 12, rect.top + 12] = sh3
array[rect.left + 11, rect.top + 3:rect.top + 5] = sh2
array[rect.left + 11, rect.top + 10] = sh2
array[rect.left + 12, rect.top + 5:rect.top + 10] = sh2
array[rect.left + 12, rect.top + 10:rect.top + 12] = sh3
array[rect.left + 13, rect.top + 6:rect.top + 10] = sh3
if checked:
array[rect.left + 5:rect.left + 10,
rect.top + 6:rect.top + 9] = check
array[rect.left + 6:rect.left + 9, rect.top + 5] = check
array[rect.left + 6:rect.left + 9, rect.top + 9] = check
blit_array (surface, array)
def draw_caption (self, width, title=None, state=STATE_NORMAL, cls=None,
style=None):
"""D.draw_caption (...) -> Surface
        Creates a caption bar suitable for Window objects.
Creates a rectangle surface with a flat border and the passed
        'title' text argument. The surface color is taken from the
        CAPTION_ACTIVE_COLOR or CAPTION_INACTIVE_COLOR style information,
        depending on the state.
        The passed 'width' will be ignored if the size of the title
        text exceeds it. Instead the width of the title text plus an
additional spacing of 4 pixels will be used.
The height of the surface relies on the title text height plus
an additional spacing of 4 pixels.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
if not title:
title = ""
if not style:
style = self.style.get_style (cls)
# Create the window title.
surface_text = self.draw_string (title, state, cls, style)
rect_text = surface_text.get_rect ()
# Add two pixels for the border and 2 extra ones for spacing.
cap_height = rect_text.height + 4
if width < rect_text.width + 4:
width = rect_text.width + 4
color = None
if state == STATE_ACTIVE:
color = StyleInformation.get ("CAPTION_ACTIVE_COLOR")
else:
color = StyleInformation.get ("CAPTION_INACTIVE_COLOR")
# Create the complete surface.
surface = Draw.draw_rect (width, cap_height, color)
self.draw_border (surface, state, cls, style,
StyleInformation.get ("CAPTION_BORDER"))
surface.blit (surface_text, (2, 2))
return surface
def draw_caret (self, surface, x, y, thickness, state, cls=None,
style=None):
"""D.draw_caret (...) -> None
Draws a caret line on a surface.
Draws a vertical caret line onto the passed surface. The
position of the caret will be set using the 'x' argument. The
length of the caret line can be adjusted using the 'y' argument.
The thickness of the caret line is set by the 'thickness'
argument. This method makes use of the 'fgcolor' style entry for
the color of the caret line.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
"""
if not style:
style = self.style.get_style (cls)
rect = surface.get_rect ()
ax = (rect.topleft[0] + x, rect.topleft[1] + y)
bx = (rect.bottomleft[0] + x, rect.bottomleft[1] - y)
Draw.draw_line (surface, self.style.get_style_entry (cls, style,
"fgcolor", state),
ax, bx, thickness)
def draw_label (self, label):
"""D.draw_label (...) -> Surface
Creates the surface for the passed Label widget.
"""
cls = label.__class__
st = label.style
width = 2 * label.padding
height = 2 * label.padding
labels = []
rtext = None
if not label.multiline:
# Draw a single line.
if label.mnemonic[0] != -1:
rtext = self.draw_string_with_mnemonic (label.text, label.state,
label.mnemonic[0], cls,
st)
else:
rtext = self.draw_string (label.text, label.state, cls, st)
rect = rtext.get_rect ()
width += rect.width
height += rect.height
labels.append ((rtext, rect))
else:
# Multiple lines.
            cur = 0 # Current position marker for the mnemonic.
lines = label.get_lines ()
mnemonic = label.mnemonic[0]
last = len(lines) - 1
for i, line in enumerate(lines):
if (mnemonic != -1) and (cur <= mnemonic) and \
((cur + len (line)) > mnemonic):
rtext = self.draw_string_with_mnemonic (line, label.state,
mnemonic - cur,
cls, st)
else:
rtext = self.draw_string (line, label.state, cls, st)
rect = rtext.get_rect ()
if width < rect.width:
width = rect.width + 2 * label.padding
if (label.linespace != 0) and (i != last):
# Do not consider linespace for the last line
rect.height += label.linespace
height += rect.height
labels.append ((rtext, rect))
cur += len (line) + 1
# Guarantee size.
width, height = label.check_sizes (width, height)
surface = self.draw_rect (width, height, label.state, cls, st)
blit = surface.blit
posy = label.padding
totalheight = 0
totalwidth = 0
for rtext, rect in labels:
totalheight += rect.height
totalwidth = max (totalwidth, rect.width)
# Blit all in the center using centered justification.
posx = (width - totalwidth) / 2
posy = (height - totalheight) / 2
for rtext, rect in labels:
rect.topleft = posx, posy
posy += rect.height
if label.align & ALIGN_TOP == ALIGN_TOP:
posy = label.padding
for rtext, rect in labels:
rect.top = posy
posy += rect.height
elif label.align & ALIGN_BOTTOM == ALIGN_BOTTOM:
posy = height - label.padding - totalheight
for rtext, rect in labels:
rect.top = posy
posy += rect.height
if label.align & ALIGN_LEFT == ALIGN_LEFT:
for rtext, rect in labels:
rect.left = label.padding
elif label.align & ALIGN_RIGHT == ALIGN_RIGHT:
right = width - label.padding
for rtext, rect in labels:
rect.right = right
else:
for rtext, rect in labels:
rect.left = (width - rect.width) / 2
for rtext, rect in labels:
blit (rtext, rect)
return surface
def draw_button (self, button):
"""D.draw_button (...) -> Surface
Creates the surface for the passed Button widget.
"""
cls = button.__class__
border = self.style.get_border_size (cls, button.style, button.border)
active = StyleInformation.get ("ACTIVE_BORDER")
border_active = self.style.get_border_size (cls, button.style, active)
# Create the absolute sizes of the surface. This includes the
# padding as well as the shadow and additional pixels for the
# dashed border.
width = 2 * (button.padding + border + border_active)
height = 2 * (button.padding + border + border_active)
if button.child:
width += button.child.width
height += button.child.height
# Guarantee size.
width, height = button.check_sizes (width, height)
surface = self.draw_rect (width, height, button.state, cls,
button.style)
self.draw_border (surface, button.state, cls, button.style,
button.border)
# Draw a dashed border around the label, if the button has
# focus.
if button.focus:
r = None
if button.child:
r = button.child.rect
r.x -= border_active
r.y -= border_active
r.width += 2 * border_active
r.height += 2 * border_active
else:
adj = border + 2 * border_active
r = surface.get_rect ()
r.topleft = (adj, adj)
r.width -= 2 * adj
r.height -= 2 * adj
self.draw_border (surface, button.state, cls,button.style, active,
r, StyleInformation.get ("ACTIVE_BORDER_SPACE"))
return surface
def draw_checkbutton (self, button):
"""D.draw_checkbutton (...) -> Surface
Creates the surface for the passed CheckButton widget.
"""
cls = button.__class__
active = StyleInformation.get ("ACTIVE_BORDER")
border = self.style.get_border_size (cls, button.style, active)
checksize = StyleInformation.get ("CHECK_SIZE")
# Create the absolute sizes of the surface, including the
# padding.
width = 2 * (button.padding + border)
height = 2 * (button.padding + border)
if button.child:
width += button.child.width
height += button.child.height
# Create check box.
rect_check = pygame.Rect (button.padding, button.padding, checksize,
checksize)
# The layout looks like:
# ----------------
# | X | child |
# ----------------
# Check Child
# Thus we have to add a specific spacing between the child and the
# check. By default we will use the given StyleInformation value.
width += rect_check.width + StyleInformation.get ("CHECK_SPACING")
if height < rect_check.height:
# Do not forget to add the padding!
height = rect_check.height + 2 * button.padding
# Guarantee size.
width, height = button.check_sizes (width, height)
# The surface on which both components will be placed.
surface = self.draw_rect (width, height, button.state, cls,
button.style)
rect_surface = surface.get_rect ()
# Draw the check on the surface.
rect_check.centery = rect_surface.centery
self.draw_check (surface, rect_check, button.active, button.state, cls,
button.style)
# Draw a dashed border around the label, if the button has
# focus.
if button.focus:
r = None
if button.child:
r = button.child.rect
r.x -= border
r.y -= border
r.width += 2 * border
r.height += 2 * border
else:
r = rect_surface
r.topleft = (border, border)
r.width -= 2 * border
r.height -= 2 * border
self.draw_border (surface, button.state, cls, button.style, active,
r, StyleInformation.get ("ACTIVE_BORDER_SPACE"))
return surface
def draw_entry (self, entry):
"""D.draw_entry (...) -> Surface
Creates the surface for the passed Entry widget.
"""
cls = entry.__class__
border = self.style.get_border_size (cls, entry.style, entry.border)
# Peek the style so we can calculate the font.
st = entry.style or self.style.get_style (cls)
fn = self.style.get_style_entry (cls, st, "font", "name")
sz = self.style.get_style_entry (cls, st, "font", "size")
fs = self.style.get_style_entry (cls, st, "font", "style")
font = String.create_font (fn, sz, fs)
height = font.get_height () + 2 * (entry.padding + border)
width, height = entry.check_sizes (0, height)
# Main surface.
surface = self.draw_rect (width, height, entry.state, cls, entry.style)
self.draw_border (surface, entry.state, cls, entry.style, entry.border)
return surface
def draw_imagebutton (self, button):
"""D.draw_imagebutton (button) -> Surface
Creates the surface for the passed ImageButton widget.
"""
cls = button.__class__
border = self.style.get_border_size (cls, button.style, button.border)
active = StyleInformation.get ("ACTIVE_BORDER")
border_active = self.style.get_border_size (cls, button.style, active)
spacing = StyleInformation.get ("IMAGEBUTTON_SPACING")
width = 2 * (button.padding + border + border_active)
height = 2 * (button.padding + border + border_active)
rect_child = None
if button.child:
rect_child = button.child.rect
width += button.child.width
height += button.child.height
rect_img = None
if button.picture:
rect_img = button.picture.get_rect ()
width += rect_img.width
if button.child:
width += spacing
needed_he = rect_img.height + \
2 * (button.padding + border + border_active)
if height < needed_he:
height = needed_he
# Guarantee size.
width, height = button.check_sizes (width, height)
surface = self.draw_rect (width, height, button.state, cls,
button.style)
rect = surface.get_rect ()
self.draw_border (surface, button.state, cls, button.style,
button.border)
# Dashed border.
if button.focus:
r = None
if rect_img and rect_child:
rect_img.center = rect.center
rect_img.right -= button.child.rect.width / 2 + spacing
rect_child.center = rect.center
rect_child.left = rect_img.right + spacing
r = rect_img.union (rect_child)
elif rect_img:
rect_img.center = rect.center
r = rect_img
elif rect_child:
rect_child.center = rect.center
r = rect_child
if r:
r.x -= border_active
r.y -= border_active
r.width += 2 * border_active
r.height += 2 * border_active
else:
adj = border + 2 * border_active
r = rect
r.topleft = (adj, adj)
r.width -= 2 * adj
r.height -= 2 * adj
self.draw_border (surface, button.state, cls, button.style, active,
r, StyleInformation.get ("ACTIVE_BORDER_SPACE"))
return surface
def draw_radiobutton (self, button):
"""D.draw_radiobutton (button) -> Surface
Creates the surface for the passed RadioButton widget.
"""
cls = button.__class__
active = StyleInformation.get ("ACTIVE_BORDER")
border = self.style.get_border_size (cls, button.style, active)
radiosize = StyleInformation.get ("RADIO_SIZE")
# Create the absolute sizes of the surface, including the
        # padding.
width = 2 * (button.padding + border)
height = 2 * (button.padding + border)
if button.child:
width += button.child.width
height += button.child.height
# Create radio box.
rect_radio = pygame.Rect (button.padding, button.padding, radiosize,
radiosize)
# The layout looks like:
# ----------------
# | X | child |
# ----------------
# Check Child
# Thus we have to add a specific spacing between the child and the
# check. By default we will use the given StyleInformation value.
width += rect_radio.width + StyleInformation.get ("RADIO_SPACING")
if height < rect_radio.height:
# Do not forget to add the padding!
height = rect_radio.height + 2 * button.padding
# Guarantee size.
width, height = button.check_sizes (width, height)
# The surface on which both components will be placed.
surface = self.draw_rect (width, height, button.state, cls,
button.style)
rect_surface = surface.get_rect ()
rect_radio.centery = rect_surface.centery
self.draw_radio (surface, rect_radio, button.active, button.state, cls,
button.style)
# Draw a dashed border around the label, if the button has
# focus.
if button.focus:
r = None
if button.child:
r = button.child.rect
r.x -= border
r.y -= border
r.width += 2 * border
r.height += 2 * border
else:
r = rect_surface
r.topleft = (border, border)
r.width -= 2 * border
r.height -= 2 * border
self.draw_border (surface, button.state, cls, button.style, active,
r, StyleInformation.get ("ACTIVE_BORDER_SPACE"))
return surface
def draw_progressbar (self, bar):
"""D.draw.progressbar (...) -> Surface
Creates the surface for the passed ProgressBar widget.
"""
cls = bar.__class__
border_type = StyleInformation.get ("PROGRESSBAR_BORDER")
border = self.style.get_border_size (cls, bar.style, border_type)
st = bar.style or self.style.get_style (cls)
# Guarantee size.
width, height = bar.check_sizes (0, 0)
surface = self.draw_rect (width, height, bar.state, cls, st)
# Status area.
width -= 2 * border
height -= 2 * border
width = int (width * bar.value / 100)
# Draw the progress.
sf_progress = Draw.draw_rect (width, height,
StyleInformation.get ("PROGRESS_COLOR"))
surface.blit (sf_progress, (border, border))
self.draw_border (surface, bar.state, cls, st, border_type)
return surface
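    # Worked example for the scaling above: a 204px-wide bar with a 2px
    # border on each side and value == 25 gets a progress area of
    # (204 - 4) * 25 / 100 = 50 pixels.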
def draw_frame (self, frame):
"""D.draw_frame (...) -> Surface
Creates the surface for the passed Frame widget.
"""
cls = frame.__class__
# Guarantee size.
width, height = frame.calculate_size ()
width, height = frame.check_sizes (width, height)
surface = self.draw_rect (width, height, frame.state, cls, frame.style)
rect = surface.get_rect ()
if frame.widget:
rect.y = frame.widget.height / 2
rect.height -= frame.widget.height / 2
self.draw_border (surface, frame.state, cls, frame.style, frame.border,
rect)
return surface
def draw_table (self, table):
"""D.draw_table (...) -> Surface
Creates the surface for the passed Table widget.
"""
cls = table.__class__
width, height = table.calculate_size ()
width, height = table.check_sizes (width, height)
return self.draw_rect (width, height, table.state, cls, table.style)
def draw_scale (self, scale, orientation):
"""D.draw_scale (...) -> Surface
Creates the surface for the passed Scale widget.
If the passed orientation argument is ORIENTATION_VERTICAL, the
vertical slider information (VSCALE_SLIDER_SIZE) is used,
otherwise the horizontal (HSCALE_SLIDER_SIZE).
"""
cls = scale.__class__
# Use a default value for the slider, if not set in the style.
slider = None
if orientation == ORIENTATION_VERTICAL:
slider = StyleInformation.get ("VSCALE_SLIDER_SIZE")
else:
slider = StyleInformation.get ("HSCALE_SLIDER_SIZE")
width, height = scale.check_sizes (0, 0)
if width < slider[0]:
width = slider[0]
if height < slider[1]:
height = slider[1]
# The slider line in the middle will be a third of the complete
# width/height. To center it correctly, we need an odd value.
if (orientation == ORIENTATION_VERTICAL) and (height % 2 == 0):
width += 1
elif width % 2 == 0:
height += 1
# Main surface to draw on. We do not want to have any resizing,
# thus we are doing this in two steps.
surface = self.draw_rect (width, height, scale.state, cls, scale.style)
        if orientation == ORIENTATION_VERTICAL:
            rect = pygame.Rect (width / 3, 0, width / 3, height)
            rect.centery = height / 2
        else:
            rect = pygame.Rect (0, height / 3, width, height / 3)
            rect.centerx = width / 2
        self.draw_border (surface, scale.state, cls, scale.style,
                          StyleInformation.get ("SCALE_BORDER"), rect)
return surface
def draw_scrollbar (self, scrollbar, orientation):
"""D.draw_scrollbar (...) -> Surface
Creates the surface for the passed ScrollBar widget.
If the passed orientation argument is ORIENTATION_VERTICAL, the
        vertical button information (VSCROLLBAR_BUTTON_SIZE) is used,
        otherwise the horizontal (HSCROLLBAR_BUTTON_SIZE).
"""
cls = scrollbar.__class__
border_type = StyleInformation.get ("SCROLLBAR_BORDER")
border = self.style.get_border_size (cls, scrollbar.style, border_type)
# We use a temporary state here, so that just the buttons will
# have the typical sunken effect.
tmp_state = scrollbar.state
if scrollbar.state == STATE_ACTIVE:
tmp_state = STATE_NORMAL
# Guarantee size.
width, height = scrollbar.check_sizes (0, 0)
size_button = (0, 0)
if orientation == ORIENTATION_VERTICAL:
size_button = StyleInformation.get ("VSCROLLBAR_BUTTON_SIZE")
if width < (size_button[0] + 2 * border):
width = size_button[0] + 2 * border
if height < 2 * (size_button[1] + border):
                height = 2 * (size_button[1] + border)
else:
size_button = StyleInformation.get ("HSCROLLBAR_BUTTON_SIZE")
if width < 2 * (size_button[0] + border):
width = 2 * (size_button[0] + border)
if height < (size_button[1] + 2 * border):
height = size_button[1] + 2 * border
surface = self.draw_rect (width, height, tmp_state, cls,
scrollbar.style)
self.draw_border (surface, tmp_state, cls, scrollbar.style,
border_type)
return surface
def draw_scrolledwindow (self, window):
"""D.draw_scrolledwindow (...) -> Surface
Creates the Surface for the passed ScrolledWindow widget.
"""
cls = window.__class__
width, height = window.check_sizes (0, 0)
surface = self.draw_rect (width, height, window.state, cls,
window.style)
return surface
def draw_viewport (self, viewport):
"""D.draw_viewport (...) -> Surface
Creates the Surface for the passed ViewPort widget.
"""
cls = viewport.__class__
width, height = viewport.check_sizes (0, 0)
surface = self.draw_rect (width, height, viewport.state, cls,
viewport.style)
self.draw_border (surface, viewport.state, cls, viewport.style,
StyleInformation.get ("VIEWPORT_BORDER"))
return surface
def draw_statusbar (self, statusbar):
"""D.draw_statusbar (...) -> Surface
Creates the Surface for the passed StatusBar widget.
"""
cls = statusbar.__class__
border_type = StyleInformation.get ("STATUSBAR_BORDER")
# Guarantee size.
width, height = statusbar.calculate_size ()
width, height = statusbar.check_sizes (width, height)
# Create the main surface
surface = self.draw_rect (width, height, statusbar.state, cls,
statusbar.style)
self.draw_border (surface, statusbar.state, cls, statusbar.style,
border_type)
return surface
def draw_imagemap (self, imagemap):
"""D.draw_imagemap (...) -> Surface
Creates the Surface for the passed ImageMap widget.
"""
cls = imagemap.__class__
border_type = StyleInformation.get ("IMAGEMAP_BORDER")
border = self.style.get_border_size (cls, imagemap.style, border_type)
rect_image = imagemap.picture.get_rect ()
width = rect_image.width + 2 * border
height = rect_image.height + 2 * border
# Guarantee size.
width, height = imagemap.check_sizes (width, height)
surface = self.draw_rect (width, height, imagemap.state, cls,
imagemap.style)
self.draw_border (surface, imagemap.state, cls, imagemap.style,
border_type)
return surface
def draw_textlistitem (self, viewport, item):
"""D.draw_textlistitem (...) -> Surface
Creates the Surface for the passed TextListItem.
"""
text = item.text or ""
return self.draw_string (text, viewport.state, item.__class__,
item.style)
def draw_filelistitem (self, viewport, item):
"""D.draw_filelistitem (...) -> Surface
Creates the Surface for the passed FileListItem.
"""
        color = StyleInformation.get ("SELECTION_COLOR")
        # Use the item's own style; fall back to the class default.
        style = item.style
        if not style:
            style = self.style.get_style (item.__class__)
rect_icon = None
if item.icon:
rect_icon = item.icon.get_rect ()
else:
rect_icon = pygame.Rect (0, 0, 0, 0)
sf_text = self.draw_textlistitem (viewport, item)
rect_text = sf_text.get_rect ()
width = rect_icon.width + 2 + rect_text.width
height = rect_icon.height
if height < rect_text.height:
height = rect_text.height
surface = None
if item.selected:
surface = Draw.draw_rect (width, height, color)
else:
surface = self.draw_rect (width, height, viewport.state,
item.__class__, item.style)
rect = surface.get_rect ()
if item.icon:
rect_icon.centery = rect.centery
surface.blit (item.icon, rect_icon)
rect_text.centery = rect.centery
surface.blit (sf_text, (rect_icon.width + 2, rect_text.y))
return surface
def draw_window (self, window):
"""D.draw_window (...) -> Surface
Creates the Surface for the passed Window.
"""
cls = window.__class__
style = window.style or self.style.get_style (cls)
border_type = StyleInformation.get ("WINDOW_BORDER")
border = self.style.get_border_size (cls, style, border_type)
dropshadow = self.style.get_style_entry (cls, style, "shadow")
width = 2 * (window.padding + border) + dropshadow
height = 2 * (window.padding + border) + dropshadow
if window.child:
width += window.child.width
height += window.child.height
width, height = window.check_sizes (width, height)
# Calculate the height of the caption bar.
fn = self.style.get_style_entry (cls, style, "font", "name")
sz = self.style.get_style_entry (cls, style, "font", "size")
fs = self.style.get_style_entry (cls, style, "font", "style")
font = String.create_font (fn, sz, fs)
height_caption = font.get_height () + 2 * self.style.get_border_size \
(cls, style, StyleInformation.get ("CAPTION_BORDER"))
height += height_caption
# Create the surface.
surface = self.draw_rect (width, height, window.state, cls, style)
self.draw_border (surface, window.state, cls, style, border_type,
pygame.Rect (0, 0, width - dropshadow,
height - dropshadow))
if dropshadow > 0:
self.draw_dropshadow (surface, cls, style)
return surface
def draw_graph2d (self, graph):
"""D.draw_graph2d (...) -> Surface
Creates the Surface for the passed Graph2D
"""
cls = graph.__class__
width, height = graph.check_sizes (0, 0)
surface = self.draw_rect (width, height, graph.state, cls, graph.style)
# Draw dashed border, if focused.
if graph.focus:
self.draw_border (surface, graph.state, cls, graph.style,
StyleInformation.get ("ACTIVE_BORDER"),
space = \
StyleInformation.get ("ACTIVE_BORDER_SPACE"))
return surface
def draw_imagelabel (self, label):
"""D.draw_imagelabel (...) -> Surface
Creates the surface for the passed ImageLabel widget.
"""
cls = label.__class__
border = self.style.get_border_size (cls, label.style, label.border)
width = 2 * (border + label.padding)
height = 2 * (border + label.padding)
rect_img = label.picture.get_rect ()
width += rect_img.width
height += rect_img.height
# Guarantee size.
width, height = label.check_sizes (width, height)
surface = self.draw_rect (width, height, label.state, cls, label.style)
self.draw_border (surface, label.state, cls, label.style, label.border)
return surface
def draw_tooltipwindow (self, window):
"""D.draw_tooltipwindow (...) -> Surface
Creates the surface for the passed TooltipWindow widget.
"""
cls = window.__class__
style = window.style or self.style.get_style (cls)
border_type = StyleInformation.get ("TOOLTIPWINDOW_BORDER")
border = self.style.get_border_size (cls, style, border_type)
color = StyleInformation.get ("TOOLTIPWINDOW_COLOR")
width = 2 * (window.padding + border)
height = 2 * (window.padding + border)
rtext = self.draw_string (window.text, window.state, cls, style)
rect = rtext.get_rect ()
width += rect.width
height += rect.height
# Guarantee size.
width, height = window.check_sizes (width, height)
surface = Draw.draw_rect (width, height, color)
surface.blit (rtext, (window.padding, window.padding))
self.draw_border (surface, window.state, cls, style, border_type)
return surface
def draw_box (self, box):
"""D.draw_box (...) -> Surface
Creates the surface for the passed Box widget.
"""
cls = box.__class__
# Guarantee size.
width, height = box.check_sizes (0, 0)
return self.draw_rect (width, height, box.state, cls, box.style)
def draw_alignment (self, alignment):
"""D.draw_alignment (...) -> Surface
Creates the surface for the passed Alignment widget.
"""
# Guarantee size.
width, height = 0, 0
if alignment.child:
width += alignment.child.width
height += alignment.child.height
width, height = alignment.check_sizes (width, height)
return self.draw_rect (width, height, alignment.state,
alignment.__class__, alignment.style)
| {
"repo_name": "prim/ocempgui",
"path": "data/themes/default/DefaultEngine.py",
"copies": "1",
"size": "57426",
"license": "bsd-2-clause",
"hash": -2948845936167941600,
"line_mean": 39.0181184669,
"line_max": 80,
"alpha_frac": 0.5459722077,
"autogenerated": false,
"ratio": 4.0012541806020065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5047226388302006,
"avg_score": null,
"num_lines": null
} |
# tab:2
#
#
# "Copyright (c) 2000-2005 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement is
# hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
# CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
#
#
# @author Jonathan Hui <[email protected]>
#
from copy import deepcopy
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from pylab import *
from pytos.Comm import Comm, MessageQueue
import sys
import pytos.tools.Drain as Drain
import pytos.util.NescApp as NescApp
import threading
import Tkinter as Tk
class DelugeNode( object ) :
def __init__( self, x, y ) :
self.x = x
self.y = y
self.numPgs = 0
self.numPgsComplete = 0
def update( self ) :
text(self.x, self.y+0.2, "%d/%d" % (self.numPgsComplete, self.numPgs))
class DelugeDemoGui( object ) :
def __init__( self, buildDir="", motecom=None ) :
self.nodeStats = {}
self.numPgs = 0
app = NescApp.NescApp(buildDir, motecom, tosbase=True)
drain = app.rpc.receiveComm
self.initializeGui()
statsMsgQueue = MessageQueue(8)
drain.register( deepcopy(app.msgs.DelugeStatsMsg) , statsMsgQueue )
msgThread = threading.Thread(target=self.processMessages,
args=(statsMsgQueue,))
msgThread.setDaemon(True)
self.runMsgThread = True
self.pauseMsgThread = False
msgThread.start()
self.tkRoot.mainloop()
def initializeGui(self) :
self.tkRoot = Tk.Tk()
self.frame = Tk.Frame( self.tkRoot )
self.frame.pack()
self.fig = figure()
self.axes = subplot( 111 )
self.canvas = FigureCanvasTkAgg( self.fig, master = self.tkRoot )
self.canvas.show()
self.canvas.get_tk_widget().pack( side = Tk.TOP, fill = Tk.BOTH, expand = 1 )
self.toolbar = NavigationToolbar2TkAgg( self.canvas, self.tkRoot )
self.toolbar.update()
self.canvas._tkcanvas.pack( side=Tk.TOP, fill=Tk.BOTH, expand=1 )
def processMessages(self, msgQueue) :
while True :
(addr,msg) = msgQueue.get()
src = msg.getParentMsg("DrainMsg").source
print "Node %d: Progress : %d of %d" % (src, msg.stats.numPgsComplete, msg.stats.numPgs)
if src not in self.nodeStats :
node = DelugeNode( msg.stats.location.x,
                   msg.stats.location.y )
# node = DelugeNode( (src-1)%3, floor((src-1)/3) )
self.nodeStats[src] = node
self.nodeStats[src].x = msg.stats.location.x
self.nodeStats[src].y = msg.stats.location.y
if msg.stats.numPgs == 0 :
self.numPgs = 1
if msg.stats.numPgs > self.numPgs :
self.numPgs = msg.stats.numPgs
self.nodeStats[src].numPgs = msg.stats.numPgs
if (msg.stats.numPgsComplete == 0
or msg.stats.numPgsComplete > self.nodeStats[src].numPgsComplete) :
self.nodeStats[src].numPgsComplete = msg.stats.numPgsComplete
self.draw()
def draw( self ) :
if len(self.nodeStats) > 0 :
X = array([n.x for n in self.nodeStats.values()])
Y = array([n.y for n in self.nodeStats.values()])
P = array([n.numPgsComplete for n in self.nodeStats.values()])
clf()
scatter(X, Y, s=1024, marker='o', c=P, vmin=0, vmax=self.numPgs, faceted=False)
axis([min(X)-1, max(X)+1, min(Y)-1, max(Y)+1])
colorbar()
#self.axes.set_xlim(min(X)-1, max(X)+1)
#self.axes.set_ylim(min(Y)-1, max(Y)+1)
#self.xlim = self.axes.get_xlim()
#self.ylim = self.axes.get_ylim()
#colorbar()
for n in self.nodeStats.values() :
n.update()
self.canvas.draw()
if __name__ == "__main__" :
if len(sys.argv) == 3 :
app = DelugeDemoGui(buildDir = sys.argv[1], motecom = sys.argv[2], )
elif len(sys.argv) == 2 :
app = DelugeDemoGui(buildDir = sys.argv[1], )
else :
app = DelugeDemoGui()
| {
"repo_name": "ekiwi/tinyos-1.x",
"path": "contrib/nestfe/nesc/apps/DelugeDemo/DelugeDemoGui.py",
"copies": "2",
"size": "5093",
"license": "bsd-3-clause",
"hash": 1248256254951846400,
"line_mean": 33.1812080537,
"line_max": 100,
"alpha_frac": 0.62085215,
"autogenerated": false,
"ratio": 3.313597918022121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9820306165419957,
"avg_score": 0.022828780520432718,
"num_lines": 149
} |
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
# @author Cory Sharp <[email protected]>
import os
from jpype import jimport
def findImage( imageName ) :
if os.path.isfile(imageName) :
return imageName
if os.path.isfile( "%s.xml" % imageName ) :
return "%s.xml" % imageName
if os.environ.has_key("TOS_IMAGE_REPO") :
repo = "%s/%s" % (os.environ["TOS_IMAGE_REPO"], imageName)
if os.path.isfile( repo ) :
return repo
if os.path.isfile( "%s.xml" % repo ) :
return "%s.xml" % repo
return imageName
class Deluge(object) :
def __init__(self, comm) :
self._comm = comm
def run(self, args):
jimport.net.tinyos.deluge.Deluge( self._comm.moteif, args )
def ping(self) :
self.run([ "-p" ])
def reboot(self, imageNum) :
self.run([ "-r", "-f", "-in=%d" % imageNum ])
def erase(self, imageNum) :
self.run([ "-e", "-f", "-in=%d" % imageNum ])
def inject(self, imageNum, imageName) :
self.run([ "-i", "-f", "-in=%d" % imageNum, "-ti=%s" % findImage(imageName) ])
| {
"repo_name": "fresskarma/tinyos-1.x",
"path": "contrib/nestfe/python/pytos/Deluge.py",
"copies": "2",
"size": "2132",
"license": "bsd-3-clause",
"hash": 2218043131843665400,
"line_mean": 30.8208955224,
"line_max": 82,
"alpha_frac": 0.6899624765,
"autogenerated": false,
"ratio": 3.1679049034175333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9595295548174014,
"avg_score": 0.052514366348703774,
"num_lines": 67
} |
"""An abstract widget for diagram and graph implementations."""
from BaseWidget import BaseWidget
from Constants import *
import base
class Diagram (BaseWidget):
"""Diagram () -> Diagram
An abstract widget class for diagram and graph implementations.
The Diagram class contains the minimum set of attributes and methods
needed to visualize diagrams or graphs from arbitrary data.
Diagrams can have different resolutions, dependent on the value
range that should be displayed. Inheritors thus have to implement
the 'units' attribute and its related methods get_units() and
set_units(), which define, how many pixels between each full unit
have to be left. Greater values usually result in a higher
resolution, i.e. a greater pixel amount between the values.
To allow the user to know about the kind of data that is evaluated
and displayed, the 'scale_units' attribute and its related methods
get_scale_units() and set_scale_units() must be implemented. Those
define the concrete type of data that is displayed on each axis of
the diagram (e.g. cm, inch, kg...).
The 'axes' attribute and its related methods get_axes() and
set_axes(), which also have to be implemented, denote the axes that
are used to set the data and its results into relation. A typical
cartesian coordinate plane for example will have two axes (x and y).
The 'orientation' attribute should be respected by inheritors to
allow displaying data in a vertical or horizontal alignment.
diagram.orientation = ORIENTATION_HORIZONTAL
diagram.set_orientation (ORIENTATION_VERTICAL)
The Diagram contains a 'negative' attribute and set_negative()
method, which indicate whether negative values should be shown or
not.
diagram.negative = True
diagram.set_negative (False)
The 'origin' attribute and set_origin() method set the point of
origin of the diagram on its widget surface and denote a tuple of an
x and y value. Inheritors should use this to set the point of origin
of their diagram type. Most diagram implementors usually would use
this as a relative coordinate to the bottom left corner of the
widget surface.
Note, that this is different from a real relative position on the
widget surface, as those are related to the topleft corner.
diagram.origin = 10, 10
diagram.set_origin (20, 20)
The 'data' attribute and set_data() method set the data to be
evaluated by the diagram inheritor using the evaluate() method. It
is up to the inheritor to perform additional sanity checks.
diagram.data = mydata
diagram.set_data (mydata)
An evaluation function, which processes the set data can be set
using the 'eval_func' attribute or set_eval_func() method. If set,
the evaluate() method will process the set data using the eval_func
and store the return values in its 'values' attribute. Otherwise, the
values will be set to the data.
def power_1 (x):
return x**2 - x
diagram.eval_func = power_1
The evaluate() method of the widget distinguishes between the type
of data and will act differently, dependent on whether it is a
sequence or not. Lists and tuples will be passed to the eval_func
using the python map() function, else the complete data will be
passed to the eval_func:
# Data is list or tuple:
self.values = map (self.eval_func, data)
# Any other type of data:
self.values = self.eval_func (data)
The result values can also be set manually without any processing
using the 'values' attribute and set_values() method. This can be
useful for inheritors like a bar chart for example.
self.values = myvalues
self.set_values (myvalues)
A concrete implementation of the Diagram class can be found as
Graph2D widget within this module.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
scale_units - The scale unit(s) to set for the axes.
units - Pixels per unit to set.
axes - The axes to show.
negative - Indicates that negative values should be taken into
account.
orientation - The orientation mapping of the axes.
origin - The position of the point of origin on the widget.
data - Data to evaluate.
values - Result values of the set data after evaluation.
eval_func - Evaluation function to calculate the values.
"""
def __init__ (self):
BaseWidget.__init__ (self)
# Negative values. Influences the axes.
self._negative = False
# Horizontal or vertical mapping of the axes.
self._orientation = ORIENTATION_HORIZONTAL
# Coordinates of the point of origin on the widget.
self._origin = (0, 0)
# The data to evaluate and the return values.
self._data = None
self._values = None
# The eval func.
self._evalfunc = None
def get_scale_units (self):
"""D.get_scale_units (...) -> None
Gets the scale units of the axes.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def set_scale_units (self, units):
"""D.set_scale_units (...) -> None
Sets the scale units of the axes.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def get_units (self):
"""D.set_units (...) -> None
Gets the pixels per unit for dimensioning.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def set_units (self, units):
"""D.set_units (...) -> None
Sets the pixels per unit for dimensioning.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def get_axes (self):
"""D.get_axes (...) -> None
Gets the amount and names of the axes.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def set_axes (self, axes):
"""D.set_axes (...) -> None
Sets the amount and names of the axes.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def set_negative (self, negative=True):
"""D.set_negative (...) -> None
Sets the indicator, whether negative values should be shown.
"""
self._negative = negative
self.dirty = True
def set_orientation (self, orientation=ORIENTATION_HORIZONTAL):
"""D.set_orientation (...) -> None
Sets the orientation of the axes.
Raises a ValueError, if the passed argument is not a value of
the ORIENTATION_TYPES tuple.
"""
if orientation not in ORIENTATION_TYPES:
raise ValueError("orientation must be a value of ORIENATION_TYPES")
self._orientation = orientation
self.dirty = True
def set_origin (self, x, y):
"""D.set_origin (...) -> None
Sets the coordinates of the point of origin on the widget.
Raises a TypeError, if the passed arguments are not integers.
"""
if (type (x) != int) or (type (y) != int):
raise TypeError ("x and y must be integers")
self._origin = (x, y)
self.dirty = True
def set_data (self, data):
"""D.set_data (...) -> None
Sets the data to evaluate.
This method does not perform any consistency checking or
whatsoever.
"""
self._data = data
if self.data != None:
self.evaluate ()
else:
self.values = None
def set_values (self, values):
"""D.set_values (...) -> None
Sets the values without processing the data.
"""
self._values = values
self.dirty = True
def set_eval_func (self, func):
"""D.set_eval_func (...) -> None
Sets the evaluation function for the data.
Raises a TypeError, if func is not callable.
"""
if not callable (func):
raise TypeError ("func must be callable")
self._evalfunc = func
if self.data != None:
self.evaluate ()
else:
self.dirty = True
def evaluate (self):
"""D.evaluate () -> None
Calculates the result values from the set data.
Calculates the result values from the set data using the set
evaluation function.
If the set data is a sequence, eval_func will be applied to each
item of it (using map()) to build the return values:
values = map (eval_func, data)
If the set data is not a list or tuple, the data will be passed
in its entirety to eval_func in order to calculate the return
values:
values = eval_func (data)
The 'negative' attribute neither does affect the data nor the
return values.
"""
if self.eval_func != None:
if self.data != None:
if type (self.data) in (list, tuple):
self.values = map (self.eval_func, self.data)
else:
self.values = self.eval_func (self.data)
return
self.values = self.data
scale_units = property (lambda self: self.get_scale_units (),
lambda self, var: self.set_scale_units (var),
doc = "The scale units of the axes.")
units = property (lambda self: self.get_units (),
lambda self, var: self.set_units (var),
doc = "The pixels per unit to set.")
axes = property (lambda self: self.get_axes (),
lambda self, var: self.set_axes (var),
doc = "The axes to show.")
negative = property (lambda self: self._negative,
lambda self, var: self.set_negative (var),
doc = "Indicates, whether negative values are shown.")
orientation = property (lambda self: self._orientation,
lambda self, var: self.set_orientation (var),
doc = "The orientation of the axes.")
origin = property (lambda self: self._origin,
lambda self, (x, y): self.set_origin (x, y),
doc = "Coordinates of the point of origin on the " \
"widget")
data = property (lambda self: self._data,
lambda self, var: self.set_data (var),
doc = "The data to evaluate.")
values = property (lambda self: self._values,
lambda self, var: self.set_values (var),
doc = "The calculated values of the set data.")
eval_func = property (lambda self: self._evalfunc,
lambda self, var: self.set_eval_func (var),
doc = "The evaluation function for calculation.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Diagram.py",
"copies": "1",
"size": "12618",
"license": "bsd-2-clause",
"hash": 9003016170335364000,
"line_mean": 35.8947368421,
"line_max": 79,
"alpha_frac": 0.6329053733,
"autogenerated": false,
"ratio": 4.52258064516129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007632080492072407,
"num_lines": 342
} |
"""A modal window suitable for dialogs and similar purposes."""
from Window import Window
from ocempgui.events import EventManager
class DialogWindow (Window):
"""DialogWindow (title=None) -> DialogWindow
A modal window widget, which blocks any other input.
The DialogWindow sets itself and its children in a modal state by
limiting the event distribution to itself.
Default action (invoked by activate()):
See the Window class.
Mnemonic action (invoked by activate_mnemonic()):
See the Window class.
"""
def __init__ (self, title=None):
Window.__init__ (self, title)
def set_event_manager (self, manager):
"""D.set_event_manager (...) -> None
Sets the event manager of the DialogWindow.
Sets the event manager of the DialogWindow and invokes the
grab_events() method for the DialogWindow.
"""
if (manager == None) and (self.manager != None):
self.manager.event_grabber = None
Window.set_event_manager (self, manager)
if self.manager != None:
self.manager.event_grabber = self
def destroy (self):
"""D.destroy () -> None
Destroys the DialogWindow and removes it from its event system.
"""
self._stop_events = True
if self.manager != None:
self.manager.event_grabber = None
Window.destroy (self)
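# Usage sketch (assumption: illustrative; Renderer.add_widget is the usual
# ocempgui entry point). Once the DialogWindow receives its event manager it
# grabs all events, so other widgets stop reacting until it is destroyed:
# dialog = DialogWindow ("Confirm")
# renderer.add_widget (dialog) # manager is assigned -> dialog grabs events
# dialog.destroy () # releases the grab again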
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/DialogWindow.py",
"copies": "1",
"size": "2845",
"license": "bsd-2-clause",
"hash": 6185230607747743000,
"line_mean": 39.6428571429,
"line_max": 79,
"alpha_frac": 0.7026362039,
"autogenerated": false,
"ratio": 4.32370820668693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01210759301997422,
"num_lines": 70
} |
# $Id: diameter.py 23 2006-11-08 15:45:33Z dugsong $
"""Diameter."""
import struct
import dpkt
# Diameter Base Protocol - RFC 3588
# http://tools.ietf.org/html/rfc3588
# Request/Answer Command Codes
ABORT_SESSION = 274
ACCOUNTING = 271
CAPABILITIES_EXCHANGE = 257
DEVICE_WATCHDOG = 280
DISCONNECT_PEER = 282
RE_AUTH = 258
SESSION_TERMINATION = 275
class Diameter(dpkt.Packet):
__hdr__ = (
('v', 'B', 1),
('len', '3s', 0),
('flags', 'B', 0),
('cmd', '3s', 0),
('app_id', 'I', 0),
('hop_id', 'I', 0),
('end_id', 'I', 0)
)
def _get_r(self):
return (self.flags >> 7) & 0x1
def _set_r(self, r):
self.flags = (self.flags & ~0x80) | ((r & 0x1) << 7)
request_flag = property(_get_r, _set_r)
def _get_p(self):
return (self.flags >> 6) & 0x1
def _set_p(self, p):
self.flags = (self.flags & ~0x40) | ((p & 0x1) << 6)
proxiable_flag = property(_get_p, _set_p)
def _get_e(self):
return (self.flags >> 5) & 0x1
def _set_e(self, e):
self.flags = (self.flags & ~0x20) | ((e & 0x1) << 5)
error_flag = property(_get_e, _set_e)
def _get_t(self):
return (self.flags >> 4) & 0x1
def _set_t(self, t):
self.flags = (self.flags & ~0x10) | ((t & 0x1) << 4)
retransmit_flag = property(_get_t, _set_t)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cmd = (ord(self.cmd[0]) << 16) | \
(ord(self.cmd[1]) << 8) | \
ord(self.cmd[2])
self.len = (ord(self.len[0]) << 16) | \
(ord(self.len[1]) << 8) | \
ord(self.len[2])
self.data = self.data[:self.len - self.__hdr_len__]
l = []
while self.data:
avp = AVP(self.data)
l.append(avp)
self.data = self.data[len(avp):]
self.data = self.avps = l
def pack_hdr(self):
self.len = chr((self.len >> 16) & 0xff) + \
chr((self.len >> 8) & 0xff) + \
chr(self.len & 0xff)
self.cmd = chr((self.cmd >> 16) & 0xff) + \
chr((self.cmd >> 8) & 0xff) + \
chr(self.cmd & 0xff)
return dpkt.Packet.pack_hdr(self)
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.data))
def __str__(self):
return self.pack_hdr() + \
''.join(map(str, self.data))
class AVP(dpkt.Packet):
__hdr__ = (
('code', 'I', 0),
('flags', 'B', 0),
('len', '3s', 0),
)
def _get_v(self):
return (self.flags >> 7) & 0x1
def _set_v(self, v):
self.flags = (self.flags & ~0x80) | ((v & 0x1) << 7)
vendor_flag = property(_get_v, _set_v)
def _get_m(self):
return (self.flags >> 6) & 0x1
def _set_m(self, m):
self.flags = (self.flags & ~0x40) | ((m & 0x1) << 6)
mandatory_flag = property(_get_m, _set_m)
def _get_p(self):
return (self.flags >> 5) & 0x1
def _set_p(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
protected_flag = property(_get_p, _set_p)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.len = (ord(self.len[0]) << 16) | \
(ord(self.len[1]) << 8) | \
ord(self.len[2])
if self.vendor_flag:
self.vendor = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:self.len - self.__hdr_len__]
else:
self.data = self.data[:self.len - self.__hdr_len__]
def pack_hdr(self):
self.len = chr((self.len >> 16) & 0xff) + \
chr((self.len >> 8) & 0xff) + \
chr(self.len & 0xff)
data = dpkt.Packet.pack_hdr(self)
if self.vendor_flag:
data += struct.pack('>I', self.vendor)
return data
def __len__(self):
length = self.__hdr_len__ + \
sum(map(len, self.data))
if self.vendor_flag:
length += 4
return length
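# Standalone sketch (not part of dpkt) of the single-bit accessor pattern
# used by the request/proxiable/error/retransmit and V/M/P flag properties
# above: each property reads or writes one bit of the 8-bit flags field.
def _get_bit(flags, shift):
    return (flags >> shift) & 0x1
def _set_bit(flags, shift, value):
    return (flags & ~(0x1 << shift)) | ((value & 0x1) << shift)
assert _set_bit(0x00, 7, 1) == 0x80 # setting the Diameter request flag
assert _get_bit(0x80, 7) == 1
assert _set_bit(0xff, 6, 0) == 0xbf # clearing the proxiable flag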
if __name__ == '__main__':
import unittest
class DiameterTestCase(unittest.TestCase):
def testPack(self):
d = Diameter(self.s)
self.failUnless(self.s == str(d))
d = Diameter(self.t)
self.failUnless(self.t == str(d))
def testUnpack(self):
d = Diameter(self.s)
self.failUnless(d.len == 40)
#self.failUnless(d.cmd == DEVICE_WATCHDOG_REQUEST)
self.failUnless(d.request_flag == 1)
self.failUnless(d.error_flag == 0)
self.failUnless(len(d.avps) == 2)
avp = d.avps[0]
#self.failUnless(avp.code == ORIGIN_HOST)
self.failUnless(avp.mandatory_flag == 1)
self.failUnless(avp.vendor_flag == 0)
self.failUnless(avp.len == 12)
self.failUnless(len(avp) == 12)
self.failUnless(avp.data == '\x68\x30\x30\x32')
# also test the optional vendor id support
d = Diameter(self.t)
self.failUnless(d.len == 44)
avp = d.avps[0]
self.failUnless(avp.vendor_flag == 1)
self.failUnless(avp.len == 16)
self.failUnless(len(avp) == 16)
self.failUnless(avp.vendor == 3735928559)
self.failUnless(avp.data == '\x68\x30\x30\x32')
s = '\x01\x00\x00\x28\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00\x01\x08\x40\x00\x00\x0c\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08'
t = '\x01\x00\x00\x2c\x80\x00\x01\x18\x00\x00\x00\x00\x00\x00\x41\xc8\x00\x00\x00\x0c\x00\x00\x01\x08\xc0\x00\x00\x10\xde\xad\xbe\xef\x68\x30\x30\x32\x00\x00\x01\x28\x40\x00\x00\x08'
unittest.main()
| {
"repo_name": "ashrith/dpkt",
"path": "dpkt/diameter.py",
"copies": "15",
"size": "5848",
"license": "bsd-3-clause",
"hash": 4676060708779074000,
"line_mean": 31.3093922652,
"line_max": 190,
"alpha_frac": 0.4972640219,
"autogenerated": false,
"ratio": 2.8223938223938223,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005108685702038329,
"num_lines": 181
} |
# $Id: display.py,v 1.1 2004/08/14 22:18:28 jp Exp $
from __future__ import generators
from Tkinter import *
from math import pi,sin,cos
from base import BaseObject
RADS_PER_DEG = pi/180
DEGS_PER_RAD = 180/pi
def pol2cart(r,theta):
x = r * cos(theta)
y = r * sin(theta)
return x,y
def enum(seq):
i = 0
for x in seq:
yield (i,x)
i += 1
class LRFDisplay(Canvas,BaseObject):
robot_radius = 0.2
lrf_shape = (10.0,10.0)
lrf_resolution = 1.0
lrf_start_angle = -90
point_width = 1.0
def __init__(self,parent,**config):
Canvas.__init__(self,parent,**config)
self.channels = {None:([],{})}
self.bind('<Configure>', lambda event: self.reconfigure())
self.canvas_shape = self.winfo_reqwidth(),self.winfo_reqheight()
lw,lh = self.lrf_shape
self.lrf_origin = (lw/2.0,self.robot_radius)
self.draw_origin()
def draw_origin(self):
l,t = self.lrf2canvas(-self.robot_radius,self.robot_radius)
r,b = self.lrf2canvas(self.robot_radius,-self.robot_radius)
xo,yo = self.lrf2canvas(0,0)
circle = self.create_oval(l,t,r,b,fill='red')
line = self.create_line(xo,yo,xo,t)
self.lift(line,circle)
def reconfigure(self):
self.canvas_shape = (self.winfo_width(),self.winfo_height())
def draw_scan(self,scan,channel=None):
if channel not in self.channels:
self.init_channel(channel,fill='black',outline='black')
points,config = self.channels[channel]
if len(points) != len(scan):
self.init_points(len(scan),channel=channel)
points = self.channels[channel][0]
theta = self.lrf_start_angle
for i,r in enum(scan):
x,y = pol2cart(r,(theta+90)*RADS_PER_DEG)
x,y = self.lrf2canvas(x,y)
left,top,right,bottom = self.coords(points[i])
x_old = left+(right-left)
y_old = top+(bottom-top)
x_move = x - x_old
y_move = y - y_old
self.move(points[i],x_move,y_move)
theta += self.lrf_resolution
def draw_weights(self,w):
self.draw_scan(w)
def init_points(self,n,channel=None):
points,config = self.channels[channel]
points += [self.create_rectangle(0,0,self.point_width,self.point_width,**config)
for i in range(len(points),n)]
def init_channel(self,channel=None,**config):
self.channels[channel] = ([],config)
def lrf2canvas(self,x_in,y_in):
lrf_width,lrf_height = self.lrf_shape
xo,yo = self.lrf_origin
width,height = self.canvas_shape
x_out = (x_in+xo) * (width/lrf_width)
y_out = (y_in-(lrf_height-yo)) * -(height/lrf_height)
return x_out,y_out
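# Standalone sketch (not part of the module) of the mapping performed by
# lrf2canvas() above: x is shifted by the LRF origin and scaled to the canvas
# width, while y is flipped because canvas coordinates grow downward. The
# default values mirror the class attributes above.
def _demo_lrf2canvas(x_in, y_in, lrf_shape=(10.0, 10.0), origin=(5.0, 0.2), canvas_shape=(200, 100)):
    lrf_width, lrf_height = lrf_shape
    xo, yo = origin
    width, height = canvas_shape
    x_out = (x_in + xo) * (width / lrf_width)
    y_out = (y_in - (lrf_height - yo)) * -(height / lrf_height)
    return x_out, y_out
# The LRF origin lands near the bottom centre of the canvas:
_x, _y = _demo_lrf2canvas(0, 0)
assert abs(_x - 100.0) < 1e-9 and abs(_y - 98.0) < 1e-9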
class SOMDisplay(Frame):
lrf_params = {'bg' : 'white',
'width' : 200,
'height': 100}
def __init__(self,parent,rows=6,cols=6,lrf_params={},**config):
Frame.__init__(self,parent,**config)
self.rows,self.cols = rows,cols
self.canvases = [[LRFDisplay(self,**self.lrf_params) for i in range(cols)] for j in range(rows)]
for row in range(rows):
for col in range(cols):
self.canvases[row][col].grid(row=row,column=col)
def postscript(self,filestem):
i = 0
for row in range(self.rows):
    for col in range(self.cols):
        self.canvases[row][col].postscript(file=filestem+'unit%0.3d'%i)
        i += 1
class SOMWindow(Toplevel):
def __init__(self,root,som,**kw):
Toplevel.__init__(self,root,**kw)
self.title('SOM View')
self.som = som
Button(self,text="Refresh",command=self.redraw_som).pack()
self.som_display = SOMDisplay(self,
rows=self.som.ydim,
cols=self.som.xdim)
self.som_display.pack()
def redraw_som(self):
for y in range(self.som.ydim):
for x in range(self.som.xdim):
w = self.som.get_model_vector((x,y))
self.som_display.canvases[y][x].draw_weights(w)
if __name__ == '__main__':
root = Tk()
LRFDisplay.lrf_shape = (20.0,10.0)
lrf = LRFDisplay(root,bg='white',width=200,height=100)
lrf.pack()
lrf.draw_weights([5 for i in range(180)])
som_window = Toplevel(root)
som_disp = SOMDisplay(som_window,rows=4,cols=4)
som_disp.pack()
# root.mainloop()
| {
"repo_name": "ronaldahmed/robot-navigation",
"path": "neural-navigation-with-lstm/MARCO/plastk/display.py",
"copies": "2",
"size": "4588",
"license": "mit",
"hash": -8028202697278231000,
"line_mean": 27.1472392638,
"line_max": 104,
"alpha_frac": 0.5494768963,
"autogenerated": false,
"ratio": 3.1839000693962527,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9609669023550134,
"avg_score": 0.02474158842922386,
"num_lines": 163
} |
import optparse, os, platform, sys, shutil, subprocess, shlex
from hashlib import sha1 as sha # assumed: checkSumFile() below calls sha(), but no hash import was present
def getHostOS():
host_os = "win_x64"
if platform.system().lower().startswith("win"):
if 'PROGRAMFILES(X86)' in os.environ:
host_os = "win_x64"
else:
host_os = "win_32"
elif platform.system().lower().startswith("linux"):
if platform.machine() == 'x86_64':
host_os = "linux_x86-64"
else:
host_os = "linux_i686"
return host_os
def checkSumFile( fileName ):
"""Compute hash of the specified file"""
m = sha()
try:
fd = open(fileName,"rb")
except IOError:
print( "Unable to open the file in readmode: %s" % filename )
return
for eachLine in fd:
m.update( eachLine )
fd.close()
return m.hexdigest()
def copy( common_prefix, filename, distFolder, srcDir=None, destname=None ):
"""
Copy the filename (relative to the src directory) to the destname relative to the Dist folder.
"""
#print('got request to copy\n common_prefix = %s\n filename=%s\n distFolder=%s\n srcDir=%s\n destname=%s' % (common_prefix, filename, distFolder, srcDir, destname))
if destname == None:
destname = os.path.basename( filename )
if srcDir == None:
srcDir = ""
allowDoesntExist = (filename[-1:] == '?')
if allowDoesntExist:
filename = filename[0:-1]
srcPath = os.path.realpath( os.path.join( srcDir, filename ) )
destPath = os.path.realpath( os.path.join( distFolder, destname ) )
if not os.path.exists( srcPath ):
if allowDoesntExist:
print ("Optional file %s does not exist." % srcPath)
return (False, False)
else:
print( "File %s does not exist." % srcPath )
return ( destPath, None )
if os.path.isdir( srcPath ):
result = []
if not os.path.isdir( destPath ):
os.makedirs( destPath )
for file in os.listdir( srcPath ):
if file.startswith( 'CMake' ) or file.startswith( 'CTest' ) or file.endswith( '.cmake' ):
continue
elif file.startswith( '.svn' ) or file.startswith('.bzr'):
continue
callResult = copy( common_prefix, file, distFolder, srcPath, os.path.join( destname, file ) )
if isinstance( callResult, list ):
result += callResult
else:
result.append( callResult )
return result
else:
try:
sourceCheckSum = checkSumFile( srcPath )
except:
sourceCheckSum = "Not Computed."
try:
destCheckSum = checkSumFile( destPath )
except:
destCheckSum = None
relDestPath = destPath.replace( common_prefix, "").replace( '\\', '/' )
if ( os.path.exists( srcPath ) and os.path.exists( destPath ) ) and ( sourceCheckSum == destCheckSum ):
return ( relDestPath, sourceCheckSum )
try:
if not os.path.isdir( os.path.dirname( destPath ) ):
os.makedirs( os.path.dirname( destPath ) )
if not os.path.islink( os.path.join( srcDir, filename ) ):
print( "Copying %s to %s" % ( srcPath, relDestPath ) )
returnVal = shutil.copy( srcPath, destPath )
elif not ( os.name == 'nt' ):
print( "Going to handle %s" % (os.path.join( srcDir, filename ) ) )
# Name of file symlink is pointing to
#linkProtoName = os.path.join( distFolder, os.path.basename( srcPath ) )
linkProtoName = os.path.basename( srcPath )
# Name of symlink
#linkName = os.path.basename( destDir )
linkName = os.path.join( distFolder, destname )
#linkName = os.path.basename( filename )
print( "Linking %s to %s" % ( linkProtoName, linkName ) )
os.symlink( linkProtoName.strip(), linkName.strip() )
except:
print( "Copy of %s returned an exception" % srcPath )
return ( relDestPath, None )
return ( relDestPath, sourceCheckSum )
return None
def copyConfFile( common_prefix, filename, distFolder, srcDir=None, destname=None ):
"""
Copy the filename (relative to the src directory) to the destname relative to the Dist folder
only if the file does not match the destination file or the destination.default file. This will
allow the user to make changes to the configuration files without them being overwritten.
"""
if destname == None:
destname = os.path.basename( filename )
if srcDir == None:
srcDir = ""
pieces = destname.split('.')
pieces.insert( -1, '1' )
comparename = '.'.join( pieces )
srcPath = os.path.realpath( os.path.join( srcDir, filename ) )
destPath = os.path.realpath( os.path.join( distFolder, destname ) )
comparePath = os.path.realpath( os.path.join( distFolder, comparename ) )
if not os.path.exists( srcPath ):
print( "File %s does not exist." % srcPath )
return ( destPath, None )
try:
sourceCheckSum = checkSumFile( srcPath )
except:
sourceCheckSum = "Not Computed."
try:
destCheckSum = checkSumFile( destPath )
except:
destCheckSum = None
try:
compareDestCheckSum = checkSumFile( comparePath )
except:
compareDestCheckSum = "Not Computed."
relDestPath = destPath.replace( common_prefix, "").replace( '\\', '/' )
relComparePath = comparePath.replace( common_prefix, "").replace( '\\', '/' )
if ( os.path.exists( srcPath ) and os.path.exists( destPath ) ) and ( sourceCheckSum == destCheckSum == compareDestCheckSum ):
return [ ( relDestPath, sourceCheckSum ), ( relComparePath, sourceCheckSum ) ]
try:
if not os.path.isdir( os.path.dirname( destPath ) ):
os.makedirs( os.path.dirname( destPath ) )
if ( sourceCheckSum != destCheckSum ) and ( destCheckSum == compareDestCheckSum ) or ( not os.path.exists( destPath ) ):
print( "Copying %s to %s" % ( srcPath, relDestPath ) )
returnVal = shutil.copy( srcPath, destPath )
if ( sourceCheckSum != compareDestCheckSum ) or ( not os.path.exists( comparePath ) ):
print( "Creating a default revision at %s" % relComparePath )
returnVal = shutil.copy( srcPath, comparePath )
except:
print( "Copy of %s returned an exception" % srcPath )
raise
return ( relDestPath, None )
return [ ( relDestPath, sourceCheckSum ), ( relComparePath, sourceCheckSum ) ]
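# Decision sketch (illustrative, not part of the build scripts): a live config
# file is only overwritten while it still matches its tracked '<name>.1.<ext>'
# default copy, so user edits to the live file survive upgrades.
def _should_overwrite( srcSum, destSum, defaultSum, destExists=True ):
    # mirrors the condition guarding the first shutil.copy() in copyConfFile()
    return ( ( srcSum != destSum ) and ( destSum == defaultSum ) ) or ( not destExists )
assert _should_overwrite( 'new', 'old', 'old' ) # untouched default -> refresh
assert not _should_overwrite( 'new', 'edited', 'old' ) # user edit -> keep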
def getList( option, opt_str, value, parser ):
"""
optparse callback for returning a list of tuples from a string of format
1a,1b&2a,2b&3a,3b,... (tuples separated by '&', members by ',')
"""
setattr( parser.values, option.dest, [ tuple(x.split(',')) for x in value.split('&') ] )
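# Standalone check (illustrative) of the option format getList() parses:
# tuples are separated by '&', tuple members by ','.
def _parse_pairs( value ):
    return [ tuple(x.split(',')) for x in value.split('&') ]
assert _parse_pairs( 'a,b&c,d' ) == [ ('a', 'b'), ('c', 'd') ]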
| {
"repo_name": "perfectsearch/sandman",
"path": "code/buildscripts/distfunctions.py",
"copies": "1",
"size": "7239",
"license": "mit",
"hash": 1207912189858357800,
"line_mean": 37.5053191489,
"line_max": 173,
"alpha_frac": 0.5855781185,
"autogenerated": false,
"ratio": 3.8220696937697993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9694321795379375,
"avg_score": 0.042665203378084944,
"num_lines": 188
} |
"""Reader for existing document trees."""
from docutils import readers, utils, transforms
class Reader(readers.ReReader):
"""
Adapt the Reader API for an existing document tree.
The existing document tree must be passed as the ``source`` parameter to
the `docutils.core.Publisher` initializer, wrapped in a
`docutils.io.DocTreeInput` object::
pub = docutils.core.Publisher(
..., source=docutils.io.DocTreeInput(document), ...)
The original document settings are overridden; if you want to use the
settings of the original document, pass ``settings=document.settings`` to
the Publisher call above.
"""
supported = ('doctree',)
config_section = 'doctree reader'
config_section_dependencies = ('readers',)
def parse(self):
"""
No parsing to do; refurbish the document tree instead.
Overrides the inherited method.
"""
self.document = self.input
# Create fresh Transformer object, to be populated from Writer
# component.
self.document.transformer = transforms.Transformer(self.document)
# Replace existing settings object with new one.
self.document.settings = self.settings
# Create fresh Reporter object because it is dependent on
# (new) settings.
self.document.reporter = utils.new_reporter(
self.document.get('source', ''), self.document.settings)
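# Usage sketch (assumption: illustrative; publish_from_doctree is the
# documented docutils.core convenience wrapper around the Publisher call shown
# in the class docstring):
# from docutils.core import publish_from_doctree
# output = publish_from_doctree(document, writer_name='html',
#                               settings_overrides={'output_encoding': 'unicode'})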
| {
"repo_name": "juanmont/one",
"path": ".vscode/extensions/tht13.rst-vscode-2.0.0/src/python/docutils/readers/doctree.py",
"copies": "246",
"size": "1607",
"license": "apache-2.0",
"hash": -6472174062464439000,
"line_mean": 33.9347826087,
"line_max": 77,
"alpha_frac": 0.6726820162,
"autogenerated": false,
"ratio": 4.3432432432432435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""Reader for existing document trees."""
from docutils import readers, utils, transforms
class Reader(readers.ReReader):
"""
Adapt the Reader API for an existing document tree.
The existing document tree must be passed as the ``source`` parameter to
the `docutils.core.Publisher` initializer, wrapped in a
`docutils.io.DocTreeInput` object::
pub = docutils.core.Publisher(
..., source=docutils.io.DocTreeInput(document), ...)
The original document settings are overridden; if you want to use the
settings of the original document, pass ``settings=document.settings`` to
the Publisher call above.
"""
supported = ('doctree',)
config_section = 'doctree reader'
config_section_dependencies = ('readers',)
def parse(self):
"""
No parsing to do; refurbish the document tree instead.
Overrides the inherited method.
"""
self.document = self.input
# Create fresh Transformer object, to be populated from Writer
# component.
self.document.transformer = transforms.Transformer(self.document)
# Replace existing settings object with new one.
self.document.settings = self.settings
# Create fresh Reporter object because it is dependent on
# (new) settings.
self.document.reporter = utils.new_reporter(
self.document.get('source', ''), self.document.settings)
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/readers/doctree.py",
"copies": "2",
"size": "1653",
"license": "bsd-3-clause",
"hash": -8188344925744133000,
"line_mean": 33.9347826087,
"line_max": 77,
"alpha_frac": 0.6539624924,
"autogenerated": false,
"ratio": 4.443548387096774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__ = 'reStructuredText'
import docutils
from docutils import frontend, writers
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
'Warning: the --newlines and --indents options may adversely affect '
'whitespace; use them only for reading convenience.',
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
def translate(self):
settings = self.document.settings
indent = newline = ''
if settings.newlines:
newline = '\n'
if settings.indents:
newline = '\n'
indent = ' '
output_prefix = []
if settings.xml_declaration:
output_prefix.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
output_prefix.append(self.doctype)
output_prefix.append(self.generator % docutils.__version__)
docnode = self.document.asdom().childNodes[0]
self.output = (''.join(output_prefix)
+ docnode.toprettyxml(indent, newline))
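# Usage sketch (assumption: illustrative; publish_string is the documented
# docutils.core entry point). The --newlines/--indents options above surface
# as the 'newlines' and 'indents' settings:
# from docutils.core import publish_string
# xml_out = publish_string('Some *emphasized* text.', writer_name='xml',
#                          settings_overrides={'indents': True})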
| {
"repo_name": "alekseyev/wheatleycms",
"path": "docutils/writers/docutils_xml.py",
"copies": "66",
"size": "2727",
"license": "bsd-3-clause",
"hash": 8807744361097495000,
"line_mean": 36.3561643836,
"line_max": 79,
"alpha_frac": 0.6050605061,
"autogenerated": false,
"ratio": 4.1633587786259545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__ = 'reStructuredText'
import docutils
from docutils import frontend, writers
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
'Warning: the --newlines and --indents options may adversely affect '
'whitespace; use them only for reading convenience.',
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
def translate(self):
settings = self.document.settings
indent = newline = ''
if settings.newlines:
newline = '\n'
if settings.indents:
newline = '\n'
indent = ' '
output_prefix = []
if settings.xml_declaration:
output_prefix.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
output_prefix.append(self.doctype)
output_prefix.append(self.generator % docutils.__version__)
docnode = self.document.asdom().childNodes[0]
self.output = (''.join(output_prefix)
+ docnode.toprettyxml(indent, newline))
| {
"repo_name": "rimbalinux/LMD3",
"path": "docutils/writers/docutils_xml.py",
"copies": "2",
"size": "2800",
"license": "bsd-3-clause",
"hash": -8488675864830142000,
"line_mean": 36.3561643836,
"line_max": 79,
"alpha_frac": 0.5892857143,
"autogenerated": false,
"ratio": 4.236006051437216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5825291765737216,
"avg_score": null,
"num_lines": null
} |
"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""
__docformat__ = 'reStructuredText'
import sys
import xml.sax.saxutils
from StringIO import StringIO
import docutils
from docutils import frontend, writers, nodes
class RawXmlError(docutils.ApplicationError): pass
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
None,
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = XMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = ''.join(visitor.output)
class XMLTranslator(nodes.GenericNodeVisitor):
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
# TODO: add stylesheet options similar to HTML and LaTeX writers?
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
xmlparser = xml.sax.make_parser()
"""SAX parser instance to check/exctract raw XML."""
xmlparser.setFeature(
"http://xml.org/sax/features/external-general-entities", True)
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
# Reporter
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
self.settings = settings = document.settings
self.indent = self.newline = ''
if settings.newlines:
self.newline = '\n'
if settings.indents:
self.newline = '\n'
self.indent = ' '
self.level = 0 # indentation level
self.in_simple = 0 # level of nesting inside mixed-content elements
# Output
self.output = []
if settings.xml_declaration:
self.output.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
self.output.append(self.doctype)
self.output.append(self.generator % docutils.__version__)
# initialize XML parser
self.the_handle = TestXml()
self.xmlparser.setContentHandler(self.the_handle)
# generic visit and depart methods
# --------------------------------
def default_visit(self, node):
"""Default node visit method."""
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
self.level += 1
if isinstance(node, nodes.TextElement):
self.in_simple += 1
if not self.in_simple:
self.output.append(self.newline)
def default_departure(self, node):
"""Default node depart method."""
self.level -= 1
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.endtag())
if isinstance(node, nodes.TextElement):
self.in_simple -= 1
if not self.in_simple:
self.output.append(self.newline)
# specific visit and depart methods
# ---------------------------------
def visit_Text(self, node):
text = xml.sax.saxutils.escape(node.astext())
self.output.append(text)
def depart_Text(self, node):
pass
def visit_raw(self, node):
if 'xml' not in node.get('format', '').split():
# skip other raw content?
# raise nodes.SkipNode
self.default_visit(node)
return
# wrap in <raw> element
self.default_visit(node) # or not?
xml_string = node.astext()
self.output.append(xml_string)
self.default_departure(node) # or not?
# Check validity of raw XML:
if isinstance(xml_string, unicode) and sys.version_info < (3,):
xml_string = xml_string.encode('utf8')
try:
self.xmlparser.parse(StringIO(xml_string))
except xml.sax._exceptions.SAXParseException, error:
col_num = self.the_handle.locator.getColumnNumber()
line_num = self.the_handle.locator.getLineNumber()
srcline = node.line
if not isinstance(node.parent, nodes.TextElement):
srcline += 2 # directive content start line
msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
col_num, line_num, node.astext())
self.warn(msg, source=node.source, line=srcline+line_num-1)
raise nodes.SkipNode # content already processed
class TestXml(xml.sax.ContentHandler):
def setDocumentLocator(self, locator):
self.locator = locator
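# Usage sketch (assumption: illustrative). Raw XML is passed through verbatim
# but still parsed for well-formedness, so a reST input like the following
# exercises the warning path in visit_raw() with line/column information:
# .. raw:: xml
#
#    <broken><tag></broken>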
| {
"repo_name": "mcr/ietfdb",
"path": "docutils/writers/docutils_xml.py",
"copies": "4",
"size": "6271",
"license": "bsd-3-clause",
"hash": -2182509082923135700,
"line_mean": 34.0335195531,
"line_max": 79,
"alpha_frac": 0.6080369957,
"autogenerated": false,
"ratio": 4.045806451612903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010187013180895104,
"num_lines": 179
} |
"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""
__docformat__ = 'reStructuredText'
import sys
# Work around broken PyXML and obsolete python stdlib behaviour. (The stdlib
# replaces its own xml module with PyXML if the latter is installed. However,
# PyXML is no longer maintained and partially incompatible/buggy.) Reverse
# the order in which xml module and submodules are searched to import stdlib
# modules if they exist and PyXML modules if they do not exist in the stdlib.
#
# See http://sourceforge.net/tracker/index.php?func=detail&aid=3552403&group_id=38414&atid=422030
# and http://lists.fedoraproject.org/pipermail/python-devel/2012-July/000406.html
import xml
if "_xmlplus" in xml.__path__[0]: # PyXML sub-module
xml.__path__.reverse() # If both are available, prefer stdlib over PyXML
import xml.sax.saxutils
from io import StringIO
import docutils
from docutils import frontend, writers, nodes
class RawXmlError(docutils.ApplicationError): pass
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
None,
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = XMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = ''.join(visitor.output)
class XMLTranslator(nodes.GenericNodeVisitor):
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
# TODO: add stylesheet options similar to HTML and LaTeX writers?
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
xmlparser = xml.sax.make_parser()
"""SAX parser instance to check/exctract raw XML."""
xmlparser.setFeature(
"http://xml.org/sax/features/external-general-entities", True)
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
# Reporter
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
self.settings = settings = document.settings
self.indent = self.newline = ''
if settings.newlines:
self.newline = '\n'
if settings.indents:
self.newline = '\n'
self.indent = ' '
self.level = 0 # indentation level
self.in_simple = 0 # level of nesting inside mixed-content elements
# Output
self.output = []
if settings.xml_declaration:
self.output.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
self.output.append(self.doctype)
self.output.append(self.generator % docutils.__version__)
# initialize XML parser
self.the_handle = TestXml()
self.xmlparser.setContentHandler(self.the_handle)
# generic visit and depart methods
# --------------------------------
def default_visit(self, node):
"""Default node visit method."""
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
self.level += 1
if isinstance(node, nodes.TextElement):
self.in_simple += 1
if not self.in_simple:
self.output.append(self.newline)
def default_departure(self, node):
"""Default node depart method."""
self.level -= 1
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.endtag())
if isinstance(node, nodes.TextElement):
self.in_simple -= 1
if not self.in_simple:
self.output.append(self.newline)
# specific visit and depart methods
# ---------------------------------
def visit_Text(self, node):
text = xml.sax.saxutils.escape(node.astext())
self.output.append(text)
def depart_Text(self, node):
pass
def visit_raw(self, node):
if 'xml' not in node.get('format', '').split():
# skip other raw content?
# raise nodes.SkipNode
self.default_visit(node)
return
# wrap in <raw> element
self.default_visit(node) # or not?
xml_string = node.astext()
self.output.append(xml_string)
self.default_departure(node) # or not?
# Check validity of raw XML:
if isinstance(xml_string, str) and sys.version_info < (3,):
xml_string = xml_string.encode('utf8')
try:
self.xmlparser.parse(StringIO(xml_string))
except xml.sax._exceptions.SAXParseException as error:
col_num = self.the_handle.locator.getColumnNumber()
line_num = self.the_handle.locator.getLineNumber()
srcline = node.line
if not isinstance(node.parent, nodes.TextElement):
srcline += 2 # directive content start line
msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
col_num, line_num, node.astext())
self.warn(msg, source=node.source, line=srcline+line_num-1)
raise nodes.SkipNode # content already processed
class TestXml(xml.sax.ContentHandler):
def setDocumentLocator(self, locator):
self.locator = locator
| {
"repo_name": "mglukhikh/intellij-community",
"path": "python/helpers/py3only/docutils/writers/docutils_xml.py",
"copies": "46",
"size": "6973",
"license": "apache-2.0",
"hash": -6924664598543777000,
"line_mean": 35.3177083333,
"line_max": 97,
"alpha_frac": 0.6238347913,
"autogenerated": false,
"ratio": 3.99141385231826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012323004012354678,
"num_lines": 192
} |
# $Id: dpkt.py 43 2007-08-02 22:42:59Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Simple packet creation and parsing."""
from __future__ import absolute_import
import copy
import itertools
import socket
import struct
import array
from .compat import compat_ord, compat_izip, iteritems
class Error(Exception):
pass
class UnpackError(Error):
pass
class NeedData(UnpackError):
pass
class PackError(Error):
pass
class _MetaPacket(type):
def __new__(cls, clsname, clsbases, clsdict):
t = type.__new__(cls, clsname, clsbases, clsdict)
st = getattr(t, '__hdr__', None)
if st is not None:
# XXX - __slots__ only created in __new__()
clsdict['__slots__'] = [x[0] for x in st] + ['data']
t = type.__new__(cls, clsname, clsbases, clsdict)
t.__hdr_fields__ = [x[0] for x in st]
t.__hdr_fmt__ = getattr(t, '__byte_order__', '>') + ''.join([x[1] for x in st])
t.__hdr_len__ = struct.calcsize(t.__hdr_fmt__)
t.__hdr_defaults__ = dict(compat_izip(
t.__hdr_fields__, [x[2] for x in st]))
return t
class Packet(_MetaPacket("Temp", (object,), {})):
"""Base packet class, with metaclass magic to generate members from self.__hdr__.
Attributes:
__hdr__: Packet header should be defined as a list of
(name, structfmt, default) tuples.
__byte_order__: Byte order, can be set to override the default ('>')
Example:
>>> class Foo(Packet):
... __hdr__ = (('foo', 'I', 1), ('bar', 'H', 2), ('baz', '4s', 'quux'))
...
>>> foo = Foo(bar=3)
>>> foo
Foo(bar=3)
>>> str(foo)
'\x00\x00\x00\x01\x00\x03quux'
>>> foo.bar
3
>>> foo.baz
'quux'
>>> foo.foo = 7
>>> foo.baz = 'whee'
>>> foo
Foo(baz='whee', foo=7, bar=3)
>>> Foo('hello, world!')
Foo(baz=' wor', foo=1751477356L, bar=28460, data='ld!')
"""
def __init__(self, *args, **kwargs):
"""Packet constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- optional packet buffer to unpack
Optional keyword arguments correspond to members to set
(matching fields in self.__hdr__, or 'data').
"""
self.data = b''
if args:
try:
self.unpack(args[0])
except struct.error:
if len(args[0]) < self.__hdr_len__:
raise NeedData
raise UnpackError('invalid %s: %r' %
(self.__class__.__name__, args[0]))
else:
for k in self.__hdr_fields__:
setattr(self, k, copy.copy(self.__hdr_defaults__[k]))
for k, v in iteritems(kwargs):
setattr(self, k, v)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __getitem__(self, k):
try:
return getattr(self, k)
except AttributeError:
raise KeyError
def __repr__(self):
# Collect and display protocol fields in order:
# 1. public fields defined in __hdr__, unless their value is default
# 2. properties derived from _private fields defined in __hdr__
# 3. dynamically added fields from self.__dict__, unless they are _private
# 4. self.data when it's present
l = []
# maintain order of fields as defined in __hdr__
for field_name, _, _ in getattr(self, '__hdr__', []):
field_value = getattr(self, field_name)
if field_value != self.__hdr_defaults__[field_name]:
if field_name[0] != '_':
l.append('%s=%r' % (field_name, field_value)) # (1)
else:
# interpret _private fields as name of properties joined by underscores
for prop_name in field_name.split('_'): # (2)
if isinstance(getattr(self.__class__, prop_name, None), property):
l.append('%s=%r' % (prop_name, getattr(self, prop_name)))
# (3)
l.extend(
['%s=%r' % (attr_name, attr_value)
for attr_name, attr_value in iteritems(self.__dict__)
if attr_name[0] != '_' # exclude _private attributes
and attr_name != self.data.__class__.__name__.lower()]) # exclude fields like ip.udp
# (4)
if self.data:
l.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return str(self.__bytes__())
def __bytes__(self):
return self.pack_hdr() + bytes(self.data)
def pack_hdr(self):
"""Return packed header string."""
try:
return struct.pack(self.__hdr_fmt__,
*[getattr(self, k) for k in self.__hdr_fields__])
except struct.error:
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
try:
return struct.pack(self.__hdr_fmt__, *vals)
except struct.error as e:
raise PackError(str(e))
def pack(self):
"""Return packed header + self.data string."""
return bytes(self)
def unpack(self, buf):
"""Unpack packet header fields from buf, and set self.data."""
for k, v in compat_izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = buf[self.__hdr_len__:]
# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
__vis_filter = b'................................ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~.................................................................................................................................'
def hexdump(buf, length=16):
"""Return a hexdump output string of the given buffer."""
n = 0
res = []
while buf:
line, buf = buf[:length], buf[length:]
hexa = ' '.join(['%02x' % compat_ord(x) for x in line])
line = line.translate(__vis_filter).decode('utf-8')
res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
n += length
return '\n'.join(res)
def in_cksum_add(s, buf):
n = len(buf)
cnt = (n // 2) * 2
a = array.array('H', buf[:cnt])
if cnt != n:
a.append(compat_ord(buf[-1]))
return s + sum(a)
def in_cksum_done(s):
s = (s >> 16) + (s & 0xffff)
s += (s >> 16)
return socket.ntohs(~s & 0xffff)
def in_cksum(buf):
"""Return computed Internet checksum."""
return in_cksum_done(in_cksum_add(0, buf))
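# --- Illustrative sanity check (not part of the original module) ---
# A worked example of the carry fold performed by in_cksum_done(); the
# overflowed word-sum value 0x2ab12 is made up for demonstration.
if __name__ == '__main__':
    s = 0x2ab12                    # 16-bit word sum that overflowed
    s = (s >> 16) + (s & 0xffff)   # fold the carry: 0x2 + 0xab12 = 0xab14
    s += s >> 16                   # absorb any carry from the fold (none here)
    assert s == 0xab14             # ~s & 0xffff would then be the checksum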
def test_utils():
__buf = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e'
__hd = ' 0000: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e ...............'
h = hexdump(__buf)
assert (h == __hd)
c = in_cksum(__buf)
assert (c == 51150)
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/dpkt.py",
"copies": "3",
"size": "7224",
"license": "apache-2.0",
"hash": -2979717684457334300,
"line_mean": 32.1376146789,
"line_max": 275,
"alpha_frac": 0.4944629014,
"autogenerated": false,
"ratio": 3.6411290322580645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020173166169836472,
"num_lines": 218
} |
# $Id: dpkt.py 43 2007-08-02 22:42:59Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Simple packet creation and parsing."""
import copy
import itertools
import socket
import struct
import array
class Error(Exception):
pass
class UnpackError(Error):
pass
class NeedData(UnpackError):
pass
class PackError(Error):
pass
class _MetaPacket(type):
# @profile
def __new__(cls, clsname, clsbases, clsdict):
from pprint import pprint
# pprint((cls, clsname, clsbases, clsdict))
t = type.__new__(cls, clsname, clsbases, clsdict)
st = getattr(t, '__hdr__', None)
if st is not None:
# XXX - __slots__ only created in __new__()
clsdict['__slots__'] = [x[0] for x in st] + ['data']
t = type.__new__(cls, clsname, clsbases, clsdict)
# pprint(vars(t))
t.__hdr_fields__ = [x[0] for x in st]
t.__hdr_fmt__ = getattr(t, '__byte_order__', '>') + ''.join([x[1] for x in st])
t.__hdr_len__ = struct.calcsize(t.__hdr_fmt__)
t.__hdr_defaults__ = dict(zip(
t.__hdr_fields__, [x[2] for x in st]))
for x in st:
setattr(t,x[0],x[2])
# pprint(vars(t))
# pprint((cls, clsname, clsbases, clsdict))
return t
class Packet(object):
"""Base packet class, with metaclass magic to generate members from
self.__hdr__.
__hdr__ should be defined as a list of (name, structfmt, default) tuples
__byte_order__ can be set to override the default ('>')
Example::
>>> class Foo(Packet):
... __hdr__ = (('foo', 'I', 1), ('bar', 'H', 2), ('baz', '4s', 'quux'))
...
>>> foo = Foo(bar=3)
>>> foo
Foo(bar=3)
>>> str(foo)
'\x00\x00\x00\x01\x00\x03quux'
>>> foo.bar
3
>>> foo.baz
'quux'
>>> foo.foo = 7
>>> foo.baz = 'whee'
>>> foo
Foo(baz='whee', foo=7, bar=3)
>>> Foo('hello, world!')
Foo(baz=' wor', foo=1751477356L, bar=28460, data='ld!')
"""
__metaclass__ = _MetaPacket
# @profile
def __init__(self, *args, **kwargs):
"""Packet constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- optional packet buffer to unpack
Optional keyword arguments correspond to members to set
(matching fields in self.__hdr__, or 'data').
"""
self.data = ''
if args:
try:
self.unpack(args[0])
except struct.error:
if len(args[0]) < self.__hdr_len__:
raise NeedData
raise UnpackError('invalid %s: %r' %
(self.__class__.__name__, args[0]))
else:
# from pprint import pprint
# pprint(self.__hdr_fields__)
# pprint(kwargs.iteritems())
# for k in self.__hdr_fields__:
# setattr(self, k, copy.copy(self.__hdr_defaults__[k]))
# setattr(self, k, self.__hdr_defaults__[k])
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __getitem__(self, k):
try:
return getattr(self, k)
except AttributeError:
raise KeyError
def __repr__(self):
# Collect and display protocol fields in order:
# 1. public fields defined in __hdr__, unless their value is default
# 2. properties derived from _private fields defined in __hdr__
# 3. dynamically added fields from self.__dict__, unless they are _private
# 4. self.data when it's present
l = []
# maintain order of fields as defined in __hdr__
for field_name, _, _ in getattr(self, '__hdr__', []):
field_value = getattr(self, field_name)
if field_value != self.__hdr_defaults__[field_name]:
if field_name[0] != '_':
l.append('%s=%r' % (field_name, field_value)) # (1)
else:
# interpret _private fields as name of properties joined by underscores
for prop_name in field_name.split('_'): # (2)
if isinstance(getattr(self.__class__, prop_name, None), property):
l.append('%s=%r' % (prop_name, getattr(self, prop_name)))
# (3)
l.extend(
['%s=%r' % (attr_name, attr_value)
for attr_name, attr_value in self.__dict__.iteritems()
if attr_name[0] != '_' # exclude _private attributes
and attr_name != self.data.__class__.__name__.lower()]) # exclude fields like ip.udp
# (4)
if self.data:
l.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return self.pack_hdr() + str(self.data)
# @profile
def pack_hdr(self):
"""Return packed header string."""
try:
return struct.pack(self.__hdr_fmt__,
*[getattr(self, k) for k in self.__hdr_fields__])
except struct.error:
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
try:
return struct.pack(self.__hdr_fmt__, *vals)
except struct.error, e:
raise PackError(str(e))
def pack(self):
"""Return packed header + self.data string."""
return str(self)
def unpack(self, buf):
"""Unpack packet header fields from buf, and set self.data."""
for k, v in itertools.izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = buf[self.__hdr_len__:]
# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
__vis_filter = """................................ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~................................................................................................................................."""
def hexdump(buf, length=16):
"""Return a hexdump output string of the given buffer."""
n = 0
res = []
while buf:
line, buf = buf[:length], buf[length:]
hexa = ' '.join(['%02x' % ord(x) for x in line])
line = line.translate(__vis_filter)
res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
n += length
return '\n'.join(res)
def in_cksum_add(s, buf):
n = len(buf)
cnt = (n / 2) * 2
a = array.array('H', buf[:cnt])
if cnt != n:
a.append(struct.unpack('H', buf[-1] + '\x00')[0])
return s + sum(a)
def in_cksum_done(s):
s = (s >> 16) + (s & 0xffff)
s += (s >> 16)
return socket.ntohs(~s & 0xffff)
def in_cksum(buf):
"""Return computed Internet checksum."""
return in_cksum_done(in_cksum_add(0, buf))
| {
"repo_name": "yangbh/dpkt",
"path": "dpkt/dpkt.py",
"copies": "1",
"size": "7213",
"license": "bsd-3-clause",
"hash": 4597679080673776600,
"line_mean": 32.2396313364,
"line_max": 278,
"alpha_frac": 0.4887009566,
"autogenerated": false,
"ratio": 3.745067497403946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4733768454003946,
"avg_score": null,
"num_lines": null
} |
# $Id: echo.py 90 2008-01-16 17:08:42Z ghandal $
# echo.py - pygglib demo - Gadu-Gadu echo
# (c) Marek Chrusciel
# Jakub Kosinski
# Marcin Krupowicz
# Mateusz Strycharski
#
import time
import sys
if sys.platform == 'win32':
sys.path.append(".\\..") # - dla windowsa
else:
sys.path.append("../") # - dla linuksa
from pygglib import GGSession
from GGConstans import *
from Contacts import *
def on_login_ok_event_handler(sender, args):
print '---'
    print 'Successfully logged in.'
def on_login_failed_event_handler(sender, args):
print '---'
print 'Login failed!'
def on_need_email_event_handler(sender, args):
print '---'
print 'Server needs e-mail!'
def on_msg_recv_event_handler(sender, args):
print '---'
contact = sender.contacts_list[args.sender]
if contact != None:
print 'Message from', contact.shown_name
else:
print 'Message from', args.sender
print 'Message sequence number:', args.seq
print 'Message Classes:', GGMsgTypes.reverse_lookup(args.msg_class)
print '"' + args.message + '"'
def on_msg_recv_echo(sender, args):
assert type(sender) == GGSession
sender.send_msg(args.sender, args.message, msg_class = GGMsgTypes.Chat)
def on_unknown_packet_event_handler(sender, args):
print '---'
print 'Unknown packet received: type: 0x%04x, length: 0x%04x' % (args.type, args.length)
print
def on_msg_ack_event_handler(sender, args):
print '---'
print 'Message ack received: status: %s, recipient: %d, seq: %d' % (GGMsgStatus.reverse_lookup_without_mask(args.status), args.recipient, args.seq)
print
def on_notify_reply_event_handler(sender, args):
print '---'
print 'Notify from server:'
for contact in args.contacts_list.data:
print contact.shown_name + ' is ' + GGStatuses.reverse_lookup_without_mask(contact.status)
if contact.description != "":
print 'Description:', contact.description
def on_userlist_reply_event_handler(sender, args):
print '---'
print 'Contacts list received from server'
for contact in sender.contacts_list.data:
print contact.shown_name + ': ' + str(contact.uin)
def on_status_changed_event_handler(sender, args):
print '---'
print args.contact.shown_name + ' has changed status.'
print 'New status: ', GGStatuses.reverse_lookup_without_mask(args.contact.status)
if args.contact.description != '':
print '"' + args.contact.description + '"'
def on_disconnecting_event_handler(sender, args):
print '---'
print 'Server has closed the connection'
if __name__ == "__main__":
contacts_list = ContactsList([Contact({'uin':3993939,'shown_name':'Tralala'}), Contact({'uin':4668758,'shown_name':'Anna'})])
    # Initialize the session
session = GGSession(uin = 11327271, password = 'eto2007', initial_status = GGStatuses.AvailDescr, initial_description = 'pygglib echo demo', contacts_list = contacts_list)
    # Register event handlers
session.register('on_login_ok', on_login_ok_event_handler)
session.register('on_msg_recv', on_msg_recv_event_handler)
session.register('on_msg_recv', on_msg_recv_echo)
session.register('on_unknown_packet', on_unknown_packet_event_handler)
session.register('on_send_msg_ack', on_msg_ack_event_handler)
session.register('on_notify_reply', on_notify_reply_event_handler)
session.register('on_userlist_reply', on_userlist_reply_event_handler)
session.register('on_status_changed', on_status_changed_event_handler)
session.register('on_disconnecting', on_disconnecting_event_handler)
session.login()
session.import_contacts_list()
x = ''
while x != 'quit':
x = raw_input()
    print 'logging out...'
    session.logout()
    print 'logged out'
| {
"repo_name": "jakubkosinski/pygglib",
"path": "samples/echo.py",
"copies": "1",
"size": "3692",
"license": "mit",
"hash": 5466477677088465000,
"line_mean": 33.5,
"line_max": 172,
"alpha_frac": 0.6952871073,
"autogenerated": false,
"ratio": 2.9394904458598727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41347775531598724,
"avg_score": null,
"num_lines": null
} |
# TODO: Handle text selections
"""Abstract text editing class."""
from pygame.locals import *
from BaseWidget import BaseWidget
from Constants import *
import base
_TIMER = 50
class Editable (BaseWidget):
"""Editable () -> Editable
An abstract widget class, which can handle text input.
The Editable is an abstract class, which can handle (unicode) text
input. It supports a caret for the input cursor, undo of text input
using the ESC key and input notifications via the RETURN/ENTER key.
Text can be set directly using the 'text' attribute or the
set_text() method. By assigning the attribute or using the method,
the caret position will be reset to 0.
editable.text = 'Test'
editable.set_text (text)
The 'caret' attribute indicates the current cursor position for
input operations within the text and can be modified
programmatically by reassigning it or using the set_caret() method.
editable.caret = 5
editable.set_caret (5)
It is possible to prevent the text from editing using the
'editable' attribute or set_editable() method. If 'editable' is
False, no text input can be made and an input notification or undo
operation will not be possible.
editable.editable = False
editable.set_editable (False)
    Note: Depending on the font set in the style of the Editable, it is
    possible that certain characters are not displayed correctly. It is
    strongly recommended to use a fully unicode-capable font, if
    non-ascii characters should be displayed.
Default action (invoked by activate()):
Give the Editable the input focus for text editing.
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
    SIG_KEYDOWN - Invoked, when a key is pressed while the Editable has
        the input focus.
SIG_INPUT - Invoked, when the input is validated or aborted using
RETURN or ESC.
Attributes:
text - The text displayed on the Editable.
editable - Indicates, whether the text can be edited or not.
caret - Caret ( | ) position on input.
caret_visible - Indicates, whether the caret is currently visible.
"""
def __init__ (self):
BaseWidget.__init__ (self)
self._text = ''
self._editable = True
# Caret | position.
self._caret = 0
self._caretvisible = True
self._timer = _TIMER
self._signals[SIG_KEYDOWN] = []
self._signals[SIG_INPUT] = []
self._signals[SIG_TICK] = None # No events for this one.
# Temporary placeholder for text input and ESCAPE.
self._temp = None
def set_text (self, text):
"""E.set_text (...) -> None
Sets the text of the Editable to the passed argument.
Raises a TypeError if the passed argument is not a string or
unicode.
"""
if type (text) not in (str, unicode):
raise TypeError ("text must be a string or unicode")
self._text = text
self._temp = self._text
self.caret = 0 # Reset caret.
self.dirty = True
def set_caret (self, pos):
"""E.set_caret (...) -> None
Sets the caret to the passed position.
Moves the input caret to the given position within the text.
0 is the very first position within the text (before the first
character), a value equal to or greater than the length of the
text will set the caret behind the last character position.
Raises a TypeError, if the passed argument is not a positive
integer.
"""
if (type (pos) != int) or (pos < 0):
raise TypeError ("pos must be a positive integer")
if pos > len (self.text):
self._caret = len (self.text)
else:
self._caret = pos
self.dirty = True
def _set_caret_visible (self, visible):
"""E._set_caret_visible (...) -> None
Sets the visibility of the caret.
"""
self._caretvisible = visible
if visible:
self._timer = _TIMER # Reset timer.
def set_editable (self, editable):
"""E.set_editable (...) -> None
Enables or disables text editing of the Editable.
This causes the Editable to ignore SIG_KEYDOWN events, which
would modify the text of it. It also blocks input
notifications and undo operations.
"""
self._editable = editable
def set_focus (self, focus=True):
"""E.set_focus (...) -> bool
Sets the input and action focus of the Editable.
Sets the input and action focus of the Editable and returns True
upon success or False, if the focus could not be set.
Note: This method only works as supposed using
a render loop, which supports the Renderer class specification.
"""
if not self.sensitive:
return False
if focus:
# Save the text after activation and reset the caret blink
# effects.
self._set_caret_visible (True)
self._temp = self.text
self._caretvisible = True
self._timer = _TIMER
self.state = STATE_ACTIVE
elif self._temp != self.text:
            # If the Editable loses its input focus _and_ has changed text,
# it will be validated by default.
self._temp = self.text
self.state = STATE_NORMAL
self.run_signal_handlers (SIG_INPUT)
else:
            # The Editable loses its input focus without any change.
self.state = STATE_NORMAL
BaseWidget.set_focus (self, focus)
return True
def activate (self):
"""E.activate () -> None
Activates the Editable default action.
Activates the Editable default action. This usually means
giving the Editable the input focus.
"""
if not self.sensitive:
return
self.focus = True
def notify (self, event):
"""E.notify (...) -> None
Notifies the Editable about an event.
"""
if not self.sensitive:
return
# The next few events are only available, if the entry is focused.
if self.focus:
# Blinking caret.
if event.signal == SIG_TICK:
self._timer -= 1
if self._timer == 0:
self._caretvisible = not self._caretvisible
self._timer = _TIMER
self.dirty = True
elif event.signal == SIG_KEYDOWN:
self.run_signal_handlers (SIG_KEYDOWN, event.data)
event.handled = self._input (event.data)
                self._caretvisible = True
BaseWidget.notify (self, event)
def _input (self, event):
"""E._input (...) -> bool
Receives the SIG_KEYDOWN events and updates the text.
"""
handled = False
caret = self._caret
if event.key == K_ESCAPE:
if self.editable:
self._text = self._temp # Undo text input.
self.run_signal_handlers (SIG_INPUT)
handled = True
elif event.key in (K_RETURN, K_KP_ENTER):
if self.editable:
self._temp = self.text
self.run_signal_handlers (SIG_INPUT)
handled = True
# Move caret right and left on the corresponding key press.
elif event.key == K_RIGHT:
if caret < len (self._text):
caret += 1
handled = True
elif event.key == K_LEFT:
if caret > 0:
caret -= 1
handled = True
# Go the start (home) of the text.
elif event.key == K_HOME:
caret = 0
handled = True
# Go to the end (end) of the text.
elif event.key == K_END:
caret = len (self._text)
handled = True
# The next statements directly influence the text, thus we have
# to check, if it is editable or not.
elif self.editable:
# Delete at the position (delete).
if event.key == K_DELETE:
if caret < len (self._text):
self._text = self._text[:caret] + self._text[caret + 1:]
handled = True
# Delete backwards (backspace).
elif event.key == K_BACKSPACE:
if caret > 0:
self._text = self._text[:caret - 1] + self._text[caret:]
caret -= 1
handled = True
# Non-printable characters or maximum exceeded.
elif (len (event.unicode) == 0) or (ord (event.unicode) < 32):
# Any unicode character smaller than 0x0020 (32, SPC) is
# ignored as those are control sequences.
return False
# Any other case is okay, so show it.
else:
self._text = self._text[:caret] + event.unicode + \
self._text[caret:]
caret += 1
handled = True
self.set_caret (caret)
return handled
# properties
text = property (lambda self: self._text,
lambda self, var: self.set_text (var),
doc = "The text to display on the Editable.")
caret = property (lambda self: self._caret,
lambda self, var: self.set_caret (var),
doc = "The caret position.")
editable = property (lambda self: self._editable,
lambda self, var: self.set_editable (var),
doc = "Indicates, if the text can be edited or not.")
caret_visible = property (lambda self: self._caretvisible,
lambda self, var: self._set_caret_visible (var),
doc = "Indicates, if the caret is currently " \
"visible.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Editable.py",
"copies": "1",
"size": "11582",
"license": "bsd-2-clause",
"hash": -3502954222686398500,
"line_mean": 34.9689440994,
"line_max": 78,
"alpha_frac": 0.5865135555,
"autogenerated": false,
"ratio": 4.415554708349219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007888474658918641,
"num_lines": 322
} |
from Tkinter import *
import Pmw
class CD_handler:
def import_CD_tempscan(self):
"""Import a temperature scan from a Jasco CD spec"""
import tkFileDialog, os
filename=tkFileDialog.askopenfilename(defaultextension='.txt',initialdir=os.getcwd(),
filetypes=[("Jasco txt","*.txt"),
("All files","*.*")],
parent=self.ekin_win)
if not filename:
return
#
# If we got a filename then read the stuff
#
import os
if not os.path.isfile(filename):
import tkMessageBox
tkMessageBox.showwarning('File not found',
'I could not find %s' %filename,
parent=self.ekin_win)
return
#
# Open and read file
#
fd=open(filename)
lines=fd.readlines()
fd.close()
#
# Parse file
#
# Format is:
# 00 TITLE 0.6mg/ml hewl sigma
# 01 DATA TYPE
# 02 ORIGIN JASCO
# 03 OWNER
# 04 DATE 06/10/17
# 05 TIME 17:59:52
# 06 SPECTROMETER/DATA SYSTEM J-810
# 07 DELTAT 0
# 08 TUNITS Temperature [C]
# 09 FIRSTT 25
# 10 LASTT 35
# 11 NPOINTST 3
# 12 DELTAX -0.5
# 13 XUNITS Wavelength [nm]
# 14 YUNITS CD[mdeg]
# 15 YUNITS2 HT[V]
# 16 FIRSTX 350
# 17 LASTX 250
# 18 NPOINTS 201
# 19 XYDATA1 25 30 35
#
# For a file with scans of three temperatures
#
data={}
data['TITLE']=lines[0]
data['ORIGIN']=lines[1]
data['DATE']=lines[4]+lines[5]
data['SPEC']=lines[6]
#
# Get the range of temps
#
t_temps=lines[19].split()[1:]
temps=[]
for t in t_temps:
temps.append(float(t))
#
# Get the number of wavelengths
#
lambda_points=int(lines[18].split()[-1])
#
# Read the data
#
raw_data={}
for x in range(lambda_points):
line=lines[x+20]
line=line.split()
wavlen=float(line[0])
count=0
for temp in temps:
count=count+1
mdeg=float(line[count])
if not raw_data.has_key(temp):
raw_data[temp]=[]
raw_data[temp].append([wavlen,mdeg])
#
# Insert the tabs
#
temp1=temps[0]
dps=len(raw_data[temp1])
for temp in temps:
newtab_num=self.add_datalist('CD(T'+str(temp)+')',data_points=dps)
count=0
for wavlen,mdeg in raw_data[temp]:
self.data[newtab_num][0][count]['var'].set(wavlen)
self.data[newtab_num][1][count]['var'].set(mdeg)
count=count+1
self.mode_var.set(5)
self.update_mode()
self.redraw_graph(fitting=1)
return
#
# -----
#
def insert_CD_temp_datatab(self):
"""Insert datatab for fitting the temperature dependence of CD data"""
#
# Get the wavelengths
#
thistab = self.nb.index(self.nb.getcurselection())
wavlens_dps=self.data[thistab][0].keys()
wavlens=[]
for dp in wavlens_dps:
if dp!='label' and dp!='label_widget':
wavlens.append(self.data[thistab][0][dp]['var'].get())
wavlens.sort()
self.wavlen=StringVar()
#
# Open window for selecting the wavelength
#
# first check if there is any imported data - bad hack
if self.tabnames[thistab]=='data' or self.tabnames[thistab]=='Temp-dependence':
import tkMessageBox
tkMessageBox.showwarning("Import Data First",
"Please import data or choose the first tab.")
return
self.wav_win=Toplevel()
self.wav_win.title('Select wavelength')
#
# Set the geometry
#PEATDB.PEAT_window.set_geometry(self.ekin_win,self.wav_win)
self.wav_win.protocol("WM_DELETE_WINDOW",self.wav_win_cancel)
#
# Entry field for wavelength instead - asked for by Una
#
wav_entry=Entry(self.wav_win,textvariable=self.wavlen,width=10,font='Courier 14',justify='center')
wav_entry.grid(row=1,column=0,sticky='news',padx=3,pady=3,columnspan=2)
# Label
lb=Label(self.wav_win,text='Choose Wavelength',font='Arial 16',justify='center')
lb.grid(row=0,column=0,sticky='news',padx=3,pady=3,columnspan=2)
#
increment = float(wavlens[1])-float(wavlens[0])
self.wavscale=Scale(self.wav_win,resolution=increment,variable=self.wavlen,orient='horizontal',showvalue=0,
command=self.insert_wavdata,from_=wavlens[0],to=wavlens[-1])
self.wavscale.grid(row=2,column=0,sticky='news',padx=3,pady=3,columnspan=2)
self.wavlen.set(wavlens[0])
#
# Manually specify min and max mdeg
#
Label(self.wav_win,text='min mdeg').grid(row=3,column=0)
self.min_mdeg=DoubleVar()
self.min_entry=Entry(self.wav_win,textvariable=self.min_mdeg,width=10,font='Courier 14',justify='center')
self.min_entry.grid(row=4,column=0,sticky='news',padx=3,pady=3)
self.min_entry.bind('<KeyPress>',self.update_CDmin)
#
Label(self.wav_win,text='max mdeg').grid(row=3,column=1,padx=3,pady=3)
self.max_mdeg=DoubleVar()
self.max_entry=Entry(self.wav_win,textvariable=self.max_mdeg,width=10,font='Courier 14',justify='center')
self.max_entry.grid(row=4,column=1,sticky='news',padx=3,pady=3)
self.max_entry.bind('<KeyPress>',self.update_CDmax)
#
Button(self.wav_win,text='Cancel',command=self.wav_win_cancel).grid(row=5,column=0,sticky='news',padx=3,pady=4,columnspan=1)
Button(self.wav_win,text='Done',command=self.wav_win_close).grid(row=5,column=1,sticky='news',padx=3,pady=4,columnspan=1)
#
# Add the new datatab
#
dps=len(self.data.keys())
names=self.tabnames.values()
        # If the temperature-dependence tab was already created, do not create it again
if not 'Temp-dependence' in names:
self.temp_dep_tab=self.add_datalist('Temp-dependence',data_points=dps)
else:
thistab_num=len(self.datatabs)-1
self.nb.selectpage(self.tabnames[thistab_num])
#
# Plot the data
#
self.CD_min='auto'
self.CD_max='auto'
self.insert_wavdata()
#
# Update the fitting model
#
self.FIT.update_model('CD - Thermal denaturation')
self.reprint_parameters()
self.redraw_graph()
return
#
# ----
#
def wav_win_close(self,event=None):
"""
Scale the CD data between 0 and 1, and destroy wavelength selection window
"""
#
# Get data
#
chosen_wavelength=self.wavlen.get()
#
data=[]
for tabnum in self.tabnames:
if self.tabnames[tabnum][:4]=='CD(T':
#
# Grab data from here
#
temp=float(self.tabnames[tabnum][4:-1])
for dp in self.data[tabnum][0].keys():
if dp!='label' and dp!='label_widget':
wavlen=self.data[tabnum][0][dp]['var'].get()
if wavlen==chosen_wavelength:
data.append([temp,wavlen,self.data[tabnum][1][dp]['var'].get()])
#
# Normalise
#
i=0
for temp,wavlen,mdeg in data:
mdeg=float(mdeg)+abs(self.min_mdeg.get())
mdeg=float(mdeg)/(self.max_mdeg.get()-self.min_mdeg.get())
data[i][2]=mdeg
i=i+1
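        # Worked example of the normalisation above (illustrative numbers):
        # with min mdeg = -20 and max mdeg = 5, a reading of -20 maps to
        # (-20 + 20) / (5 - (-20)) = 0.0 and a reading of +5 maps to
        # (5 + 20) / 25 = 1.0, so the selected wavelength's CD signal is
        # rescaled onto [0, 1] for the thermal denaturation fit.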
#
# Change fitting model
#
self.FIT.update_model('Thermal denaturation')
self.reprint_parameters()
self.redraw_graph()
#
# Insert normalised data
#
count=0
for temp,wavlen,mdeg in data:
self.data[self.temp_dep_tab][0][count]['var'].set(temp)
self.data[self.temp_dep_tab][1][count]['var'].set(mdeg)
count=count+1
self.redraw_graph(fitting=1)
#
# Close window
#
if self.wav_win:
self.wav_win.destroy()
return
#
# ----
#
def wav_win_cancel(self,event=None):
"""
Destroy wavelength selection window and delete the new tab
"""
if self.wav_win:
self.delete_datatab()
self.wav_win.destroy()
return
#
# -----
#
def update_CDmin(self,event=None):
"""Update the min value for CD signal"""
self.CD_min='manual'
self.insert_wavdata()
return
#
# -----
#
def update_CDmax(self,event=None):
"""Update the max value for CD signal"""
self.CD_max='manual'
self.insert_wavdata()
return
#
# ------
#
def insert_wavdata(self,junk=None):
"""Get the data for the wavelength that was selected and insert it in the sheet"""
#
# Loop over all datatabs and get the ones that are CD(T) data
#
chosen_wavelength=self.wavlen.get()
#
data=[]
for tabnum in self.tabnames:
if self.tabnames[tabnum][:4]=='CD(T':
#
# Grab data from here
#
temp=float(self.tabnames[tabnum][4:-1])
for dp in self.data[tabnum][0].keys():
if dp!='label' and dp!='label_widget':
wavlen=self.data[tabnum][0][dp]['var'].get()
if wavlen==chosen_wavelength:
data.append([temp,wavlen,self.data[tabnum][1][dp]['var'].get()])
#
# find min and max of CD signal
#
min_val=0
max_val=-1000
i=0
for temp,wavlen,mdeg in data:
mdeg=float(mdeg)
if mdeg>max_val:
max_val=mdeg
if mdeg<min_val:
min_val=mdeg
i=i+1
#
# Did the user specify one value or the other?
#
if self.CD_min=='auto':
self.min_mdeg.set(min_val)
#
if self.CD_max=='auto':
self.max_mdeg.set(max_val)
#
# Insert data from the wavelength
#
count=0
for temp,wavlen,mdeg in data:
self.data[self.temp_dep_tab][0][count]['var'].set(temp)
self.data[self.temp_dep_tab][1][count]['var'].set(mdeg)
count=count+1
self.redraw_graph(fitting=1)
return
| {
"repo_name": "dmnfarrell/peat",
"path": "PEATDB/Ekin/Ekin_CD.py",
"copies": "1",
"size": "11253",
"license": "mit",
"hash": 5894380464278538000,
"line_mean": 30.8781869688,
"line_max": 132,
"alpha_frac": 0.516484493,
"autogenerated": false,
"ratio": 3.538679245283019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9471891934739516,
"avg_score": 0.01665436070870064,
"num_lines": 353
} |
"""$Id: en.py 747 2007-03-29 10:27:14Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 747 $"
__date__ = "$Date: 2007-03-29 10:27:14 +0000 (Thu, 29 Mar 2007) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
import feedvalidator
from feedvalidator.logging import *
line = "line %(line)s"
column = "column %(column)s"
occurances = " (%(msgcount)s occurrences)"
messages = {
SAXError: "XML parsing error: %(exception)s",
NotHtml: "Invalid HTML",
UnicodeError: "%(exception)s (maybe a high-bit character?)",
UndefinedElement: "Undefined %(parent)s element: %(element)s",
MissingNamespace: "Missing namespace for %(element)s",
MissingElement: "Missing %(parent)s element: %(element)s",
MissingOptionalElement: "%(parent)s should contain a %(element)s element",
MissingRecommendedElement: "%(parent)s should contain a %(element)s element",
MissingAttribute: "Missing %(element)s attribute: %(attr)s",
UnexpectedAttribute: "Unexpected %(attribute)s attribute on %(element)s element",
NoBlink: "There is no blink element in RSS; use blogChannel:blink instead",
NoThrWhen: "There is no thr:when element in Atom; use thr:updated instead",
NoBlink: "There is no thr:when attribute in Atom; use thr:updated instead",
InvalidWidth: "%(element)s must be between 1 and 144",
InvalidHeight: "%(element)s must be between 1 and 400",
InvalidHour: "%(element)s must be an integer between 0 and 24",
InvalidDay: "%(element)s must be Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday",
InvalidInteger: "%(element)s must be an integer",
InvalidNonNegativeInteger: "%(element)s must be a non-negative integer",
InvalidPositiveInteger: "%(element)s must be a positive integer",
InvalidLatitude: "%(element)s must be between -90 and 90",
InvalidLongitude: "%(element)s must be between -180 and 180",
InvalidCommaSeparatedIntegers: "%(element)s must be comma-separated integers",
InvalidHttpGUID: "guid must be a full URL, unless isPermaLink attribute is false",
InvalidUpdatePeriod: "%(element)s must be hourly, daily, weekly, monthly, or yearly",
NotBlank: "%(element)s should not be blank",
AttrNotBlank: "The %(attr)s attribute of %(element)s should not be blank",
DuplicateElement: "%(parent)s contains more than one %(element)s",
DuplicateSemantics: "A channel should not include both %(core)s and %(ext)s",
DuplicateItemSemantics: "An item should not include both %(core)s and %(ext)s",
DuplicateValue: "%(element)s values must not be duplicated within a feed",
NonstdPrefix: '"%(preferred)s" is the preferred prefix for the namespace "%(ns)s"',
ReservedPrefix: 'The prefix "%(prefix)s" generally is associated with the namespace "%(ns)s"',
InvalidContact: "%(element)s must include an email address",
InvalidAddrSpec: "%(element)s must be an email address",
InvalidLink: "%(element)s must be a valid URI",
InvalidIRI: "%(element)s must be a valid IRI",
InvalidFullLink: "%(element)s must be a full and valid URL",
InvalidUriChar: "Invalid character in a URI",
InvalidISO8601Date: "%(element)s must be an ISO8601 date",
InvalidISO8601DateTime: "%(element)s must be an ISO8601 date-time",
InvalidW3CDTFDate: "%(element)s must be an W3CDTF date",
InvalidRFC2822Date: "%(element)s must be an RFC-822 date-time",
IncorrectDOW: "Incorrect day of week",
InvalidRFC3339Date: "%(element)s must be an RFC-3339 date-time",
InvalidNPTTime: "%(attr)s must be an NPT-time",
InvalidLanguage: "%(element)s must be an ISO-639 language code",
InvalidURIAttribute: "%(attr)s attribute of %(element)s must be a valid URI",
InvalidURLAttribute: "%(attr)s attribute of %(element)s must be a full URL",
InvalidIntegerAttribute: "%(attr)s attribute of %(element)s must be a positive integer",
InvalidBooleanAttribute: "%(attr)s attribute of %(element)s must be 'true' or 'false'",
InvalidMIMEAttribute: "%(attr)s attribute of %(element)s must be a valid MIME type",
ItemMustContainTitleOrDescription: "item must contain either title or description",
ContainsHTML: "%(element)s should not contain HTML",
ContainsEmail: "%(element)s should not include email address",
ContainsUndeclaredHTML: "%(element)s should not contain HTML unless declared in the type attribute",
NotEnoughHoursInTheDay: "skipHours can not contain more than 24 hour elements",
EightDaysAWeek: "skipDays can not contain more than 7 day elements",
SecurityRisk: "%(element)s should not contain %(tag)s tag",
SecurityRiskAttr: "%(element)s should not contain %(attr)s attribute",
ContainsRelRef: "%(element)s should not contain relative URL references",
ContainsSystemEntity: "Feeds must not contain SYSTEM entities",
InvalidContentMode: "mode must be 'xml', 'escaped', or 'base64'",
InvalidMIMEType: "Not a valid MIME type",
NotEscaped: "%(element)s claims to be escaped, but isn't",
NotInline: "%(element)s claims to be inline, but may contain html",
NotBase64: "%(element)s claims to be base64-encoded, but isn't",
InvalidURN: "%(element)s is not a valid URN",
InvalidTAG: "%(element)s is not a valid TAG",
InvalidURI: "%(element)s is not a valid URI",
ObsoleteVersion: "This feed is an obsolete version",
ObsoleteNamespace: "This feed uses an obsolete namespace",
InvalidNamespace: "%(element)s is in an invalid namespace: %(namespace)s",
InvalidDoctype: "This feed contains conflicting DOCTYPE and version information",
DuplicateAtomLink: "Duplicate alternate links with the same type and hreflang",
MissingHref: "%(element)s must have an href attribute",
AtomLinkNotEmpty: "%(element)s should not have text (all data is in attributes)",
BadCharacters: '%(element)s contains bad characters',
BadXmlVersion: "Incorrect XML Version: %(version)s",
UnregisteredAtomLinkRel: "%(value)s is not a registered link relationship",
HttpError: "Server returned %(status)s",
IOError: "%(exception)s (%(message)s; misconfigured server?)",
ObscureEncoding: "Obscure XML character encoding: %(encoding)s",
NonstdEncoding: "This encoding is not mandated by the XML specification: %(encoding)s",
UnexpectedContentType: '%(type)s should not be served with the "%(contentType)s" media type',
EncodingMismatch: 'Your feed appears to be encoded as "%(encoding)s", but your server is reporting "%(charset)s"',
UnknownEncoding: "Unknown XML character encoding: %(encoding)s",
NotSufficientlyUnique: "The specified guid is not sufficiently unique",
MissingEncoding: "No character encoding was specified",
UnexpectedText: "Unexpected Text",
ValidatorLimit: "Unable to validate, due to hardcoded resource limits (%(limit)s)",
TempRedirect: "Temporary redirect",
TextXml: "Content type of text/xml with no charset",
Uncompressed: "Response is not compressed",
HttpProtocolError: 'Response includes bad HTTP header name: "%(header)s"',
NonCanonicalURI: 'Identifier "%(uri)s" is not in canonical form (the canonical form would be "%(curi)s")',
InvalidRDF: 'RDF parsing error: %(message)s',
InvalidDuration: 'Invalid duration: "%(value)s"',
InvalidYesNo: '%(element)s must be "yes", "no", or "clean"',
TooLong: 'length of %(len)d exceeds the maximum allowable for %(element)s of %(max)d',
InvalidItunesCategory: '%(text)s is not one of the predefined iTunes categories or sub-categories',
ObsoleteItunesCategory: '%(text)s is an obsolete iTunes category or sub-category',
InvalidKeywords: 'Use commas to separate keywords',
InvalidTextType: 'type attribute must be "text", "html", or "xhtml"',
MissingXhtmlDiv: 'Missing xhtml:div element',
MissingSelf: 'Missing atom:link with rel="self"',
DuplicateEntries: 'Two entries with the same id',
MisplacedMetadata: '%(element)s must appear before all entries',
MissingSummary: 'Missing summary',
MissingTextualContent: 'Missing textual content',
MissingContentOrAlternate: 'Missing content or alternate link',
MissingSourceElement: "Missing %(parent)s element: %(element)s",
MissingTypeAttr: "Missing %(element)s attribute: %(attr)s",
HtmlFragment: "%(type)s type used for a document fragment",
DuplicateUpdated: "Two entries with the same value for atom:updated",
UndefinedNamedEntity: "Undefined named entity",
ImplausibleDate: "Implausible date",
UnexpectedWhitespace: "Whitespace not permitted here",
SameDocumentReference: "Same-document reference",
SelfDoesntMatchLocation: "Self reference doesn't match document location",
InvalidOPMLVersion: 'The "version" attribute for the opml element must be 1.0 or 1.1.',
MissingXmlURL: 'An <outline> element whose type is "rss" must have an "xmlUrl" attribute.',
InvalidOutlineVersion: 'An <outline> element whose type is "rss" may have a version attribute, whose value must be RSS, RSS1, RSS2, or scriptingNews.',
InvalidOutlineType: 'The type attribute on an <outline> element should be a known type.',
InvalidExpansionState: '<expansionState> is a comma-separated list of line numbers.',
InvalidTrueFalse: '%(element)s must be "true" or "false"',
MissingOutlineType: 'An <outline> element with more than just a "text" attribute should have a "type" attribute indicating how the other attributes are to be interpreted.',
MissingTitleAttr: 'Missing outline attribute: title',
MissingUrlAttr: 'Missing outline attribute: url',
NotUTF8: 'iTunes elements should only be present in feeds encoded as UTF-8',
MissingItunesElement: 'Missing recommended iTunes %(parent)s element: %(element)s',
UnsupportedItunesFormat: 'Format %(extension)s is not supported by iTunes',
InvalidCountryCode: "Invalid country code: \"%(value)s\"",
InvalidCurrencyUnit: "Invalid value for %(attr)s",
InvalidFloat: "Invalid value for %(attr)s",
InvalidFloatUnit: "Invalid value for %(attr)s",
InvalidFullLocation: "Invalid value for %(attr)s",
InvalidGender: "Invalid value for %(attr)s",
InvalidIntUnit: "Invalid value for %(attr)s",
InvalidLabel: "Invalid value for %(attr)s",
InvalidLocation: "Invalid value for %(attr)s",
InvalidMaritalStatus: "Invalid value for %(attr)s",
InvalidPaymentMethod: "Invalid value for %(attr)s",
InvalidPercentage: '%(element)s must be a percentage',
InvalidPriceType: "Invalid value for %(attr)s",
InvalidRatingType: "Invalid value for %(attr)s",
InvalidReviewerType: "Invalid value for %(attr)s",
InvalidSalaryType: "Invalid value for %(attr)s",
InvalidServiceType: "Invalid value for %(attr)s",
InvalidValue: "Invalid value for %(attr)s",
InvalidYear: "Invalid value for %(attr)s",
TooMany: "%(parent)s contains more than ten %(element)s elements",
InvalidPermalink: "guid must be a full URL, unless isPermaLink attribute is false",
NotInANamespace: "Missing namespace for %(element)s",
UndeterminableVocabulary:"Missing namespace for %(element)s",
SelfNotAtom: '"self" link references a non-Atom representation',
InvalidFormComponentName: 'Invalid form component name',
ImageLinkDoesntMatch: "Image link doesn't match channel link",
ImageUrlFormat: "Image not in required format",
ProblematicalRFC822Date: "Problematical RFC 822 date-time value",
DuplicateEnclosure: "item contains more than one enclosure",
MissingItunesEmail: "The recommended <itunes:email> element is missing",
MissingGuid: "%(parent)s should contain a %(element)s element",
UriNotIri: "IRI found where URL expected",
ObsoleteWikiNamespace: "Obsolete Wiki Namespace",
DuplicateDescriptionSemantics: "Avoid %(element)s",
InvalidCreditRole: "Invalid Credit Role",
InvalidMediaTextType: 'type attribute must be "plain" or "html"',
InvalidMediaHash: 'Invalid Media Hash',
InvalidMediaRating: 'Invalid Media Rating',
InvalidMediaRestriction: "media:restriction must be 'all' or 'none'",
InvalidMediaRestrictionRel: "relationship must be 'allow' or 'disallow'",
InvalidMediaRestrictionType: "type must be 'country' or 'uri'",
InvalidMediaMedium: 'Invalid content medium: "%(value)s"',
InvalidMediaExpression: 'Invalid content expression: "%(value)s"',
DeprecatedMediaAdult: 'media:adult is deprecated',
CommentRSS: 'wfw:commentRSS should be wfw:commentRss',
NonSpecificMediaType: '"%(contentType)s" media type is not specific enough',
DangerousStyleAttr: "style attribute contains potentially dangerous content",
NotURLEncoded: "%(element)s must be URL encoded",
InvalidLocalRole: "Invalid local role",
InvalidEncoding: "Invalid character encoding",
ShouldIncludeExample: "OpenSearchDescription should include an example Query",
InvalidAdultContent: "Non-boolean value for %(element)s",
InvalidLocalParameter: "Invalid local parameter name",
UndeclaredPrefix: "Undeclared %(element)s prefix",
InvalidExtensionAttr: "Invalid extension attribute: %(attribute)s",
DeprecatedDTD: "The use of this DTD has been deprecated by Netscape",
MisplacedXHTMLContent: "Misplaced XHTML content",
SchemeNotIANARegistered: "URI scheme not IANA registered",
InvalidCoord: "Invalid coordinates",
InvalidCoordList: "Invalid coordinate list",
CoordComma: "Comma found in coordinate pair",
}
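# --- Illustrative smoke test (not part of the original module) ---
# The templates above are plain %-style strings keyed by event class; the
# validator is assumed to fill them from each logged event's parameters.
if __name__ == '__main__':
    params = {"parent": "channel", "element": "title"}
    assert messages[MissingElement] % params == "Missing channel element: title"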
| {
"repo_name": "manderson23/NewsBlur",
"path": "vendor/feedvalidator/i18n/en.py",
"copies": "16",
"size": "14502",
"license": "mit",
"hash": 7871399160008374000,
"line_mean": 68.3875598086,
"line_max": 179,
"alpha_frac": 0.6741828713,
"autogenerated": false,
"ratio": 4.030572540300167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""$Id: entry.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_item
from extension import extension_entry
#
# pie/echo entry element.
#
class entry(validatorBase, extension_entry, itunes_item):
def getExpectedAttrNames(self):
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
def prevalidate(self):
self.links=[]
self.content=None
def validate(self):
if not 'title' in self.children:
self.log(MissingElement({"parent":self.name, "element":"title"}))
if not 'author' in self.children and not 'author' in self.parent.children:
self.log(MissingElement({"parent":self.name, "element":"author"}))
if not 'id' in self.children:
self.log(MissingElement({"parent":self.name, "element":"id"}))
if not 'updated' in self.children:
self.log(MissingElement({"parent":self.name, "element":"updated"}))
if self.content:
if not 'summary' in self.children:
if self.content.attrs.has_key((None,"src")):
self.log(MissingSummary({"parent":self.parent.name, "element":self.name}))
ctype = self.content.type
if ctype.find('/') > -1 and not (
ctype.endswith('+xml') or ctype.endswith('/xml') or
ctype.startswith('text/')):
self.log(MissingSummary({"parent":self.parent.name, "element":self.name}))
else:
if not 'summary' in self.children:
self.log(MissingTextualContent({"parent":self.parent.name, "element":self.name}))
for link in self.links:
if link.rel == 'alternate': break
else:
self.log(MissingContentOrAlternate({"parent":self.parent.name, "element":self.name}))
# can only have one alternate per type
types={}
for link in self.links:
if not link.rel=='alternate': continue
if not link.type in types: types[link.type]=[]
if link.hreflang in types[link.type]:
self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
else:
types[link.type] += [link.hreflang]
if self.itunes: itunes_item.validate(self)
def do_author(self):
from author import author
return author()
def do_category(self):
from category import category
return category()
def do_content(self):
from content import content
self.content=content()
return self.content, noduplicates()
def do_contributor(self):
from author import author
return author()
def do_id(self):
return canonicaluri(), nows(), noduplicates(), unique('id',self.parent,DuplicateEntries)
def do_link(self):
from link import link
self.links += [link()]
return self.links[-1]
def do_published(self):
return rfc3339(), nows(), noduplicates()
def do_source(self):
return source(), noduplicates()
def do_rights(self):
from content import textConstruct
return textConstruct(), noduplicates()
def do_summary(self):
from content import textConstruct
return textConstruct(), noduplicates()
def do_title(self):
from content import textConstruct
return textConstruct(), noduplicates()
def do_updated(self):
return rfc3339(), nows(), noduplicates(), unique('updated',self.parent,DuplicateUpdated)
from feed import feed
class source(feed):
def missingElement(self, params):
self.log(MissingSourceElement(params))
def validate(self):
self.validate_metadata()
def do_author(self):
if not 'author' in self.parent.children:
self.parent.children.append('author')
return feed.do_author(self)
def do_entry(self):
self.log(UndefinedElement({"parent":self.name, "element":"entry"}))
return eater()
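# --- Illustrative note (not part of the original module) ---
# The checks in entry.validate() above amount to requiring a minimal Atom
# entry of roughly this shape (cf. RFC 4287):
#
#   <entry>
#     <title>...</title>
#     <id>tag:example.org,2006:1</id>
#     <updated>2006-09-25T02:01:18Z</updated>
#     <author><name>...</name></author>  <!-- or inherited from the feed -->
#     <content>...</content>             <!-- or a summary/alternate link -->
#   </entry>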
| {
"repo_name": "Einsteinish/PyTune3",
"path": "vendor/feedvalidator/entry.py",
"copies": "16",
"size": "4034",
"license": "mit",
"hash": 3719198430064036400,
"line_mean": 31.272,
"line_max": 119,
"alpha_frac": 0.665840357,
"autogenerated": false,
"ratio": 3.601785714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A widget, which handles text input."""
from Editable import Editable
from ocempgui.draw import String
from Constants import *
import base
class Entry (Editable):
"""Entry (text="") -> Entry
A widget class suitable for a single line of text input.
The Entry widget is a text input box for a single line of text. It
allows an unlimited amount of text input, but is usually more
suitable for a small or medium amount, which can be scrolled, if the
text size exceeds the visible widget size.
The 'padding' attribute and set_padding() method are used to place a
certain amount of pixels between the text and the outer edges of the
Entry.
entry.padding = 10
entry.set_padding (10)
The Entry supports different border types by setting its 'border'
attribute to a valid value of the BORDER_TYPES constants.
entry.border = BORDER_SUNKEN
entry.set_border (BORDER_SUNKEN)
It also features a password mode, which will cause it to display the
text as asterisks ('*'). It will not encrypt or protect the internal
text attribute however. The password mode can be set using the
'password' attribute and set_password() method.
entry.password = True
entry.set_password (False)
The Entry uses a default size for itself by setting the 'size'
attribute to a width of 94 pixels and a height of 24 pixels.
Default action (invoked by activate()):
See the Editable class.
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
SIG_MOUSEDOWN - Invoked, when a mouse button gets pressed on the
Entry.
SIG_MOUSEMOVE - Invoked, when the mouse moves over the Entry.
Attributes:
password - Characters will be drawn as '*' (asterisk).
padding - Additional padding between text and borders. Default is 2.
border - The border style to set for the Entry.
"""
def __init__ (self, text=""):
Editable.__init__ (self)
self._realtext = None
self._padding = 2
self._border = BORDER_SUNKEN
self._password = False
self._offset = 0 # Front offset in pixels.
# Pixel sizes of text to the left and right of caret, and char
# the caret is at.
self._leftsize = (0, 0)
self._rightsize = (0, 0)
self._cursize = (0, 0)
self._font = None
self._signals[SIG_MOUSEDOWN] = []
self._signals[SIG_MOUSEMOVE] = []
self.minsize = 94, 24 # Default size to use.
self.text = text
def set_padding (self, padding):
"""E.set_padding (...) -> None
Sets the padding between the edges and text of the Entry.
The padding value is the amount of pixels to place between the
edges of the Entry and the displayed text.
Raises a TypeError, if the argument is not a positive integer.
"""
if (type (padding) != int) or (padding < 0):
raise TypeError ("Argument must be a positive integer")
self._padding = padding
self.dirty = True
def set_password (self, password):
"""E.set_password (...) -> None
        When this is set to True, the entry's content will be drawn
with '*' (asterisk) characters instead of the actual
characters. This is useful for password dialogs, where it is
undesirable for the password to be displayed on-screen.
"""
self._password = password
self.dirty = True
def _set_caret_position (self, eventarea, position):
"""W._set_caret_position (...) -> None
Sets the position of the caret based on the given pixel position.
"""
# Get the relative mouse point.
mpos = (position[0] - eventarea.x - self.padding,
position[1] - eventarea.y - self.padding)
if mpos[0] <= 0:
# User clicked on the border or into the padding area.
self.caret = 0
return
caret = self.caret
mpoint = self._offset + mpos[0]
left, right, current = self._get_text_overhang (caret)
if mpoint > (left[0] + right[0]):
# User clicked past the length of the text.
self.caret = len (self.text)
return
# Find the click inside the text area
while (mpoint > left[0]) and (caret <= len (self.text)):
caret += 1
left, right, current = self._get_text_overhang (caret)
while (mpoint < left[0]) and (caret > 0):
caret -= 1
left, right, current = self._get_text_overhang (caret)
# Move caret to left or right, based on center of clicked character
if mpoint > (left[0] + (current[0] / 2) + self.border):
caret += 1
self.caret = caret
def _get_text_overhang (self, pos):
"""E._get_text_overhang (...) -> (int, int), (int, int), (int, int)
        Gets the pixel sizes of the text to the left and right of the caret,
        and the size of the character the caret is at, in this order.
"""
# TODO: the text display should be separated in an own TextView
# class.
if self._font == None:
self._cursize = (0, 0)
return
text = self.text
if self.password:
text = '*' * len (self.text)
self._leftsize = self._font.size (text[:pos])
self._rightsize = self._font.size (text[pos:])
try:
self._cursize = self._font.size (text[pos])
except IndexError:
self._cursize = (0, 0)
return self._leftsize, self._rightsize, self._cursize
def _calculate_offset (self, textwidth, font):
"""E._calculate_offset (...) -> int
Calculates the left pixel offset for the Entry text.
"""
self._font = font
self._get_text_overhang (self.caret)
bump_size = self.minsize[0] / 4
while self._leftsize[0] < self._offset:
new_offset = self._offset - bump_size
self._offset = max (new_offset, 0)
while self._leftsize[0] > (self._offset + textwidth):
self._offset += bump_size
return min (-self._offset, 0)
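    # Illustrative walk-through of the offset logic above (assumed numbers):
    # with the default minsize[0] of 94, bump_size is 23 pixels. Once the
    # text left of the caret grows wider than offset + visible width, the
    # view scrolls right in 23-pixel steps; draw() then blits the rendered
    # text at -offset, keeping the caret inside the visible area.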
def notify (self, event):
"""E.notify (...) -> None
Notifies the Entry about an event.
"""
if not self.sensitive:
return
if event.signal == SIG_MOUSEDOWN:
eventarea = self.rect_to_client ()
if eventarea.collidepoint (event.data.pos):
self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
if event.data.button == 1:
                    self._caretvisible = True
self._set_caret_position (eventarea, event.data.pos)
if not self.focus:
self.activate ()
event.handled = True
elif event.signal == SIG_MOUSEMOVE:
eventarea = self.rect_to_client ()
if eventarea.collidepoint (event.data.pos):
self.run_signal_handlers (SIG_MOUSEMOVE, event.data)
self.entered = True
event.handled = True
else:
self.entered = False
Editable.notify (self, event)
def set_border (self, border):
"""E.set_border (...) -> None
Sets the border type to be used by the Entry.
Raises a ValueError, if the passed argument is not a value from
BORDER_TYPES
"""
if border not in BORDER_TYPES:
raise ValueError ("border must be a value from BORDER_TYPES")
self._border = border
self.dirty = True
def draw_bg (self):
"""E.draw_bg () -> Surface
Draws the surface of the Entry and returns it.
Draws the background surface of the Entry and returns it.
Creates the visible background surface of the Entry and returns
it to the caller.
"""
return base.GlobalStyle.engine.draw_entry (self)
def draw (self):
"""E.draw () -> None
Draws the Entry surface.
"""
Editable.draw (self)
cls = self.__class__
style = base.GlobalStyle
engine = style.engine
st = self.style or style.get_style (cls)
border = style.get_border_size (cls, st, self.border)
text = self.text
rect = self.image.get_rect ()
if self.password:
text = '*' * len (self.text)
rtext = style.engine.draw_string (text, self.state, cls, st)
# The 'inner' surface, which we will use for blitting the text.
sf_text = engine.draw_rect (rect.width - 2 * (self.padding + border),
rect.height - 2 * (self.padding + border),
self.state, cls, st)
# Adjust entry offset based on caret location.
font = String.create_font \
(style.get_style_entry (cls, st, "font", "name"),
style.get_style_entry (cls, st, "font", "size"))
rect_sftext = sf_text.get_rect ()
blit_pos = self._calculate_offset (rect_sftext.width, font)
sf_text.blit (rtext, (blit_pos, 0))
# Draw caret.
if self.focus and self.caret_visible:
# The caret position is at the end of the left overhang.
caret_pos = self._get_text_overhang (self.caret)[0][0]
engine.draw_caret (sf_text, blit_pos + caret_pos, 1, 2, self.state,
cls, st)
rect_sftext.center = rect.center
self.image.blit (sf_text, rect_sftext)
padding = property (lambda self: self._padding,
lambda self, var: self.set_padding (var),
doc = "The additional padding for the Entry.")
border = property (lambda self: self._border,
lambda self, var: self.set_border (var),
doc = "The border style to set for the Entry.")
password = property (lambda self: self._password,
lambda self, var: self.set_password (var),
doc = "Indicates the password mode for the Entry.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Entry.py",
"copies": "1",
"size": "11696",
"license": "bsd-2-clause",
"hash": 6193064870625271000,
"line_mean": 36.2484076433,
"line_max": 79,
"alpha_frac": 0.5942202462,
"autogenerated": false,
"ratio": 4.140176991150443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011940626729686876,
"num_lines": 314
} |
# $Id: ErrorCatchers.py,v 1.7 2005/01/03 19:59:07 tavis_rudd Exp $
"""ErrorCatcher class for Cheetah Templates
Meta-Data
================================================================================
Author: Tavis Rudd <[email protected]>
Version: $Revision: 1.7 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2005/01/03 19:59:07 $
"""
__author__ = "Tavis Rudd <[email protected]>"
__revision__ = "$Revision: 1.7 $"[11:-2]
import time
from Cheetah.NameMapper import NotFound
class Error(Exception):
pass
class ErrorCatcher:
_exceptionsToCatch = (NotFound,)
def __init__(self, templateObj):
pass
def exceptions(self):
return self._exceptionsToCatch
def warn(self, exc_val, code, rawCode, lineCol):
return rawCode
## make an alias
Echo = ErrorCatcher
class BigEcho(ErrorCatcher):
def warn(self, exc_val, code, rawCode, lineCol):
return "="*15 + "<" + rawCode + " could not be found>" + "="*15
class KeyError(ErrorCatcher):
    def warn(self, exc_val, code, rawCode, lineCol):
        # This class shadows the builtin KeyError, so raise the builtin
        # explicitly; raising a non-Exception instance would fail.
        import exceptions
        raise exceptions.KeyError("no '%s' in this Template Object's Search List" % rawCode)
class ListErrors(ErrorCatcher):
"""Accumulate a list of errors."""
_timeFormat = "%c"
def __init__(self, templateObj):
ErrorCatcher.__init__(self, templateObj)
self._errors = []
def warn(self, exc_val, code, rawCode, lineCol):
dict = locals().copy()
del dict['self']
dict['time'] = time.strftime(self._timeFormat,
time.localtime(time.time()))
self._errors.append(dict)
return rawCode
def listErrors(self):
"""Return the list of errors."""
return self._errors
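# --- Illustrative usage sketch (not part of the original module) ---
# Error catchers are enabled per template, e.g. with Cheetah's
# #errorCatcher directive (sketch, assuming the standard directive syntax):
#
#   #errorCatcher ListErrors
#   Hello $nonexistentPlaceholder
#
# With ListErrors active, each failed lookup is recorded in _errors (with
# a %c-formatted timestamp) instead of raising NotFound, and the raw
# placeholder text is left in the rendered output.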
| {
"repo_name": "dragondjf/QMarkdowner",
"path": "Cheetah/ErrorCatchers.py",
"copies": "16",
"size": "1755",
"license": "mit",
"hash": 3103235015702775300,
"line_mean": 27.3064516129,
"line_max": 82,
"alpha_frac": 0.5897435897,
"autogenerated": false,
"ratio": 3.567073170731707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03747826892862295,
"num_lines": 62
} |
# $Id: ethernet.py 65 2010-03-26 02:53:51Z dugsong $
# -*- coding: utf-8 -*-
"""Ethernet II, LLC (802.3+802.2), LLC/SNAP, and Novell raw 802.3,
with automatic 802.1q, MPLS, PPPoE, and Cisco ISL decapsulation."""
import struct
import dpkt
import stp
ETH_CRC_LEN = 4
ETH_HDR_LEN = 14
ETH_LEN_MIN = 64 # minimum frame length with CRC
ETH_LEN_MAX = 1518 # maximum frame length with CRC
ETH_MTU = (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
ETH_MIN = (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
ETH_TYPE_PUP = 0x0200 # PUP protocol
ETH_TYPE_IP = 0x0800 # IP protocol
ETH_TYPE_ARP = 0x0806 # address resolution protocol
ETH_TYPE_AOE = 0x88a2 # AoE protocol
ETH_TYPE_CDP = 0x2000 # Cisco Discovery Protocol
ETH_TYPE_DTP = 0x2004 # Cisco Dynamic Trunking Protocol
ETH_TYPE_REVARP = 0x8035 # reverse addr resolution protocol
ETH_TYPE_8021Q = 0x8100 # IEEE 802.1Q VLAN tagging
ETH_TYPE_IPX = 0x8137 # Internetwork Packet Exchange
ETH_TYPE_IP6 = 0x86DD # IPv6 protocol
ETH_TYPE_PPP = 0x880B # PPP
ETH_TYPE_MPLS = 0x8847 # MPLS
ETH_TYPE_MPLS_MCAST = 0x8848 # MPLS Multicast
ETH_TYPE_PPPoE_DISC = 0x8863 # PPP Over Ethernet Discovery Stage
ETH_TYPE_PPPoE = 0x8864 # PPP Over Ethernet Session Stage
ETH_TYPE_LLDP = 0x88CC # Link Layer Discovery Protocol
# MPLS label stack fields
MPLS_LABEL_MASK = 0xfffff000
MPLS_QOS_MASK = 0x00000e00
MPLS_TTL_MASK = 0x000000ff
MPLS_LABEL_SHIFT = 12
MPLS_QOS_SHIFT = 9
MPLS_TTL_SHIFT = 0
MPLS_STACK_BOTTOM = 0x0100
class Ethernet(dpkt.Packet):
__hdr__ = (
('dst', '6s', ''),
('src', '6s', ''),
('type', 'H', ETH_TYPE_IP)
)
_typesw = {}
def _unpack_data(self, buf):
if self.type == ETH_TYPE_8021Q:
self.tag, self.type = struct.unpack('>HH', buf[:4])
buf = buf[4:]
elif self.type == ETH_TYPE_MPLS or self.type == ETH_TYPE_MPLS_MCAST:
# XXX - skip labels (max # of labels is undefined, just use 24)
self.labels = []
for i in range(24):
entry = struct.unpack('>I', buf[i * 4:i * 4 + 4])[0]
label = ((entry & MPLS_LABEL_MASK) >> MPLS_LABEL_SHIFT,
(entry & MPLS_QOS_MASK) >> MPLS_QOS_SHIFT,
(entry & MPLS_TTL_MASK) >> MPLS_TTL_SHIFT)
self.labels.append(label)
if entry & MPLS_STACK_BOTTOM:
break
self.type = ETH_TYPE_IP
buf = buf[(i + 1) * 4:]
try:
self.data = self._typesw[self.type](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.type > 1500:
# Ethernet II
self._unpack_data(self.data)
elif (self.dst.startswith('\x01\x00\x0c\x00\x00') or
self.dst.startswith('\x03\x00\x0c\x00\x00')):
# Cisco ISL
self.vlan = struct.unpack('>H', self.data[6:8])[0]
self.unpack(self.data[12:])
elif self.data.startswith('\xff\xff'):
# Novell "raw" 802.3
self.type = ETH_TYPE_IPX
self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[2:])
else:
# 802.2 LLC
self.dsap, self.ssap, self.ctl = struct.unpack('BBB', self.data[:3])
if self.data.startswith('\xaa\xaa'):
# SNAP
self.type = struct.unpack('>H', self.data[6:8])[0]
self._unpack_data(self.data[8:])
else:
# non-SNAP
dsap = ord(self.data[0])
if dsap == 0x06: # SAP_IP
self.data = self.ip = self._typesw[ETH_TYPE_IP](self.data[3:])
elif dsap == 0x10 or dsap == 0xe0: # SAP_NETWARE{1,2}
self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[3:])
elif dsap == 0x42: # SAP_STP
self.data = self.stp = stp.STP(self.data[3:])
@classmethod
def set_type(cls, t, pktclass):
cls._typesw[t] = pktclass
@classmethod
def get_type(cls, t):
return cls._typesw[t]
# XXX - auto-load Ethernet dispatch table from ETH_TYPE_* definitions
def __load_types():
g = globals()
for k, v in g.iteritems():
if k.startswith('ETH_TYPE_'):
name = k[9:]
modname = name.lower()
try:
mod = __import__(modname, g, level=1)
Ethernet.set_type(v, getattr(mod, name))
except (ImportError, AttributeError):
continue
if not Ethernet._typesw:
__load_types()
def test_eth(): # TODO recheck this test
s = ('\x00\xb0\xd0\xe1\x80\x72\x00\x11\x24\x8c\x11\xde\x86\xdd\x60\x00\x00\x00'
'\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72'
'\xcd\xd3\x00\x16\xff\x50\xd7\x13\x00\x00\x00\x00\xa0\x02\xff\xff\x67\xd3'
'\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\x0a\x7d\x18\x3a\x61'
'\x00\x00\x00\x00')
assert Ethernet(s)
if __name__ == '__main__':
test_eth()
print 'Tests Successful...'
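# A short usage sketch: parsing the IPv6/TCP frame from test_eth() and
# walking the decoded layers. The attribute names ('ip6', 'tcp') follow
# the setattr() convention used in _unpack_data() above.
#
#     frame = Ethernet(s)          # s as defined in test_eth()
#     print '%#x' % frame.type     # 0x86dd, i.e. ETH_TYPE_IP6
#     print repr(frame.data)       # the decoded ip6.IP6 payload object
#
# New EtherTypes can be hooked into the dispatch table at runtime; the
# packet class named here is hypothetical:
#
#     Ethernet.set_type(0x88b5, MyExperimentalPacket)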
| {
"repo_name": "bpanneton/dpkt",
"path": "dpkt/ethernet.py",
"copies": "5",
"size": "5375",
"license": "bsd-3-clause",
"hash": 3753075930122482700,
"line_mean": 35.0738255034,
"line_max": 84,
"alpha_frac": 0.568,
"autogenerated": false,
"ratio": 2.8289473684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5896947368421053,
"avg_score": null,
"num_lines": null
} |
"""Event management system for any type of events and objects."""
from Signals import Event
from INotifyable import INotifyable
class EventManager (object):
"""EventManager () -> EventManager
An event distribution system.
The EventManager enables objects to receive events. Each object can
register several signal types, on which occurance the EventManager
will call the object's 'notify' method with that event.
Events also can be distributed by invoking the 'emit()' method of
the EventManager.
Attributes:
queues - A dict with signal-list associations of registered objects.
event_grabber - The event grabbing object, which will receive all events.
"""
def __init__ (self):
self.queues = {}
self._grabber = None
def __len__ (self):
"""E.__len__ () -> len (E)
Returns the number of objects within the EventManager.
"""
length = 0
evlist = self.queues.keys ()
for signal in evlist:
length += len (self.queues[signal])
return length
def add_object (self, obj, *signals):
"""E.add_object (...) -> None
Adds an object to the EventManager.
Adds an object as listener for one or more events to the
EventManager. Each event type in the *signals argument will
cause the object to be added to a respective queue, on which
events with the same type will be emitted.
Raises an AttributeError, if the passed 'obj' argument does
not have a callable notify attribute.
"""
if not isinstance (obj, INotifyable):
print "Warning: object should inherit from INotifyable"
if not hasattr (obj, "notify") or not callable (obj.notify):
raise AttributeError ("notify() method not found in object %s"
% obj)
for key in signals:
self.queues.setdefault (key, []).append (obj)
def add_high_priority_object (self, obj, *signals):
"""E.add_high_priority_object (...) -> None
Adds a high priority object to the EventManager.
High priority objects do not differ from normal objects, except
that they are inserted at the front of the signal queues instead of
being appended to them like add_object() does. As a result, those
objects are notified first.
Raises an AttributeError, if the passed 'obj' argument does
not have a callable notify attribute.
"""
if not isinstance (obj, INotifyable):
print "*** Warning: object should inherit from INotifyable"
if not hasattr (obj, "notify") or not callable (obj.notify):
raise AttributeError ("notify() method not found in object %s"
% obj)
for key in signals:
self.queues.setdefault (key, []).insert (0, obj)
def remove_object (self, obj, *signals):
"""E.remove_object (...) -> None
Removes an object from the EventManager.
Removes the object from the queues passed as the 'signals'
arguments. If 'signals' is None, the object will be removed
from all queues of the EventManager.
"""
if signals:
evlist = signals
else:
evlist = self.queues.keys ()
for signal in evlist:
if obj in self.queues[signal]:
self.queues[signal].remove (obj)
def clear (self):
"""E.clear () -> None
Removes all objects and signals from all event queues.
"""
self.event_grabber = None
self.queues = {}
def grab_events (self, obj):
"""E.grab_events (...) -> None
Sets an event grabber object for the EventManager.
Causes the EventManager to send _all_ its events only to this
object instead of the objects in its queues. It is up to the
event grabbing object to filter the events it receives.
"""
if (obj != None) and not isinstance (obj, INotifyable):
print "*** Warning: object should inherit from INotifyable"
if not hasattr (obj, "notify") or not callable (obj.notify):
raise AttributeError ("notify() method not found in object %s"
% obj)
self._grabber = obj
def emit (self, signal, data):
"""E.emit (...) -> None
Emits an event, which will be sent to the objects.
Emits an event on a specific queue of the EventManager, which
will be sent to the objects in that queue. If one of the
receiving objects sets the 'handled' attribute of the event to
True, the emission will stop immediately so that following
objects will not receive the event.
"""
self.emit_event (Event (signal, data))
def emit_event (self, event):
"""E.emit_event (...) -> None
Emits an event, which will be sent to the objects.
Emits an event on a specific queue of the EventManager, which
will be sent to the objects in that queue. If one of the
receiving objects sets the 'handled' attribute of the event to
True, the emission will stop immediately so that following
objects will not receive the event.
"""
if self.event_grabber:
self.event_grabber.notify (event)
return
evlist = self.queues.get (event.signal, [])
for obj in evlist:
obj.notify (event)
if event.handled:
break
event_grabber = property (lambda self: self._grabber,
lambda self, var: self.grab_events (var),
doc = "Sets the event grabber object.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/events/EventManager.py",
"copies": "1",
"size": "7259",
"license": "bsd-2-clause",
"hash": -5009778052737995000,
"line_mean": 38.8846153846,
"line_max": 79,
"alpha_frac": 0.6343849015,
"autogenerated": false,
"ratio": 4.57403906742281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5708423968922811,
"avg_score": null,
"num_lines": null
} |
""" Core interface to the event bus. This module also contains
a base class for all event bus plugins.
The interface and provider may differ, but Peloton constrains
the message bus to one that supports the AMQP protocol as defined
at http://amqp.org/.
The message format is simple: a pickled dictionary is sent over the bus
with one mandatory key (sender_guid) and an arbitrary number
of arbitrary keys. These are de-pickled before being passed to registered
event handlers.
This module defines three ready-made event handlers of use:
- The QueueEventHandler extends the Python Queue class. Events
are received and placed on the queue so that other code can
simply get() them off as required.
- The MethodEventHandler reflects the call through to another method
with which it was initialised.
- For debugging, the DebugEventHandler simply dumps the message to the
logger with which it was initialised.
"""
from peloton.exceptions import MessagingError
class AbstractEventHandler(object):
""" Base class for all event handlers. """
def eventReceived(self, msg, exchange='', key='', ctag=''):
""" Handle message 'msg'. """
raise NotImplementedError
class EventDispatcher(object):
""" The event dispatcher is a static component of the Peloton kernel
which performs two roles:
1. It provides an INTERNAL event routing mechanism for coupling
components within a kernel. Such events are wholly separate
from the external messaging bus.
2. It provides an interface to the EXTERNAL event bus provided
by the plugin which registers itself under the name 'eventbus'.
Thus one interface manages all messaging and internal messages are
completely isolated from the external bus.
"""
def __init__(self, kernel):
self.kernel = kernel
self.eventKeys = {}
self.handlers={}
# if any calls are made to register prior to
# the external bus being connected, we collect the
# registrations and pump them through once ready.
self.preInitRegistrations=[]
self.preInitEvents=[]
def joinExternalBus(self):
""" Called once the plugins have been loaded. """
externalBus = self.kernel.plugins['eventbus']
setattr(self, 'register', externalBus.register)
setattr(self, 'deregister', externalBus.deregister)
setattr(self, 'fireEvent', externalBus.fireEvent)
setattr(self, 'getRegisteredExchanges', externalBus.getRegisteredExchanges)
# push through any pre-init registrations
while self.preInitRegistrations:
args, kwargs = self.preInitRegistrations.pop(0)
self.register(*args, **kwargs)
self.kernel.logger.debug("Pre-init registration for %s " % args[0])
# push through any pre-init events
if self.preInitEvents:
self.kernel.logger.debug("Pre-init events being fired (%d) " % len(self.preInitEvents))
while self.preInitEvents:
args, kwargs = self.preInitEvents.pop(0)
self.fireEvent(*args, **kwargs)
def registerInternal(self, key, handler):
""" Register handler for internal events keyed on 'key'.
Handler must be an instance of AbstractEventHandler"""
if not isinstance(handler, AbstractEventHandler):
raise MessagingError("Internal subscription to %s attempted with invalid handler: %s" % (key, str(handler)))
try:
handlers = self.eventKeys[key]
if handler not in handlers:
handlers.append(handler)
except KeyError:
self.eventKeys[key] = [handler]
try:
keys = self.handlers[handler]
if key not in keys:
keys.append(key)
except KeyError:
self.handlers[handler] = [key]
def deregisterInternal(self, handler):
""" De-register this handler for internal events. """
try:
eventKeys = self.handlers[handler]
except Exception, e:
self.kernel.logger.debug("Attempt to remove unregistered internal handler!")
return
for key in eventKeys:
self.eventKeys[key].remove(handler)
# if this was the one and only listener,
# remove the entry in the keys list
if not self.eventKeys[key]:
del(self.eventKeys[key])
del(self.handlers[handler])
def fireInternalEvent(self, key, **kargs):
""" Fire internal event which is a dictionary composed
of the kwargs of this method. """
try:
handlers = self.eventKeys[key]
for handler in handlers:
handler.eventReceived(kargs, None, key)
except KeyError:
# no-one interested in this event
pass
def register(self, *args, **kwargs):
""" Temporary method that collects calls to register prior to
the external event bus 'register' being hooked in. """
self.preInitRegistrations.append((args, kwargs))
def fireEvent(self, *args, **kwargs):
""" Temporary method that collects events to be fired as soon
as the external event bus is initialised. """
self.preInitEvents.append((args, kwargs))
def getRegisteredExchanges(self):
""" Temporary method that returns an empty list. """
return []
class AbstractEventBusPlugin(object):
""" Define all methods that the plugins must provide
to be a valid provider for the dispatcher.
"""
def register(self, key, handler, exchange):
""" Register 'handler' for events broadcast on 'exchange'
with routing key/topic 'key'. Handler is an instance of
peloton.events.AbstractEventHandler.
An implementation of the Event Bus MUST permit a single handler
to be registered for multiple events with multiple calls to register."""
raise NotImplementedError
def deregister(self, handler):
""" De-register the specified handler from the event to which it
was bound. """
raise NotImplementedError
def fireEvent(self, key, exchange='events', **kargs):
""" Fire an event on the specified exchange with the
specified routing key. All other keyword arguments are made
into the event message. """
raise NotImplementedError
class DebugEventHandler(AbstractEventHandler):
""" Dump message to the logger with which the handler is initialised. """
def __init__(self, logger):
AbstractEventHandler.__init__(self)
self.logger = logger
def eventReceived(self, msg, exchange='', key='', ctag=''):
self.logger.info("%s: %s.%s | %s" % (ctag, exchange, key, str(msg)))
class MethodEventHandler(AbstractEventHandler):
"""Initialise with a callable that accepts the
four arguments msg, exchange, key and ctag. This handler
will simply pass the call through. """
def __init__(self, f):
AbstractEventHandler.__init__(self)
self.f = f
def eventReceived(self, msg, exchange='', key='', ctag=''):
self.f(msg, exchange, key, ctag)
class RemoteEventHandler(AbstractEventHandler):
"""Server side handler that takes a Referenceable that
provides remote_eventReceived and reflects the call through.
This is the proxy for a remote handler. """
def __init__(self, remoteHandler):
AbstractEventHandler.__init__(self)
self.remoteHandler = remoteHandler
def eventReceived(self, msg, exchange='', key='', ctag=''):
self.remoteHandler.callRemote('eventReceived', msg, exchange, \
key, ctag)
from Queue import Queue
class QueueEventHandler(AbstractEventHandler, Queue):
"""Queue implementation that is a Peloton event handler;
events will be placed on the Queue for subsequent consumption.
All the benefits of queue, all the benefits of an event handler!
Use to handle events asynchronously or as a place from which
multiple threads can pick events off in turn.
"""
def __init__(self, *args, **kwargs):
AbstractEventHandler.__init__(self)
Queue.__init__(self, *args, **kwargs)
def eventReceived(self, msg, exchange='', key='', ctag=''):
self.put((msg, exchange, key, ctag))
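# A minimal sketch of the internal routing path (no external AMQP plugin
# involved): events fired with fireInternalEvent() are handed to every
# handler registered for that key, so a QueueEventHandler can buffer them
# for later consumption. The 'kernel' argument is assumed to be a Peloton
# kernel instance; only its logger is touched by the code exercised here.
#
#     dispatcher = EventDispatcher(kernel)
#     inbox = QueueEventHandler()
#     dispatcher.registerInternal('service.started', inbox)
#     dispatcher.fireInternalEvent('service.started', name='mapper')
#     msg, exchange, key, ctag = inbox.get()
#     # msg == {'name': 'mapper'}, exchange is None, key == 'service.started'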
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/events.py",
"copies": "1",
"size": "8619",
"license": "bsd-3-clause",
"hash": -4803046973787079000,
"line_mean": 38.3561643836,
"line_max": 120,
"alpha_frac": 0.6592412113,
"autogenerated": false,
"ratio": 4.493743482794578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5652984694094577,
"avg_score": null,
"num_lines": null
} |
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts
def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=1, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode" .
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment
def internals(input_string, source_path=None, destination_path=None,
input_encoding='unicode', settings_overrides=None):
"""
Return the document tree and publisher, for exploring Docutils internals.
Parameters: see `html_parts()`.
"""
if settings_overrides:
overrides = settings_overrides.copy()
else:
overrides = {}
overrides['input_encoding'] = input_encoding
output, pub = core.publish_programmatically(
source_class=io.StringInput, source=input_string,
source_path=source_path,
destination_class=io.NullOutput, destination=None,
destination_path=destination_path,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='null',
settings=None, settings_spec=None, settings_overrides=overrides,
config_section=None, enable_exit_status=None)
return pub.writer.document, pub
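# A short usage sketch for the helpers above (requires Docutils):
#
#     from docutils.examples import html_body
#     fragment = html_body(u'*hello*, world')
#     # fragment now holds the <body> contents, with *hello* rendered
#     # as an <em> element.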
| {
"repo_name": "adieu/allbuttonspressed",
"path": "docutils/examples.py",
"copies": "66",
"size": "3940",
"license": "bsd-3-clause",
"hash": 2305290964698974200,
"line_mean": 40.0416666667,
"line_max": 77,
"alpha_frac": 0.6870558376,
"autogenerated": false,
"ratio": 4.138655462184874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00030193236714975844,
"num_lines": 96
} |
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts
def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=1, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode" .
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment
def internals(input_string, source_path=None, destination_path=None,
input_encoding='unicode', settings_overrides=None):
"""
Return the document tree and publisher, for exploring Docutils internals.
Parameters: see `html_parts()`.
"""
if settings_overrides:
overrides = settings_overrides.copy()
else:
overrides = {}
overrides['input_encoding'] = input_encoding
output, pub = core.publish_programmatically(
source_class=io.StringInput, source=input_string,
source_path=source_path,
destination_class=io.NullOutput, destination=None,
destination_path=destination_path,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='null',
settings=None, settings_spec=None, settings_overrides=overrides,
config_section=None, enable_exit_status=None)
return pub.writer.document, pub
| {
"repo_name": "rimbalinux/LMD3",
"path": "docutils/examples.py",
"copies": "2",
"size": "4036",
"license": "bsd-3-clause",
"hash": -5802761906913425000,
"line_mean": 40.0416666667,
"line_max": 77,
"alpha_frac": 0.6707135778,
"autogenerated": false,
"ratio": 4.208550573514077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5879264151314076,
"avg_score": null,
"num_lines": null
} |
import sys
sys.path.append( "../../glapi_parser" )
import apiutil
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE AUTOMATICALLY GENERATED BY expando.py SCRIPT */
#include <stdio.h>
#include "cr_error.h"
#include "cr_spu.h"
#include "cr_dlm.h"
#include "expandospu.h"
"""
allFunctions = []
generatedFunctions = []
for func_name in apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt"):
if apiutil.FindSpecial("expando", func_name):
allFunctions.append(func_name)
elif apiutil.CanCompile(func_name) or apiutil.SetsClientState(func_name):
generatedFunctions.append(func_name)
allFunctions.append(func_name)
for func_name in generatedFunctions:
params = apiutil.Parameters(func_name)
return_type = apiutil.ReturnType(func_name)
basicCallString = apiutil.MakeCallString(params)
declarationString = apiutil.MakeDeclarationString(params)
dlmCallString = basicCallString
chromiumProps = apiutil.ChromiumProps(func_name)
needClientState = 0
if apiutil.UsesClientState(func_name):
dlmCallString = basicCallString + ", clientState"
needClientState = 1
needDL = 0
if apiutil.CanCompile(func_name):
needDL = 1
print 'static %s EXPANDOSPU_APIENTRY expando%s( %s )' % ( return_type, func_name, declarationString)
print '{'
if needDL:
print '\tGLenum dlMode = crDLMGetCurrentMode();'
if needClientState:
print '\tCRContext *stateContext = crStateGetCurrent();'
print '\tCRClientState *clientState = NULL;'
print '\tif (stateContext != NULL) {'
print '\t\tclientState = &(stateContext->client);'
print '\t}'
if needDL:
if "checklist" in chromiumProps:
print '\tif (dlMode != GL_FALSE && crDLMCheckList%s(%s)) {' % (func_name, basicCallString)
else:
print '\tif (dlMode != GL_FALSE) {'
print '\t\tcrDLMCompile%s(%s);' % (func_name, dlmCallString)
# If we're only compiling, return now.
print '\t\tif (dlMode == GL_COMPILE) return %s;' % ('0' if return_type != "void" else "")
print '\t}'
# If it gets this far, we're either just executing, or executing
# and compiling. Either way, pass the call to the super SPU,
# and to the state tracker (if appropriate; note that we only
# track client-side state, not all state).
if return_type != "void":
print '\t%s rc = expando_spu.super.%s(%s);' % (return_type, func_name, basicCallString)
else:
print '\texpando_spu.super.%s(%s);' % (func_name, basicCallString)
if apiutil.SetsClientState(func_name):
print '\tcrState%s( %s );' % (func_name, basicCallString)
if return_type != "void":
print "\treturn rc;"
print '}'
print ''
# Generate the table of named functions, including all the static generated
# functions as well as the special functions.
print 'SPUNamedFunctionTable _cr_expando_table[] = {'
for func_name in allFunctions:
print '\t{ "%s", (SPUGenericFunction) expando%s },' % (func_name, func_name )
print '\t{ NULL, NULL }'
print '};'
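# For reference, the loop above emits one C wrapper per compilable entry
# point. For a hypothetical void function "Foo" taking a single GLint, the
# generated code has roughly this shape (illustrative, not verbatim build
# output):
#
#   static void EXPANDOSPU_APIENTRY expandoFoo( GLint x )
#   {
#       GLenum dlMode = crDLMGetCurrentMode();
#       if (dlMode != GL_FALSE) {
#           crDLMCompileFoo(x);
#           if (dlMode == GL_COMPILE) return ;
#       }
#       expando_spu.super.Foo(x);
#   }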
| {
"repo_name": "egraba/vbox_openbsd",
"path": "VirtualBox-5.0.0/src/VBox/HostServices/SharedOpenGL/expando/expando.py",
"copies": "3",
"size": "2992",
"license": "mit",
"hash": -7037906131233853000,
"line_mean": 31.8791208791,
"line_max": 101,
"alpha_frac": 0.7075534759,
"autogenerated": false,
"ratio": 3.007035175879397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214588651779397,
"avg_score": null,
"num_lines": null
} |
"""$Id: extension.py 750 2007-04-06 18:40:28Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net>, Mark Pilgrim <http://diveintomark.org/> and Phil Ringnalda <http://philringnalda.com>"
__version__ = "$Revision: 750 $"
__date__ = "$Date: 2007-04-06 18:40:28 +0000 (Fri, 06 Apr 2007) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby, Mark Pilgrim and Phil Ringnalda"
from validators import *
from logging import *
########################################################################
# Extensions that are valid everywhere #
########################################################################
class extension_everywhere:
def do_dc_title(self):
return text(), noduplicates()
def do_dc_description(self):
return text(), noduplicates()
def do_dc_publisher(self):
if "webMaster" in self.children:
self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
return text() # duplicates allowed
def do_dc_contributor(self):
return text() # duplicates allowed
def do_dc_type(self):
return text(), noduplicates()
def do_dc_format(self):
return text(), noduplicates()
def do_dc_identifier(self):
return text()
def do_dc_source(self):
if "source" in self.children:
self.log(DuplicateItemSemantics({"core":"source", "ext":"dc:source"}))
return text(), noduplicates()
def do_dc_language(self):
if "language" in self.children:
self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
return iso639(), noduplicates()
def do_dc_relation(self):
return text(), # duplicates allowed
def do_dc_coverage(self):
return text(), # duplicates allowed
def do_dc_rights(self):
if "copyright" in self.children:
self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
return nonhtml(), noduplicates()
def do_dcterms_alternative(self):
return text() #duplicates allowed
def do_dcterms_abstract(self):
return text(), noduplicates()
def do_dcterms_tableOfContents(self):
return rdfResourceURI(), noduplicates()
def do_dcterms_created(self):
return w3cdtf(), noduplicates()
def do_dcterms_valid(self):
return eater()
def do_dcterms_available(self):
return eater()
def do_dcterms_issued(self):
return w3cdtf(), noduplicates()
def do_dcterms_modified(self):
if "lastBuildDate" in self.children:
self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
return w3cdtf(), noduplicates()
def do_dcterms_dateAccepted(self):
return text(), noduplicates()
def do_dcterms_dateCopyrighted(self):
return text(), noduplicates()
def do_dcterms_dateSubmitted(self):
return text(), noduplicates()
def do_dcterms_extent(self):
return positiveInteger(), nonblank(), noduplicates()
# def do_dcterms_medium(self):
# spec defines it as something that should never be used
# undefined element'll do for now
def do_dcterms_isVersionOf(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_hasVersion(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_isReplacedBy(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_replaces(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_isRequiredBy(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_requires(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_isPartOf(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_hasPart(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_isReferencedBy(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_references(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_isFormatOf(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_hasFormat(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_conformsTo(self):
return rdfResourceURI() # duplicates allowed
def do_dcterms_spatial(self):
return eater()
def do_dcterms_temporal(self):
return eater()
def do_dcterms_audience(self):
return text()
def do_dcterms_mediator(self):
return text(), noduplicates()
# added to DMCI, but no XML mapping has been defined
def do_dcterms_accessRights(self):
return eater()
def do_dcterms_accrualMethod(self):
return eater()
def do_dcterms_accrualPeriodicity(self):
return eater()
def do_dcterms_accrualPolicy(self):
return eater()
def do_dcterms_bibliographicCitation(self):
return eater()
def do_dcterms_educationLevel(self):
return eater()
def do_dcterms_instructionalMethod(self):
return eater()
def do_dcterms_license(self):
return eater()
def do_dcterms_provenance(self):
return eater()
def do_dcterms_rightsHolder(self):
return eater()
def do_rdfs_seeAlso(self):
return rdfResourceURI() # duplicates allowed
def do_geo_Point(self):
return geo_point()
def do_geo_lat(self):
return latitude()
def do_geo_long(self):
return longitude()
def do_geo_alt(self):
return decimal()
def do_geourl_latitude(self):
return latitude()
def do_geourl_longitude(self):
return longitude()
def do_icbm_latitude(self):
return latitude()
def do_icbm_longitude(self):
return longitude()
########################################################################
# Extensions that are valid at either the channel or item levels #
########################################################################
from media import media_elements, media_content, media_group
class extension_channel_item(extension_everywhere, media_elements):
def do_taxo_topics(self):
return eater()
def do_l_link(self):
return l_link()
########################################################################
# Extensions that are valid at only at the item level #
########################################################################
class extension_item(extension_channel_item):
def do_annotate_reference(self):
return rdfResourceURI(), noduplicates()
def do_ag_source(self):
return text(), noduplicates()
def do_ag_sourceURL(self):
return rfc2396_full(), noduplicates()
def do_ag_timestamp(self):
return iso8601(), noduplicates()
def do_ev_startdate(self):
return iso8601(), noduplicates()
def do_ev_enddate(self):
return iso8601(), noduplicates()
def do_ev_location(self):
return eater()
def do_ev_organizer(self):
return eater()
def do_ev_type(self):
return text(), noduplicates()
def do_foaf_maker(self):
return eater()
def do_foaf_primaryTopic(self):
return eater()
def do_slash_comments(self):
return nonNegativeInteger()
def do_slash_section(self):
return text()
def do_slash_department(self):
return text()
def do_slash_hit_parade(self):
return commaSeparatedIntegers(), noduplicates()
def do_thr_children(self):
return eater()
def do_thr_in_reply_to(self):
return in_reply_to()
def do_wfw_comment(self):
return rfc2396_full(), noduplicates()
def do_wfw_commentRss(self):
return rfc2396_full(), noduplicates()
def do_wfw_commentRSS(self):
self.log(CommentRSS({"parent":self.parent.name, "element":self.name}))
return rfc2396_full(), noduplicates()
def do_wiki_diff(self):
return text()
def do_wiki_history(self):
return text()
def do_wiki_importance(self):
return text()
def do_wiki_status(self):
return text()
def do_wiki_version(self):
return text()
def do_g_actor(self):
return nonhtml(), noduplicates()
def do_g_age(self):
return nonNegativeInteger(), noduplicates()
def do_g_agent(self):
return nonhtml(), noduplicates()
def do_g_area(self):
return nonhtml(), noduplicates() # intUnit
def do_g_apparel_type(self):
return nonhtml(), noduplicates()
def do_g_artist(self):
return nonhtml(), noduplicates()
def do_g_author(self):
return nonhtml(), noduplicates()
def do_g_bathrooms(self):
return nonNegativeInteger(), noduplicates()
def do_g_bedrooms(self):
return nonNegativeInteger(), noduplicates()
def do_g_brand(self):
return nonhtml(), noduplicates()
def do_g_calories(self):
return g_float(), noduplicates()
def do_g_cholesterol(self):
return g_float(), noduplicates()
def do_g_color(self):
return nonhtml(), noduplicates()
def do_g_cooking_time(self):
return g_float(), noduplicates()
def do_g_condition(self):
return nonhtml(), noduplicates()
def do_g_course(self):
return nonhtml(), noduplicates()
def do_g_course_date_range(self):
return g_dateTimeRange(), noduplicates()
def do_g_course_number(self):
return nonhtml(), noduplicates()
def do_g_course_times(self):
return nonhtml(), noduplicates()
def do_g_cuisine(self):
return nonhtml(), noduplicates()
def do_g_currency(self):
return iso4217(), noduplicates()
def do_g_delivery_notes(self):
return nonhtml(), noduplicates()
def do_g_delivery_radius(self):
return floatUnit(), noduplicates()
def do_g_education(self):
return nonhtml(), noduplicates()
def do_g_employer(self):
return nonhtml(), noduplicates()
def do_g_ethnicity(self):
return nonhtml(), noduplicates()
def do_g_event_date_range(self):
return g_dateTimeRange(), noduplicates()
def do_g_expiration_date(self):
return iso8601_date(), noduplicates()
def do_g_expiration_date_time(self):
return iso8601(), noduplicates()
def do_g_fiber(self):
return g_float(), noduplicates()
def do_g_from_location(self):
return g_locationType(), noduplicates()
def do_g_gender(self):
return g_genderEnumeration(), noduplicates()
def do_g_hoa_dues(self):
return g_float(), noduplicates()
def do_g_format(self):
return nonhtml(), noduplicates()
def do_g_id(self):
return nonhtml(), noduplicates()
def do_g_image_link(self):
return rfc2396_full(), maxten()
def do_g_immigration_status(self):
return nonhtml(), noduplicates()
def do_g_interested_in(self):
return nonhtml(), noduplicates()
def do_g_isbn(self):
return nonhtml(), noduplicates()
def do_g_job_function(self):
return nonhtml(), noduplicates()
def do_g_job_industry(self):
return nonhtml(), noduplicates()
def do_g_job_type(self):
return nonhtml(), noduplicates()
def do_g_label(self):
return g_labelType(), maxten()
def do_g_listing_type(self):
return truefalse(), noduplicates()
def do_g_location(self):
return g_full_locationType(), noduplicates()
def do_g_main_ingredient(self):
return nonhtml(), noduplicates()
def do_g_make(self):
return nonhtml(), noduplicates()
def do_g_manufacturer(self):
return nonhtml(), noduplicates()
def do_g_manufacturer_id(self):
return nonhtml(), noduplicates()
def do_g_marital_status(self):
return g_maritalStatusEnumeration(), noduplicates()
def do_g_meal_type(self):
return nonhtml(), noduplicates()
def do_g_megapixels(self):
return floatUnit(), noduplicates()
def do_g_memory(self):
return floatUnit(), noduplicates()
def do_g_mileage(self):
return g_intUnit(), noduplicates()
def do_g_model(self):
return nonhtml(), noduplicates()
def do_g_model_number(self):
return nonhtml(), noduplicates()
def do_g_name_of_item_being_reviewed(self):
return nonhtml(), noduplicates()
def do_g_news_source(self):
return nonhtml(), noduplicates()
def do_g_occupation(self):
return nonhtml(), noduplicates()
def do_g_payment_notes(self):
return nonhtml(), noduplicates()
def do_g_pages(self):
return positiveInteger(), nonblank(), noduplicates()
def do_g_payment_accepted(self):
return g_paymentMethodEnumeration()
def do_g_pickup(self):
return truefalse(), noduplicates()
def do_g_preparation_time(self):
return floatUnit(), noduplicates()
def do_g_price(self):
return floatUnit(), noduplicates()
def do_g_price_type(self):
return g_priceTypeEnumeration(), noduplicates()
def do_g_processor_speed(self):
return floatUnit(), noduplicates()
def do_g_product_type(self):
return nonhtml(), noduplicates()
def do_g_property_type(self):
return nonhtml(), noduplicates()
def do_g_protein(self):
return floatUnit(), noduplicates()
def do_g_publication_name(self):
return nonhtml(), noduplicates()
def do_g_publication_volume(self):
return nonhtml(), noduplicates()
def do_g_publish_date(self):
return iso8601_date(), noduplicates()
def do_g_quantity(self):
return nonNegativeInteger(), nonblank(), noduplicates()
def do_g_rating(self):
return g_ratingTypeEnumeration(), noduplicates()
def do_g_review_type(self):
return nonhtml(), noduplicates()
def do_g_reviewer_type(self):
return g_reviewerTypeEnumeration(), noduplicates()
def do_g_salary(self):
return g_float(), noduplicates()
def do_g_salary_type(self):
return g_salaryTypeEnumeration(), noduplicates()
def do_g_saturated_fat(self):
return g_float(), noduplicates()
def do_g_school_district(self):
return nonhtml(), noduplicates()
def do_g_service_type(self):
return nonhtml(), noduplicates()
def do_g_servings(self):
return g_float(), noduplicates()
def do_g_sexual_orientation(self):
return nonhtml(), noduplicates()
def do_g_size(self):
return nonhtml(), noduplicates() # TODO: expressed in either two or three dimensions.
def do_g_shipping(self):
return g_shipping(), noduplicates()
def do_g_sodium(self):
return g_float(), noduplicates()
def do_g_subject(self):
return nonhtml(), noduplicates()
def do_g_subject_area(self):
return nonhtml(), noduplicates()
def do_g_tax_percent(self):
return percentType(), noduplicates()
def do_g_tax_region(self):
return nonhtml(), noduplicates()
def do_g_to_location(self):
return g_locationType(), noduplicates()
def do_g_total_carbs(self):
return g_float(), noduplicates()
def do_g_total_fat(self):
return g_float(), noduplicates()
def do_g_travel_date_range(self):
return g_dateTimeRange(), noduplicates()
def do_g_university(self):
return nonhtml(), noduplicates()
def do_g_upc(self):
return nonhtml(), noduplicates()
def do_g_url_of_item_being_reviewed(self):
return rfc2396_full(), noduplicates()
def do_g_vehicle_type(self):
return nonhtml(), noduplicates()
def do_g_vin(self):
return nonhtml(), noduplicates()
def do_g_weight(self):
return floatUnit(), noduplicates()
def do_g_year(self):
return g_year(), noduplicates()
def do_media_group(self):
return media_group()
def do_media_content(self):
return media_content()
def do_georss_where(self):
return georss_where()
def do_georss_point(self):
return gml_pos()
def do_georss_line(self):
return gml_posList()
def do_georss_polygon(self):
return gml_posList()
def do_georss_featuretypetag(self):
return text()
def do_georss_relationshiptag(self):
return text()
def do_georss_featurename(self):
return text()
def do_georss_elev(self):
return decimal()
def do_georss_floor(self):
return Integer()
def do_georss_radius(self):
return Float()
class georss_where(validatorBase):
def do_gml_Point(self):
return gml_point()
def do_gml_LineString(self):
return gml_line()
def do_gml_Polygon(self):
return gml_polygon()
def do_gml_Envelope(self):
return gml_envelope()
class geo_srsName(validatorBase):
def getExpectedAttrNames(self):
return [(None, u'srsName')]
class gml_point(geo_srsName):
def do_gml_pos(self):
return gml_pos()
class geo_point(validatorBase):
def do_geo_lat(self):
return latitude()
def do_geo_long(self):
return longitude()
def validate(self):
if "geo_lat" not in self.children:
self.log(MissingElement({"parent":self.name.replace('_',':'), "element":"geo:lat"}))
if "geo_long" not in self.children:
self.log(MissingElement({"parent":self.name.replace('_',':'), "element":"geo:long"}))
class gml_pos(text):
def validate(self):
if not re.match('^[-+]?\d+\.?\d*[ ,][-+]?\d+\.?\d*$', self.value):
return self.log(InvalidCoord({'value':self.value}))
if self.value.find(',')>=0:
self.log(CoordComma({'value':self.value}))
class gml_line(geo_srsName):
def do_gml_posList(self):
return gml_posList()
class gml_posList(text):
def validate(self):
if self.value.find(',')>=0:
# ensure that commas are only used to separate lat and long
if not re.match('^[-+.0-9]+[, ][-+.0-9]( [-+.0-9]+[, ][-+.0-9])+$',
self.value.strip()):
return self.log(InvalidCoordList({'value':self.value}))
self.log(CoordComma({'value':self.value}))
self.value=self.value.replace(',',' ')
values = self.value.strip().split()
if len(values)<3 or len(values)%2 == 1:
return self.log(InvalidCoordList({'value':self.value}))
for value in values:
if not re.match('^[-+]?\d+\.?\d*$', value):
return self.log(InvalidCoordList({'value':value}))
class gml_polygon(geo_srsName):
def do_gml_exterior(self):
return gml_exterior()
class gml_exterior(validatorBase):
def do_gml_LinearRing(self):
return gml_linearRing()
class gml_linearRing(geo_srsName):
def do_gml_posList(self):
return gml_posList()
class gml_envelope(geo_srsName):
def do_gml_lowerCorner(self):
return gml_pos()
def do_gml_upperCorner(self):
return gml_pos()
class access_restriction(enumeration):
error = InvalidAccessRestrictionRel
valuelist = ["allow", "deny"]
def getExpectedAttrNames(self):
return [(None, u'relationship')]
def prevalidate(self):
self.children.append(True) # force warnings about "mixed" content
if not self.attrs.has_key((None,"relationship")):
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"relationship"}))
else:
self.value=self.attrs.getValue((None,"relationship"))
########################################################################
# Extensions that are valid at only at the RSS 2.0 item level #
########################################################################
class extension_rss20_item(extension_item):
def do_trackback_ping(self):
return rfc2396_full(), noduplicates()
def do_trackback_about(self):
return rfc2396_full()
def do_dcterms_accessRights(self):
return eater()
def do_dcterms_accrualMethod(self):
return eater()
def do_dcterms_accrualPeriodicity(self):
return eater()
def do_dcterms_accrualPolicy(self):
return eater()
def do_dcterms_bibliographicCitation(self):
return eater()
def do_dcterms_educationLevel(self):
return eater()
def do_dcterms_instructionalMethod(self):
return eater()
def do_dcterms_license(self):
return eater()
def do_dcterms_provenance(self):
return eater()
def do_dcterms_rightsHolder(self):
return eater()
########################################################################
# Extensions that are valid at only at the RSS 1.0 item level #
########################################################################
class extension_rss10_item(extension_item):
def do_trackback_ping(self):
return rdfResourceURI(), noduplicates()
def do_trackback_about(self):
return rdfResourceURI()
def do_l_permalink(self):
return l_permalink()
class l_permalink(rdfResourceURI, MimeType):
lNS = u'http://purl.org/rss/1.0/modules/link/'
def getExpectedAttrNames(self):
return rdfResourceURI.getExpectedAttrNames(self) + [(self.lNS, u'type')]
def validate(self):
if (self.lNS, 'type') in self.attrs.getNames():
self.value=self.attrs.getValue((self.lNS, 'type'))
MimeType.validate(self)
return rdfResourceURI.validate(self)
class l_link(rdfResourceURI, MimeType):
lNS = u'http://purl.org/rss/1.0/modules/link/'
def getExpectedAttrNames(self):
return rdfResourceURI.getExpectedAttrNames(self) + [
(self.lNS, u'lang'), (self.lNS, u'rel'),
(self.lNS, u'type'), (self.lNS, u'title')
]
def prevalidate(self):
self.validate_optional_attribute((self.lNS,'lang'), iso639)
self.validate_required_attribute((self.lNS,'rel'), rfc2396_full)
self.validate_optional_attribute((self.lNS,'title'), nonhtml)
if self.attrs.has_key((self.lNS, "type")):
if self.attrs.getValue((self.lNS, "type")).find(':') < 0:
self.validate_optional_attribute((self.lNS,'type'), MimeType)
else:
self.validate_optional_attribute((self.lNS,'type'), rfc2396_full)
########################################################################
# Extensions that are valid at only at the Atom entry level #
########################################################################
class extension_entry(extension_item):
def do_dc_creator(self): # atom:creator
return text() # duplicates allowed
def do_dc_subject(self): # atom:category
return text() # duplicates allowed
def do_dc_date(self): # atom:published
return w3cdtf(), noduplicates()
def do_creativeCommons_license(self):
return rfc2396_full()
def do_trackback_ping(self):
return rfc2396_full(), noduplicates()
# XXX This should have duplicate semantics with link[@rel='related']
def do_trackback_about(self):
return rfc2396_full()
########################################################################
# Extensions that are valid at only at the channel level #
########################################################################
class extension_channel(extension_channel_item):
def do_admin_generatorAgent(self):
if "generator" in self.children:
self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
return admin_generatorAgent(), noduplicates()
def do_admin_errorReportsTo(self):
return admin_errorReportsTo(), noduplicates()
def do_blogChannel_blogRoll(self):
return rfc2396_full(), noduplicates()
def do_blogChannel_mySubscriptions(self):
return rfc2396_full(), noduplicates()
def do_blogChannel_blink(self):
return rfc2396_full(), noduplicates()
def do_blogChannel_changes(self):
return rfc2396_full(), noduplicates()
def do_sy_updatePeriod(self):
return sy_updatePeriod(), noduplicates()
def do_sy_updateFrequency(self):
return positiveInteger(), nonblank(), noduplicates()
def do_sy_updateBase(self):
return w3cdtf(), noduplicates()
def do_foaf_maker(self):
return eater()
def do_cp_server(self):
return rdfResourceURI()
def do_wiki_interwiki(self):
return text()
def do_thr_in_reply_to(self):
return in_reply_to()
def do_cf_listinfo(self):
from cf import listinfo
return listinfo()
def do_cf_treatAs(self):
from cf import treatAs
return treatAs()
def do_opensearch_totalResults(self):
return nonNegativeInteger(), noduplicates()
def do_opensearch_startIndex(self):
return Integer(), noduplicates()
def do_opensearch_itemsPerPage(self):
return nonNegativeInteger(), noduplicates()
def do_opensearch_Query(self):
from opensearch import Query
return Query()
def do_xhtml_div(self):
return eater()
def do_xhtml_meta(self):
return xhtml_meta()
class xhtml_meta(validatorBase):
def getExpectedAttrNames(self):
return [ (None, u'name'), (None, u'content') ]
def prevalidate(self):
self.validate_required_attribute((None,'name'), xhtmlMetaEnumeration)
self.validate_required_attribute((None,'content'), robotsEnumeration)
class xhtmlMetaEnumeration(caseinsensitive_enumeration):
error = InvalidMetaName
valuelist = ["robots"]
class robotsEnumeration(caseinsensitive_enumeration):
error = InvalidMetaContent
valuelist = [
"all", "none",
"index", "index,follow", "index,nofollow",
"noindex", "noindex,follow", "noindex,nofollow",
"follow", "follow,index", "follow,noindex",
"nofollow", "nofollow,index", "nofollow,noindex"]
########################################################################
# Extensions that are valid at only at the Atom feed level #
########################################################################
class extension_feed(extension_channel):
def do_dc_creator(self): # atom:creator
return text() # duplicates allowed
def do_dc_subject(self): # atom:category
return text() # duplicates allowed
def do_dc_date(self): # atom:updated
return w3cdtf(), noduplicates()
def do_creativeCommons_license(self):
return rfc2396_full()
def do_access_restriction(self):
return access_restriction()
########################################################################
# Validators #
########################################################################
class admin_generatorAgent(rdfResourceURI): pass
class admin_errorReportsTo(rdfResourceURI): pass
class sy_updatePeriod(text):
def validate(self):
if self.value not in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
self.log(InvalidUpdatePeriod({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ValidUpdatePeriod({"parent":self.parent.name, "element":self.name, "value":self.value}))
class g_complex_type(validatorBase):
def getExpectedAttrNames(self):
if self.getFeedType() == TYPE_RSS1:
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')]
else:
return []
class g_shipping(g_complex_type):
def do_g_service(self):
return g_serviceTypeEnumeration(), noduplicates()
def do_g_country(self):
return iso3166(), noduplicates()
def do_g_price(self):
return floatUnit(), noduplicates()
class g_dateTimeRange(g_complex_type):
def do_g_start(self):
return iso8601(), noduplicates()
def do_g_end(self):
return iso8601(), noduplicates()
class g_labelType(text):
def validate(self):
if self.value.find(',')>=0:
self.log(InvalidLabel({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_locationType(text):
def validate(self):
if len(self.value.split(',')) not in [2,3]:
self.log(InvalidLocation({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_full_locationType(text):
def validate(self):
fields = self.value.split(',')
if len(fields) != 5 or 0 in [len(f.strip()) for f in fields]:
self.log(InvalidFullLocation({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_genderEnumeration(enumeration):
error = InvalidGender
valuelist = ["Male", "M", "Female", "F"]
class g_maritalStatusEnumeration(enumeration):
error = InvalidMaritalStatus
valuelist = ["single", "divorced", "separated", "widowed", "married", "in relationship"]
class g_paymentMethodEnumeration(enumeration):
error = InvalidPaymentMethod
valuelist = ["Cash", "Check", "Visa", "MasterCard",
"AmericanExpress", "Discover", "WireTransfer"]
class g_priceTypeEnumeration(enumeration):
error = InvalidPriceType
valuelist = ["negotiable", "starting"]
class g_ratingTypeEnumeration(enumeration):
error = InvalidRatingType
valuelist = ["1", "2", "3", "4", "5"]
class g_reviewerTypeEnumeration(enumeration):
error = InvalidReviewerType
valuelist = ["editorial", "user"]
class g_salaryTypeEnumeration(enumeration):
error = InvalidSalaryType
valuelist = ["starting", "negotiable"]
class g_serviceTypeEnumeration(enumeration):
error = InvalidServiceType
valuelist = ['FedEx', 'UPS', 'DHL', 'Mail', 'Other', 'Overnight', 'Standard']
class g_float(text):
def validate(self):
import re
if not re.match('\d+\.?\d*\s*\w*', self.value):
self.log(InvalidFloat({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class floatUnit(text):
def validate(self):
import re
if not re.match('\d+\.?\d*\s*\w*$', self.value):
self.log(InvalidFloatUnit({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class decimal(text):
def validate(self):
import re
if not re.match('[-+]?\d+\.?\d*\s*$', self.value):
self.log(InvalidFloatUnit({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_year(text):
def validate(self):
import time
try:
year = int(self.value)
if year < 1900 or year > time.localtime()[0]+4: raise InvalidYear
except:
self.log(InvalidYear({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class g_intUnit(text):
def validate(self):
try:
if int(self.value.split(' ')[0].replace(',','')) < 0: raise InvalidIntUnit
except:
self.log(InvalidIntUnit({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class maxten(validatorBase):
def textOK(self):
pass
def prevalidate(self):
if 10 == len([1 for child in self.parent.children if self.name==child]):
self.log(TooMany({"parent":self.parent.name, "element":self.name}))
class in_reply_to(canonicaluri, xmlbase):
def getExpectedAttrNames(self):
return [(None, u'href'), (None, u'ref'), (None, u'source'), (None, u'type')]
def validate(self):
if self.attrs.has_key((None, "href")):
self.value = self.attrs.getValue((None, "href"))
self.name = "href"
xmlbase.validate(self)
if self.attrs.has_key((None, "ref")):
self.value = self.attrs.getValue((None, "ref"))
self.name = "ref"
canonicaluri.validate(self)
if self.attrs.has_key((None, "source")):
self.value = self.attrs.getValue((None, "source"))
self.name = "source"
xmlbase.validate(self)
if self.attrs.has_key((None, "type")):
self.value = self.attrs.getValue((None, "type"))
if not mime_re.match(self.value):
self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.value}))
else:
self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.value}))
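# A standalone sketch of the coordinate check performed by gml_pos above:
# a "lat long" pair separated by a space validates cleanly, a comma
# separator still matches but is flagged with CoordComma, and a lone
# value is rejected.
#
#     import re
#     coord = '^[-+]?\d+\.?\d*[ ,][-+]?\d+\.?\d*$'
#     re.match(coord, '45.256 -71.92')   # matches: valid pair
#     re.match(coord, '45.256,-71.92')   # matches, but logged as CoordComma
#     re.match(coord, '45.256')          # None: single value is invalid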
| {
"repo_name": "slava-sh/NewsBlur",
"path": "vendor/feedvalidator/extension.py",
"copies": "16",
"size": "30895",
"license": "mit",
"hash": -3998763994073540600,
"line_mean": 26.7583108715,
"line_max": 136,
"alpha_frac": 0.641236446,
"autogenerated": false,
"ratio": 3.4795585088410856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: f25618704b7ebe12c191cc1a51055c26db731b85 $
"""
Gadfly extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
from grizzled.db.base import (Cursor, DB, DBDriver, Error, Warning,
TableMetadata, IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class GadflyCursor(Cursor):
def __init__(self, real_cursor, driver):
self.real_cursor = real_cursor
self.driver = driver
@property
def rowcount(self):
total = len(self.real_cursor.fetchall())
self.real_cursor.reset_results()
return total
@property
def description(self):
return self.real_cursor.description
def close(self):
try:
self.real_cursor.close()
except:
raise Error(sys.exc_info()[1])
def execute(self, statement, parameters=None):
try:
if parameters:
result = self.real_cursor.execute(statement, parameters)
else:
result = self.real_cursor.execute(statement)
return result
except:
raise Error(sys.exc_info()[1])
def executemany(self, statement, *parameters):
try:
return self.real_cursor.executemany(statement, *parameters)
except:
raise Error(sys.exc_info()[1])
def fetchall(self):
try:
return self.real_cursor.fetchall()
except:
raise Error(sys.exc_info()[1])
def fetchone(self):
try:
return self.real_cursor.fetchone()
except:
s = sys.exc_info()[1]
if (type(s) == str) and (s.startswith('no more')):
return None
raise Error(s)
def fetchmany(self, n):
try:
return self.real_cursor.fetchmany(n)
except:
s = sys.exc_info()[1]
if (type(s) == str) and (s.startswith('no more')):
return None
raise Error(s)
class GadflyDB(DB):
def __init__(self, db, driver):
DB.__init__(self, db, driver)
self.__db = db
self.__driver = driver
def cursor(self):
return Cursor(GadflyCursor(self.__db.cursor(), self.__driver),
self.__driver)
class GadflyDriver(DBDriver):
"""DB Driver for Gadfly, a pure Python RDBMS"""
def __init__(self):
gadfly = self.get_import()
gadfly.error = Exception()
def get_import(self):
import gadfly
return gadfly
def get_display_name(self):
return "Gadfly"
def connect(self,
host=None,
port=None,
user='',
password='',
database='default'):
gadfly = self.get_import()
directory = os.path.dirname(database)
database = os.path.basename(database)
if database.endswith('.gfd'):
database = database[:-4]
try:
g = gadfly.gadfly()
g.startup(database, directory)
return GadflyDB(g, self)
except IOError:
raise Error(sys.exc_info()[1])
def get_tables(self, cursor):
cursor.execute('SELECT table_name FROM __table_names__ '
'WHERE is_view = 0')
table_names = []
for row in cursor.fetchall():
table_names += [row[0]]
return table_names
def get_rdbms_metadata(self, cursor):
import gadfly
version = '.'.join([str(i) for i in gadfly.version_info])
return RDBMSMetadata('gadfly', 'gadfly', version)
def get_table_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
cursor.execute("SELECT column_name FROM __columns__ "
"WHERE table_name = '%s'" % table.upper())
result = []
column_names = []
for row in cursor.fetchall():
result += [TableMetadata(row[0], 'object', None, None, None, True)]
return result
def get_index_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
cursor.execute("SELECT is_unique, index_name FROM __indices__ "
"WHERE table_name = '%s'" % table.upper())
indexes = []
result = []
for row in cursor.fetchall():
indexes.append(row)
for unique, index_name in indexes:
cursor.execute("SELECT column_name FROM __indexcols__ "
"WHERE index_name = '%s'" % index_name)
cols = []
for row in cursor.fetchall():
cols.append(row[0])
if unique:
description = 'UNIQUE'
else:
description = 'NON-UNIQUE'
result.append(IndexMetadata(index_name, cols, description))
return result
def _is_valid_table(self, cursor, table_name):
tables = self.get_tables(cursor)
return table_name.upper() in tables
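# --- Added usage sketch (hedged): opening a Gadfly database through this
# driver. Requires the third-party `gadfly` package; the database path is
# hypothetical, and the create-vs-open semantics follow gadfly's startup()
# call used in connect() above.
if __name__ == '__main__':
    driver = GadflyDriver()
    db = driver.connect(database='/tmp/demo.gfd')
    cursor = db.cursor()
    cursor.execute('CREATE TABLE frequents (drinker VARCHAR, bar VARCHAR)')
    print 'connected via', driver.get_display_name()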
| {
"repo_name": "illicitonion/givabit",
"path": "lib/sdks/google_appengine_1.7.1/google_appengine/lib/grizzled/grizzled/db/dbgadfly.py",
"copies": "19",
"size": "5495",
"license": "apache-2.0",
"hash": -9154142955900766000,
"line_mean": 28.7027027027,
"line_max": 79,
"alpha_frac": 0.4946314832,
"autogenerated": false,
"ratio": 4.337016574585635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: f38a8ecb542b475e96a9f613b3bd9bc269f2473d $
"""
SQL Server extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
from grizzled.db.base import (DBDriver, Error, Warning, TableMetadata,
IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class SQLServerDriver(DBDriver):
"""DB Driver for Microsoft SQL Server, using the pymssql DB API module."""
def get_import(self):
import pymssql
return pymssql
def get_display_name(self):
return 'SQL Server'
def do_connect(self,
host='localhost',
port=None,
user='',
password='',
database='default'):
dbi = self.get_import()
self.db_name = database
if port == None:
port = '1433'
return dbi.connect(host='%s:%s' % (host, port),
user=user,
password=password,
database=database)
def get_tables(self, cursor):
cursor.execute("select name from %s..sysobjects where xtype = 'U'" %
self.db_name)
table_names = []
rs = cursor.fetchone()
while rs is not None:
table_names += [rs[0]]
rs = cursor.fetchone()
return table_names
def get_rdbms_metadata(self, cursor):
product = ''
version = ''
vendor = 'Microsoft Corporation'
cursor.execute('xp_msver');
rs = cursor.fetchone()
while rs is not None:
name = rs[1].lower()
value = rs[3]
if name == 'productname':
product = value
elif name == 'productversion':
version = value
elif name == 'companyname':
                vendor = value
rs = cursor.fetchone()
return RDBMSMetadata(vendor, product, version)
def get_table_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
dbi = self.get_import()
cursor.execute("SELECT column_name, data_type, " \
"character_maximum_length, numeric_precision, " \
"numeric_scale, is_nullable "\
"FROM information_schema.columns WHERE "\
"LOWER(table_name) = '%s'" % table)
rs = cursor.fetchone()
results = []
while rs is not None:
is_nullable = False
if rs[5] == 'YES':
is_nullable = True
data = TableMetadata(rs[0], rs[1], rs[2], rs[3], rs[4], is_nullable)
results += [data]
rs = cursor.fetchone()
return results
def get_index_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
dbi = self.get_import()
cursor.execute("EXEC sp_helpindex '%s'" % table)
rs = cursor.fetchone()
results_by_name = {}
while rs is not None:
name = rs[0]
description = rs[1]
columns = rs[2].split(', ')
results_by_name[name] = IndexMetadata(name, columns, description)
rs = cursor.fetchone()
names = results_by_name.keys()
names.sort()
result = []
for name in names:
result += [results_by_name[name]]
return result
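# --- Added usage sketch (hedged): host and credentials below are
# placeholders, and the pymssql module must be installed. The base DBDriver
# class is assumed to expose connect(), which delegates to do_connect().
if __name__ == '__main__':
    driver = SQLServerDriver()
    db = driver.connect(host='dbhost', port='1433', user='sa',
                        password='secret', database='master')
    for table in driver.get_tables(db.cursor()):
        print table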
| {
"repo_name": "Kazade/NeHe-Website",
"path": "google_appengine/lib/grizzled/grizzled/db/sqlserver.py",
"copies": "19",
"size": "3923",
"license": "bsd-3-clause",
"hash": -7382432226359239000,
"line_mean": 30.6370967742,
"line_max": 80,
"alpha_frac": 0.4496558756,
"autogenerated": false,
"ratio": 4.670238095238095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020314554886993854,
"num_lines": 124
} |
# $Id: f485c4e2802f66973d04d5047ee9d3e5cfd249ce $
"""
PostgreSQL extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
import re
from grizzled.db.base import (DBDriver, Error, Warning, TableMetadata,
IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
VENDOR = 'PostgreSQL Global Development Group'
PRODUCT = 'PostgreSQL'
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class PostgreSQLDriver(DBDriver):
"""DB Driver for PostgreSQL, using the psycopg2 DB API module."""
TYPE_RE = re.compile('([a-z ]+)(\([0-9]+\))?')
def get_import(self):
import psycopg2
return psycopg2
def get_display_name(self):
return "PostgreSQL"
def do_connect(self,
host='localhost',
port=None,
user='',
password='',
database='default'):
dbi = self.get_import()
dsn = 'host=%s dbname=%s user=%s password=%s' %\
(host, database, user, password)
return dbi.connect(dsn=dsn)
def get_rdbms_metadata(self, cursor):
cursor.execute('SELECT version()')
rs = cursor.fetchone()
if rs is None:
result = RDBMSMetadata(VENDOR, PRODUCT, 'unknown')
else:
result = RDBMSMetadata(VENDOR, PRODUCT, rs[0])
return result
def get_table_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
sel = """\
SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT substring(d.adsrc for 128)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND
d.adnum = a.attnum AND a.atthasdef) AS DEFAULT,
a.attnotnull,
a.attnum,
a.attrelid as table_oid
FROM pg_catalog.pg_attribute a
WHERE a.attrelid =
(SELECT c.oid FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (pg_table_is_visible(c.oid)) AND c.relname = '%s'
AND c.relkind in ('r','v'))
AND a.attnum > 0
AND NOT a.attisdropped
ORDER BY a.attnum"""
cursor.execute(sel % table)
rs = cursor.fetchone()
results = []
while rs is not None:
column = rs[0]
coltype = rs[1]
null = not rs[3]
match = self.TYPE_RE.match(coltype)
if match:
coltype = match.group(1)
size = match.group(2)
if size:
size = size[1:-1]
if 'char' in coltype:
max_char_size = size
precision = None
else:
max_char_size = None
precision = size
data = TableMetadata(column,
coltype,
max_char_size,
precision,
0,
null)
results += [data]
rs = cursor.fetchone()
return results
def get_index_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
# First, issue one query to get the list of indexes for the table.
index_names = self.__get_index_names(table, cursor)
# Now we need two more queries: One to get the columns in the
# index and another to get descriptive information.
results = []
for name in index_names:
columns = self.__get_index_columns(name, cursor)
desc = self.__get_index_description(name, cursor)
results += [IndexMetadata(name, columns, desc)]
return results
def get_tables(self, cursor):
sel = "SELECT tablename FROM pg_tables " \
"WHERE tablename NOT LIKE 'pg_%' AND tablename NOT LIKE 'sql\_%'"
cursor.execute(sel)
table_names = []
rs = cursor.fetchone()
while rs is not None:
table_names += [rs[0]]
rs = cursor.fetchone()
return table_names
def __get_index_names(self, table, cursor):
# Adapted from the pgsql command "\d indexname", PostgreSQL 8.
# (Invoking the pgsql command with -E shows the issued SQL.)
sel = "SELECT n.nspname, c.relname as \"IndexName\", c2.relname " \
"FROM pg_catalog.pg_class c " \
"JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid " \
"JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid " \
"LEFT JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner " \
"LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace " \
"WHERE c.relkind IN ('i','') " \
"AND n.nspname NOT IN ('pg_catalog', 'pg_toast') " \
"AND pg_catalog.pg_table_is_visible(c.oid) " \
"AND c2.relname = '%s'" % table.lower()
cursor.execute(sel)
index_names = []
rs = cursor.fetchone()
while rs is not None:
index_names += [rs[1]]
rs = cursor.fetchone()
return index_names
def __get_index_columns(self, index_name, cursor):
# Adapted from the pgsql command "\d indexname", PostgreSQL 8.
# (Invoking the pgsql command with -E shows the issued SQL.)
sel = "SELECT a.attname, " \
"pg_catalog.format_type(a.atttypid, a.atttypmod), " \
"a.attnotnull " \
"FROM pg_catalog.pg_attribute a, pg_catalog.pg_index i " \
"WHERE a.attrelid in " \
" (SELECT c.oid FROM pg_catalog.pg_class c " \
"LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace " \
" WHERE pg_catalog.pg_table_is_visible(c.oid) " \
"AND c.relname ~ '^(%s)$') " \
"AND a.attnum > 0 AND NOT a.attisdropped " \
"AND a.attrelid = i.indexrelid " \
"ORDER BY a.attnum" % index_name
cursor.execute(sel)
columns = []
rs = cursor.fetchone()
while rs is not None:
columns += [rs[0]]
rs = cursor.fetchone()
return columns
def __get_index_description(self, index_name, cursor):
sel = "SELECT i.indisunique, i.indisprimary, i.indisclustered, " \
"a.amname, c2.relname, " \
"pg_catalog.pg_get_expr(i.indpred, i.indrelid, true) " \
"FROM pg_catalog.pg_index i, pg_catalog.pg_class c, " \
"pg_catalog.pg_class c2, pg_catalog.pg_am a " \
"WHERE i.indexrelid = c.oid AND c.relname ~ '^(%s)$' " \
"AND c.relam = a.oid AND i.indrelid = c2.oid" % index_name
cursor.execute(sel)
desc = ''
rs = cursor.fetchone()
if rs is not None:
if str(rs[1]) == "True":
desc += "(PRIMARY) "
if str(rs[0]) == "True":
desc += "Unique"
else:
desc += "Non-unique"
if str(rs[2]) == "True":
desc += ", clustered"
else:
desc += ", non-clustered"
if rs[3] is not None:
desc += " %s" % rs[3]
desc += ' index'
if desc == '':
desc = None
return desc
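# --- Added usage sketch (hedged): placeholder credentials; psycopg2 must be
# installed and the table 'my_table' is assumed to exist. Shows the
# metadata calls defined above.
if __name__ == '__main__':
    driver = PostgreSQLDriver()
    db = driver.connect(host='localhost', user='postgres',
                        password='secret', database='demo')
    cursor = db.cursor()
    print driver.get_rdbms_metadata(cursor)
    for index in driver.get_index_metadata('my_table', cursor):
        print index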
| {
"repo_name": "gauribhoite/personfinder",
"path": "env/google_appengine/lib/grizzled/grizzled/db/postgresql.py",
"copies": "19",
"size": "7987",
"license": "apache-2.0",
"hash": -2194436294461057500,
"line_mean": 34.1850220264,
"line_max": 80,
"alpha_frac": 0.4768999624,
"autogenerated": false,
"ratio": 4.029767911200807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: f8ce5bf718c826df5fb3cd06701dc2bf6e144acb $
"""
Network-related methods and classes.
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext en'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import urlparse
import shutil
import tempfile
import urllib2
import logging
import os
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['download']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
log = logging.getLogger('grizzled.net')
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def download(url, directory=None, bufsize=8192):
"""
Download the specified URL to a directory. This module properly handles
HTTP authentication for URLs like this one::
https://user:password@localhost:8080/foo/bar/baz.tgz
Note, however, that user/password authentication is only supported for
"http" and "https" URLs.
:Parameters:
url : str
the URL to download
directory : str
The directory to receive the downloaded file. If this parameter is
omitted, ``download()`` will create a temporary directory to
contain the file.
bufsize : int
buffer size to use when reading URL
:rtype: tuple
    :return: A (*downloaded_file*, *download_directory*) tuple
"""
pieces = urlparse.urlparse(url)
path = pieces.path
if not directory:
directory = tempfile.mkdtemp(prefix='download')
outputPath = os.path.join(directory, os.path.basename(path))
# Handle user/password explicitly.
if pieces.scheme.startswith('http') and pieces.username:
# Initialize basic HTTP authentication for this URL.
# See http://aspn.activestate.com/ASPN/docs/ActivePython/2.5/howto/urllib2/index.html
#
# NOTE: This is necessary because urllib doesn't handle URLs like
# http://user:password@host:port/...
# Get the user name and password from the URL.
user, password = pieces.username, pieces.password
netloc = pieces.hostname
if pieces.port:
            netloc += ':%d' % pieces.port
newPieces = (pieces.scheme, netloc, pieces.path, pieces.query,
pieces.params, pieces.fragment)
url = urlparse.urlunparse(newPieces)
log.debug('Installing authorization handler for URL %s' % url)
passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
passwordMgr.add_password(realm=None,
uri=url,
user=user,
passwd=password)
authHandler = urllib2.HTTPBasicAuthHandler(passwordMgr)
opener = urllib2.build_opener(authHandler)
opener.open(url)
urllib2.install_opener(opener)
log.debug('Downloading "%s" to "%s"' % (url, outputPath))
shutil.copyfileobj(urllib2.urlopen(url), open(outputPath, 'wb'), bufsize)
return (outputPath, directory)
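# --- Added usage sketch (hedged): fetches a page into a temporary
# directory; the URL is illustrative. Note the return order:
# (downloaded_file, download_directory).
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    path, directory = download('http://www.example.com/index.html')
    print 'saved %s in %s' % (path, directory)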
| {
"repo_name": "undoware/neutron-drive",
"path": "google_appengine/lib/grizzled/grizzled/net/__init__.py",
"copies": "19",
"size": "3602",
"license": "bsd-3-clause",
"hash": 619234990109712600,
"line_mean": 33.9708737864,
"line_max": 93,
"alpha_frac": 0.5038867296,
"autogenerated": false,
"ratio": 5.009735744089013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: fa873c96e7b5ed23437473a2b6d0b9a3871d4a18 $
"""
Introduction
============
The ``db`` module is a DB API wrapper. It provides a DB API-compliant API that
wraps real underlying DB API drivers, simplifying some non-portable operations
like ``connect()`` and providing some new operations.
Some drivers come bundled with this package. Others can be added on the fly.
Getting the List of Drivers
===========================
To get a list of all drivers currently registered with this module, use the
``get_driver_names()`` method:
.. python::
import db
for driver_name in db.get_driver_names():
print driver_name
Currently, this module provides the following bundled drivers:
+------------------+------------+-------------------+
| Driver Name, | | |
| as passed to | | Underlying Python |
| ``get_driver()`` | Database | DB API module |
+==================+============+===================+
| dummy | None | ``db.DummyDB`` |
+------------------+------------+-------------------+
| gadfly | Gadfly | ``gadfly`` |
+------------------+------------+-------------------+
| mysql | MySQL | ``MySQLdb`` |
+------------------+------------+-------------------+
| oracle | Oracle | ``cx_Oracle`` |
+------------------+------------+-------------------+
| postgresql | PostgreSQL | ``psycopg2`` |
+------------------+------------+-------------------+
| sqlserver | SQL Server | ``pymssql`` |
+------------------+------------+-------------------+
| sqlite | SQLite 3 | ``sqlite3`` |
+------------------+------------+-------------------+
To use a given driver, you must have the corresponding Python DB API module
installed on your system.
Adding a Driver
===============
It's possible to add a new driver to the list of drivers supplied by this
module. To do so:
1. The driver class must extend ``DBDriver`` and provide the appropriate
methods. See examples in this module.
2. The driver's module (or the calling program) must register the driver
with this module by calling the ``add_driver()`` function.
DB API Factory Functions
========================
The ``Binary()``, ``Date()``, ``DateFromTicks()``, ``Time()``,
``TimeFromTicks()``, ``TimeStamp()`` and ``TimestampFromTicks()`` DB API
functions can be found in the DB class. Thus, to make a string into a BLOB
with this API, you use:
.. python::
driver = db.get_driver(driver_name)
db = driver.connect(...)
blob = db.Binary(some_string)
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import re
import time
import os
import sys
from datetime import date, datetime
from grizzled.exception import ExceptionWithMessage
from grizzled.decorators import abstract
from grizzled.db import (base, dummydb, dbgadfly, mysql, oracle, postgresql,
sqlite, sqlserver)
from grizzled.db.base import *
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['get_driver', 'add_driver', 'get_driver_names', 'DBDriver',
'DB', 'Cursor', 'DBError', 'Error', 'Warning', 'apilevel',
'threadsafety', 'paramstyle']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
DummyDriver = dummydb.DummyDriver
GadflyDriver = dbgadfly.GadflyDriver
MySQLDriver = mysql.MySQLDriver
OracleDriver = oracle.OracleDriver
PostgreSQLDriver = postgresql.PostgreSQLDriver
SQLite3Driver = sqlite.SQLite3Driver
SQLServerDriver = sqlserver.SQLServerDriver
drivers = { 'dummy' : 'DummyDriver',
'mysql' : 'MySQLDriver',
'postgresql' : 'PostgreSQLDriver',
'sqlserver' : 'SQLServerDriver',
'sqlite' : 'SQLite3Driver',
'oracle' : 'OracleDriver',
'gadfly' : 'GadflyDriver'}
apilevel = '2.0'
threadsafety = '1'
paramstyle = None
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def add_driver(key, driver_class, force=False):
"""
Add a driver class to the list of drivers.
:Parameters:
key : str
the key, also used as the driver's name
driver_class : class
the ``DBDriver`` subclass object
force : bool
``True`` to force registration of the driver, even if there's an
existing driver with the same key; ``False`` to throw an exception
if there's an existing driver with the same key.
:raise ValueError: There's an existing driver with the same key, and
``force`` is ``False``
"""
try:
drivers[key]
if not force:
raise ValueError, 'A DB driver named "%s" is already installed' %\
key
except KeyError:
pass
drivers[key] = driver_class
def get_drivers():
"""
Get the list of drivers currently registered with this API. The result is
a list of ``DBDriver`` subclasses. Note that these are classes, not
    instances. One way to use the resulting list is as follows:
.. python::
for driver in db.get_drivers():
print driver.__doc__
:rtype: list
:return: list of ``DBDriver`` class names
"""
return [str(d) for d in drivers.values()]
def get_driver_names():
"""
Get the list of driver names currently registered with this API.
Each of the returned names may be used as the first parameter to
the ``get_driver()`` function.
"""
return drivers.keys()
def get_driver(driver_name):
"""
Get the DB API object for the specific database type. The list of
legal database types are available by calling ``get_driver_names()``.
:Parameters:
driver_name : str
name (key) of the driver
:rtype: DBDriver
:return: the instantiated driver
:raise ValueError: Unknown driver name
"""
try:
o = drivers[driver_name]
if type(o) == str:
exec 'd = %s()' % o
else:
d = o()
return d
except KeyError:
raise ValueError, 'Unknown driver name: "%s"' % driver_name
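# --- Added smoke test (hedged): lists the registered driver names and
# instantiates the bundled dummy driver, which needs no real database.
if __name__ == '__main__':
    for name in get_driver_names():
        print name
    print get_driver('dummy')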
| {
"repo_name": "overtherain/scriptfile",
"path": "software/googleAppEngine/lib/grizzled/grizzled/db/__init__.py",
"copies": "19",
"size": "6667",
"license": "mit",
"hash": -4993002045308931000,
"line_mean": 31.5219512195,
"line_max": 78,
"alpha_frac": 0.5083245838,
"autogenerated": false,
"ratio": 4.480510752688172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""$Id: feed.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_channel
from extension import extension_feed
#
# Atom root element
#
class feed(validatorBase, extension_feed, itunes_channel):
def prevalidate(self):
self.links = []
def missingElement(self, params):
offset = [self.line - self.dispatcher.locator.getLineNumber(),
self.col - self.dispatcher.locator.getColumnNumber()]
self.log(MissingElement(params), offset)
def validate_metadata(self):
if not 'title' in self.children:
self.missingElement({"parent":self.name, "element":"title"})
if not 'id' in self.children:
self.missingElement({"parent":self.name, "element":"id"})
if not 'updated' in self.children:
self.missingElement({"parent":self.name, "element":"updated"})
# ensure that there is a link rel="self"
for link in self.links:
if link.rel=='self': break
else:
offset = [self.line - self.dispatcher.locator.getLineNumber(),
self.col - self.dispatcher.locator.getColumnNumber()]
self.log(MissingSelf({"parent":self.parent.name, "element":self.name}), offset)
# can only have one alternate per type
types={}
for link in self.links:
if not link.rel=='alternate': continue
if not link.type in types: types[link.type]={}
if link.rel in types[link.type]:
if link.hreflang in types[link.type][link.rel]:
self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
else:
types[link.type][link.rel] += [link.hreflang]
else:
types[link.type][link.rel] = [link.hreflang]
if self.itunes: itunes_channel.validate(self)
def metadata(self):
if 'entry' in self.children:
self.log(MisplacedMetadata({"parent":self.name, "element":self.child}))
def validate(self):
if not 'entry' in self.children:
self.validate_metadata()
def do_author(self):
self.metadata()
from author import author
return author()
def do_category(self):
self.metadata()
from category import category
return category()
def do_contributor(self):
self.metadata()
from author import author
return author()
def do_generator(self):
self.metadata()
from generator import generator
return generator(), nonblank(), noduplicates()
def do_id(self):
self.metadata()
return canonicaluri(), nows(), noduplicates()
def do_icon(self):
self.metadata()
return nonblank(), nows(), rfc2396(), noduplicates()
def do_link(self):
self.metadata()
from link import link
self.links += [link()]
return self.links[-1]
def do_logo(self):
self.metadata()
return nonblank(), nows(), rfc2396(), noduplicates()
def do_title(self):
self.metadata()
from content import textConstruct
return textConstruct(), noduplicates()
def do_subtitle(self):
self.metadata()
from content import textConstruct
return textConstruct(), noduplicates()
def do_rights(self):
self.metadata()
from content import textConstruct
return textConstruct(), noduplicates()
def do_updated(self):
self.metadata()
return rfc3339(), nows(), noduplicates()
def do_entry(self):
if not 'entry' in self.children:
self.validate_metadata()
from entry import entry
return entry()
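# --- Added illustration (not part of feedvalidator): the duplicate-link
# rule in validate_metadata() above allows several rel="alternate" links
# only if they differ in type or hreflang. A self-contained toy of that
# bookkeeping:
if __name__ == '__main__':
    seen = {}
    for mtype, lang in [('text/html', 'en'), ('text/html', 'fr'),
                        ('text/html', 'en')]:
        langs = seen.setdefault(mtype, [])
        if lang in langs:
            print 'duplicate alternate:', mtype, lang
        else:
            langs.append(lang)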
| {
"repo_name": "Suninus/NewsBlur",
"path": "vendor/feedvalidator/feed.py",
"copies": "16",
"size": "3754",
"license": "mit",
"hash": -8333954536131213000,
"line_mean": 28.7936507937,
"line_max": 121,
"alpha_frac": 0.6587639851,
"autogenerated": false,
"ratio": 3.666015625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: feeds.py 5ee3537e5395 2009/08/22 11:31:30 jpartogi $
from django.utils.translation import ugettext as _
from django.contrib.syndication.feeds import Feed
from django.contrib.sites.models import Site
from django.utils.feedgenerator import Atom1Feed
from simple_blog.models import Entry, Category
class EntriesFeed(Feed):
feed_type = Atom1Feed
description_template = 'blog/feed.html'
def title(self):
if not hasattr(self, '_site'):
self._site = Site.objects.get_current()
return _("%(site_name)s Feed") % dict(site_name=self._site.name)
def description(self):
if not hasattr(self, '_site'):
self._site = Site.objects.get_current()
return _("Latest blog entries on %(site_name)s") % dict(site_name=self._site.name)
def items(self):
return Entry.objects.get_latest_posted_entries()[:10]
def item_author_name(self, item):
"""
Takes an item, as returned by items(), and returns the item's
author's name as a normal Python string.
"""
return item.creator
def item_pubdate(self, item):
"""
Takes an item, as returned by items(), and returns the item's
pubdate.
"""
return item.posted
def item_categories(self):
"""
Returns the categories for every item in the feed.
"""
return Category.objects.all()
def link(self):
if not hasattr(self, '_site'):
self._site = Site.objects.get_current()
return "http://%s/" % (self._site.domain) | {
"repo_name": "jpartogi/django-simple-blog",
"path": "simple_blog/feeds.py",
"copies": "1",
"size": "1576",
"license": "bsd-3-clause",
"hash": 6331282401669285000,
"line_mean": 31.1836734694,
"line_max": 90,
"alpha_frac": 0.6224619289,
"autogenerated": false,
"ratio": 3.8817733990147785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004235327914779,
"avg_score": null,
"num_lines": null
} |
"""File selection dialog class."""
import os
from GenericDialog import GenericDialog
from FileList import FileList
from Frame import HFrame
from Label import Label
from Entry import Entry
from Constants import *
class FileDialog (GenericDialog):
"""FileDialog (title, buttons, results, directory=None) -> FileDialog
A modal file selection dialog.
The FileDialog is a GenericDialog, that allows the user to select
files and directories from a filesystem using a FileList widget. It
also displays an entry widget to allow quick modifications of the
current directory path.
The selected files and directories can be retrieved at any moment
using the get_filenames() method, which returns them as a list.
selection = dialog.get_filenames ()
Default action (invoked by activate()):
See the GenericDialog class.
Mnemonic action (invoked by activate_mnemonic()):
See the GenericDialog class.
Attributes:
filelist - The FileList widget displayed on the FileDialog.
"""
def __init__ (self, title, buttons, results, directory=None):
GenericDialog.__init__ (self, title, buttons, results)
if directory == None:
directory = os.curdir
directory = os.path.abspath (directory)
# Path access.
self._pathframe = HFrame ()
self._pathframe.minsize = 200, self._pathframe.minsize[1]
self._pathframe.padding = 0
self._pathframe.border = BORDER_NONE
self._lblpath = Label ("#Path:")
self._txtpath = Entry (directory)
self._txtpath.minsize = 165, self._txtpath.minsize[1]
self._lblpath.widget = self._txtpath
self._pathframe.add_child (self._lblpath, self._txtpath)
# File list browser.
self._filelist = FileList (200, 160, directory)
self.content.add_child (self._pathframe, self._filelist)
# Align anything on the right.
self.main.align = ALIGN_RIGHT
# Events.
self._txtpath.connect_signal (SIG_INPUT, self._set_directory)
self._filelist.connect_signal (SIG_LISTCHANGED, self._set_path)
def _set_path (self):
"""F._set_path () -> None
Sets the entry text to the current directory path.
"""
self._txtpath.text = self.filelist.directory
def _set_directory (self):
"""F._set_directory () -> None
Sets the directory to list after a text input
"""
print "switching directory..."
path = os.path.normpath (self._txtpath.text)
if os.path.isdir (path):
if path != self.filelist.directory:
self.filelist.directory = path
def get_filenames (self):
"""F.get_filenames () -> list
Gets a list with the selected filenames.
"""
items = self._filelist.get_selected ()
directory = self.filelist.directory
return [os.path.join (directory, item.text) for item in items]
filelist = property (lambda self: self._filelist,
doc ="The filelist shown on the FileDialog.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/FileDialog.py",
"copies": "1",
"size": "4548",
"license": "bsd-2-clause",
"hash": -6716973550813876000,
"line_mean": 37.8717948718,
"line_max": 78,
"alpha_frac": 0.6791996482,
"autogenerated": false,
"ratio": 4.3396946564885495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.551889430468855,
"avg_score": null,
"num_lines": null
} |
"""A list item suitable for displaying file and directory information."""
import os, stat
from ocempgui.draw import Image
from ListItem import TextListItem
from ocempgui.widgets.images import Icons16x16
from ocempgui.widgets.Constants import *
from ocempgui.widgets import base
class FileListItem (TextListItem):
"""FileListItem (filename, filetype) -> FileListItem
A list item, which can display information about files.
The FileListItem class is a TextListItem, which contains additional
information about a file such as its type and a suitable icon
surface.
The 'filetype' argument of the constructor must be an integer value,
which matches a valid value of the stat.S_IF* data values. It will
    be stored in the 'filetype' attribute of the FileListItem object.
Take a look at the stat module and os.stat() documentation for more
details about the different values.
    item = FileListItem ('myfile', stat.S_IFDIR)
The passed filename will be stored in the 'text' attribute of the
FileListItem as described in the TextListItem documentation.
Dependant on the 'filetype' value a specific icon surface will be
set in the 'icon' attribute of the list item.
Attributes:
icon - The icon to display on the FileListItem.
filetype - The type of the file.
"""
def __init__ (self, filename, filetype):
TextListItem.__init__ (self, filename)
if type (filetype) != int:
raise TypeError ("filetype must be an integer")
self._filetype = filetype
self._icon = None
iconpath = base.GlobalStyle.engine.get_icon_path ("16x16")
if stat.S_ISDIR (filetype):
self._icon = Image.load_image (os.path.join (iconpath,
Icons16x16.FOLDER),
True)
elif stat.S_ISLNK (filetype):
self._icon = Image.load_image (os.path.join (Icons16x16.ICONPATH,
Icons16x16.FILE_LINK),
True)
elif stat.S_ISSOCK (filetype) or stat.S_ISFIFO (filetype):
self._icon = Image.load_image (os.path.join \
(iconpath, Icons16x16.FILE_SOCKET),
True)
elif stat.S_ISCHR (filetype):
self._icon = Image.load_image (os.path.join (iconpath,
Icons16x16.FILE_CHAR),
True)
else:
self._icon = Image.load_image (os.path.join (iconpath,
Icons16x16.FILE),
True)
icon = property (lambda self: self._icon, doc = "The icon to display.")
filetype = property (lambda self: self._filetype,
doc = "The type of the file.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/components/FileListItem.py",
"copies": "1",
"size": "4455",
"license": "bsd-2-clause",
"hash": -8782361504192421000,
"line_mean": 45.40625,
"line_max": 79,
"alpha_frac": 0.6316498316,
"autogenerated": false,
"ratio": 4.527439024390244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5659088855990244,
"avg_score": null,
"num_lines": null
} |
"""Filesystem browsing and selection list."""
import os, stat
from pygame import K_RETURN
from ocempgui.widgets.components import ListItemCollection, FileListItem
from ScrolledList import ScrolledList
from Constants import *
import base
class FileList (ScrolledList):
"""FileList (width, height, directory=None) -> FileList
A scrolled list, which can list and browse a filesystem.
The FileList widget class is a ScrolledList, which can list and
browse filesystem contents using FileListItem objects. It supports
automatic directory changes and change notifications. By default the
list will be sorted with the directories first.
    To set or get the directory whose contents should be displayed, the
    'directory' attribute and set_directory() method can be used.
filelist.directory = '/usr/bin'
    if filelist.directory == 'C:\\':
        filelist.directory = 'C:\\Windows'
Default action (invoked by activate()):
See the ScrolledList class.
Mnemonic action (invoked by activate_mnemonic()):
See the ScrolledList class.
Signals:
SIG_DOUBLECLICKED - Invoked, when the list is double-clicked.
Arguments:
directory - The directory to list the contents of.
"""
def __init__ (self, width, height, directory=None):
ScrolledList.__init__ (self, width, height)
self._signals[SIG_DOUBLECLICKED] = []
self._directory = None
self.selectionmode = SELECTION_SINGLE
self.set_directory (directory)
def set_directory (self, directory):
"""F.set_directory (...) -> None
Sets the directory to list the contents of.
Sets the directory the FileList should list the contents of. If
the directory could not be found or accessed, the bell keycode
'\\a' will be printed and the directory will remain unchanged.
Raises a ValueError, if the passed directory argument does not
exist or is not a directory path.
"""
if directory == None:
directory = os.path.curdir
if not os.path.exists (directory):
raise ValueError ("directory path does not exist")
if not os.path.isdir (directory):
raise ValueError ("argument is not a directory path")
olddir = self._directory
self._directory = os.path.normpath (directory)
try:
self._list_contents ()
except OSError:
print "\a"
self._directory = olddir
def _list_contents (self):
"""F.list_contents (...) -> None
Populates the directory contents to the scrolled list.
"""
items = ListItemCollection ()
items.append (FileListItem (os.pardir, stat.S_IFDIR))
stats = None
files = []
dirs = []
dappend = dirs.append
fappend = files.append
entries = os.listdir (self._directory)
isdir = os.path.isdir
pjoin = os.path.join
for filename in entries:
if isdir (pjoin (self._directory, filename)):
dappend (filename)
else:
fappend (filename)
dirs.sort ()
files.sort ()
map (items.append, [FileListItem (d, stat.S_IFDIR) for d in dirs])
for filename in files:
stats = os.stat (pjoin (self._directory, filename))
items.append (FileListItem (filename, stats.st_mode))
self.set_items (items)
def notify (self, event):
"""F.notify (...) -> None
Notifies the FileList about an event.
"""
if not self.sensitive:
return
if event.signal == SIG_DOUBLECLICKED:
eventarea = self.rect_to_client ()
if eventarea.collidepoint (event.data.pos):
self.run_signal_handlers (SIG_DOUBLECLICKED)
if event.data.button == 1:
# Get the item and switch to the directory on demand.
item = self.child.get_item_at_pos (event.data.pos)
if item and item.selected and stat.S_ISDIR (item.filetype):
self.set_directory (os.path.join (self.directory,
item.text))
event.handled = True
elif event.signal == SIG_KEYDOWN:
if event.data.key == K_RETURN:
if self.cursor and stat.S_ISDIR (self.cursor.filetype):
self.set_directory (os.path.join (self.directory,
self.cursor.text))
event.handled = True
ScrolledList.notify (self, event)
directory = property (lambda self: self._directory,
lambda self, var: self.set_directory (var),
doc = "The directory to list the contents of.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/FileList.py",
"copies": "1",
"size": "6315",
"license": "bsd-2-clause",
"hash": 8816359107555256000,
"line_mean": 38.46875,
"line_max": 79,
"alpha_frac": 0.632304038,
"autogenerated": false,
"ratio": 4.475549255846917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5607853293846916,
"avg_score": null,
"num_lines": null
} |
""" $Id: filetail.py 1512 2011-05-20 16:14:29Z morrissj $
Python3 module for tailing a file such as a system log that grows continuously.
Transparently handles files that get rotated or truncated.
Inspired by the Perl File::Tail module.
A simple algorithm is used to dynamically sleep when no new data is available
in the file. The longer the file goes without new data, the longer the sleep
interval becomes, starting at "interval" and capped at "max_interval".
Example:
from filetail import FileTail
tail = FileTail("/var/log/syslog")
for line in tail:
# do something
pass
"""
import os
import sys
from stat import *
from math import floor
from time import sleep, time
class FileTail(object):
"""
Tail a file, even if its rotated/truncated.
Inspiration came from the perl module File::Tail.
"""
def __init__(self,
file, # filename to monitor
start_pos="end", # where to initially start reading from
#max_buffer_size=16384, # Max buffer size hint (Not exact; @see file.readlines)
interval=0.1, # sleep time to wait if no data is present (dynamically changes)
#min_interval=0.01, # min sleep time
max_interval=5, # max sleep time
max_wait=60, # max time to wait with no data before reopening file
reopen_check="inode", # how to check if file is different (inode or time) - inode does not work on win32
encoding="utf-8" # file encoding
):
self.start_pos = start_pos
self.reopen_check = reopen_check
self.max_wait = max_wait
#self.max_buffer_size = max_buffer_size
#self.min_interval = min_interval
self.max_interval = max_interval
self.interval = interval
if self.interval > self.max_interval:
self.interval = self.max_interval
self.encoding = encoding
# will throw exception if it fails... caller should intercept
self.open(file, start_pos=start_pos)
# initialize some internal vars
self._buffer = []
self.last_time = time()
self.last_count = 0
def open(self, file, start_pos="head"):
"""Open the file to tail and initialize our state."""
fh = open(file, "r", encoding=self.encoding)
# seek to the initial position in the file we want to start reading
if start_pos == "end" or start_pos == "tail":
fh.seek(0, os.SEEK_END) # End of file
elif start_pos == "start" or start_pos == "head":
#fh.seek(0, os.SEEK_SET) # Beginning of file
pass
elif start_pos is not None:
if start_pos >= 0: # Absolute position
fh.seek(start_pos, os.SEEK_SET)
else: # Absolute position (from end)
fh.seek(abs(start_pos), os.SEEK_END)
# if we passed the end of the file rewind to the actual end.
# This avoids a potential race condition if the file was being rotated
# in the process of opening the file. Not sure if this can actually
# happen, but better safe than sorry.
pos = fh.tell()
        if pos > os.stat(file)[ST_SIZE]:
            fh.seek(0, os.SEEK_END)
            pos = fh.tell()
self.fh = fh
self.pos = pos
self.stat = os.fstat(fh.fileno())
self.file = file
def reopen(self):
"""
Attempt to reopen the current file. If it doesn't appear to have
changed (been rotated) then the current file handle is not changed.
"""
#print("Reopening", self.file, "...", end="")
# if we don't have an opened file already then try to open it now
if not self.fh or self.fh.closed:
try:
self.open(self.file, start_pos="head");
except IOError:
return False
return True
# save current values
fh = self.fh
pos = self.pos
cur = self.stat
# reopen same file
try:
self.open(self.file, "head")
except IOError as e:
#print("FILE DOES NOT EXIST")
return False
new = self.stat
#print(new.st_ino, ' == ', cur.st_ino)
if (
(self.reopen_check == 'inode' and new.st_ino == cur.st_ino)
or
(self.reopen_check == 'time' and new.st_mtime <= floor(self.last_time) and new.st_size == pos)
):
#print("FILE NOT CHANGED")
# file appears to be the same or older than our last read
#self.last_time = new.st_mtime
self.fh = fh
self.pos = pos
self.stat = cur
return False
#print("NEW FILE")
return True
def __iter__(self):
"""
Return iterator to support:
for line in filetail:
print line
"""
self.wait_count = 0
return self
def __next__(self):
"""Interator "next" call."""
return self.next()
def next(self):
line = None
self.wait_count = 0
# low CPU (probably same as the block below this, but ALLOWS tell()!
while not line:
line = self.fh.readline()
if line != "":
# track the time we received new data and how much
self.last_time = time()
self.last_count = 1
else:
self.wait()
## uses the least amount of CPU, but does not allow me to tell()
## is that a bug in readlines()?
#while len(self._buffer) == 0:
# self._buffer = self.fh.readlines(self.max_buffer_size)
# if len(self._buffer) > 0:
# # track the time we received new data and how much
# self.last_time = time()
# self.last_count = len(self._buffer)
# self.wait_count = 0
# else:
# self.wait()
#line = self._buffer.pop(0)
# dealing with the file as binary isn't working as well as i hoped
#while len(self.lines) == 0:
# buffer = self.fh.read(self.max_buffer_size).decode(self.encoding)
# if buffer is not None:
# self._buffer += buffer
# size = self.enqueue(self._buffer)
# if size:
# self._buffer = self._buffer[size:]
# else:
# self.wait()
#line = self.lines.pop(0)
# uses too much CPU!! (but not 100%)
#line = self.fh.readline()
#while line == "":
# self.wait()
# line = self.fh.readline()
# if line != "":
# # track the time we received new data and how much
# self.pos = self.fh.tell()
# self.last_time = time()
# self.last_count = 1 #len(self._buffer)
# self.wait_count = 0
return line
#def enqueue(self, buffer):
# """
# Extract any lines from buffer and add to our self.lines list. Ignores
# the last line if it does not have a line termination ("\n")
# @return total characters extracted from buffer.
# """
# lines = buffer.splitlines(True)
# total = 0;
# for l in lines:
# if l.endswith("\n"):
# self.lines.append(l)
# total += len(l)
# return total
# wait for X seconds. The sleep interval is dynamically predicted based on
# how much was previously read. The predicted interval will never be more
# than max_interval. If enough time passes w/o any new data the file will
# be reopened and checked.
def wait(self):
if self.wait_count == 0:
self.pos = self.fh.tell()
self.stat = os.fstat(self.fh.fileno())
self.wait_count += 1
elapsed = time() - self.last_time
# if we've waited long enough try to reopen the file, if that returns
# true then we're done here and we do not sleep.
if elapsed >= self.max_wait:
self.last_time = time()
if self.reopen():
return
# determine delay value. Delay is longer based on total time passed
# note: currently last_count is always 1.
if self.last_count:
#delay = (time() - self.last_time) / self.last_count
delay = elapsed
else:
delay = self.interval
# don't delay too long
if delay > self.max_interval:
delay = self.max_interval
#elif delay < self.min_interval:
# delay = self.min_interval
#print("delay={:0.06f} elapsed={:0.06f}".format(delay, elapsed))
sleep(delay)
# end of FileTail class
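# --- Added demo (hedged): tails a hypothetical log file from its current
# end, printing new lines as they arrive; Ctrl-C stops it. The keyword
# arguments shown are the ones defined in __init__ above.
def demo(path="/var/log/syslog"):
    tail = FileTail(path, start_pos="end", interval=0.1, max_interval=2)
    for line in tail:
        print(line, end="")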
def main():
print("No tests implemented.")
if __name__ == "__main__":
sys.exit(main())
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577710_Tail_continuously_growing_file_like_tail_f/recipe-577710.py",
"copies": "1",
"size": "9171",
"license": "mit",
"hash": -2320170067840393700,
"line_mean": 33.477443609,
"line_max": 122,
"alpha_frac": 0.5318940137,
"autogenerated": false,
"ratio": 4.052585064074238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00861814791387918,
"num_lines": 266
} |
"""Widget classes, which can place their chidren in a horizontal or
vertical alignment.
"""
from Container import Container
from BaseWidget import BaseWidget
from Constants import *
import base
class Frame (Container):
"""Frame (widget=None) -> Frame
A container widget class with decorative border.
The Frame widget is a container widget, which can draw a decorative
border around its children and supports a title widget, which will
be displayed in the topleft corner of the frame. It also supports
alignment of its children.
    The 'align' attribute and set_align() method enable the frame
    to align its children. Dependent on the alignment type (see also
ALIGN_TYPES) and the concrete Frame implementation, the children
will be placed differently within the frame.
frame.align = ALIGN_TOP
frame.set_align (ALIGN_TOP)
The border to draw around the children can be influenced using the
'border' attribute or set_border() method. The default is to draw a
sunken border.
frame.border = BORDER_ETCHED_IN
frame.set_border (BORDER_ETCHED_IN)
The 'widget' attribute contains the widget, which will be placed in
the topleft corner of the frame. It is suitable as title widget and
has no limitations about the type of the widget. It should be noted
    that the widget can be removed by assigning None or passing None to
the set_title_widget() method. The old title widget of the Frame
will be destroyed, if you reassign the property.
frame.widget = Label ('Title')
frame.set_title_widget (Label ('Title'))
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
align - Alignment of the children.
border - The border style to set for the frame.
widget - Widget to put in the topleft corner of the frame.
"""
def __init__ (self, widget=None):
Container.__init__ (self)
self._widget = None
self._align = ALIGN_NONE
self._border = BORDER_ETCHED_IN
self.set_title_widget (widget)
def set_focus (self, focus=True):
"""F.set_focus (focus=True) -> None
Overrides the set_focus() behaviour for the Frame.
The Frame class is not focusable by default. It is a layout
class for other widgets, so it does not need to get the input
focus and thus it will return false without doing anything.
"""
return False
def set_title_widget (self, widget):
"""F.set_title_widget (...) -> None
Sets the widget to display in the topleft corner.
Raises a TypeError, if the passed argument does not inherit from
the BaseWidget class.
"""
if widget and (not isinstance (widget, BaseWidget)):
raise TypeError ("widget must inherit from BaseWidget")
if self._widget:
self._widget.parent = None
self._controls.remove (self._widget)
self._widget.destroy ()
self._widget = widget
if widget:
widget.parent = self
self._controls.append (widget)
if (widget.manager == None) and (self.manager != None):
widget.set_event_manager (self.manager)
self.dirty = True
def set_align (self, align):
"""F.set_align (...) -> None
Sets the alignment for the widgets.
Raises a TypeError, if the passed argument is not a value from
ALIGN_TYPES.
"""
if not constants_is_align (align):
raise TypeError ("align must be a value from ALIGN_TYPES")
self._align = align
self.dirty = True
def set_border (self, border):
"""F.set_border (...) -> None
Sets the border type to be used by the Frame.
Raises a ValueError, if the passed argument is not a value from
BORDER_TYPES
"""
if border not in BORDER_TYPES:
raise ValueError ("border must be a value from BORDER_TYPES")
self._border = border
self.dirty = True
def destroy (self):
"""F.destroy () -> None
Destroys the Frame and removes it from its event system.
"""
if self.widget:
self.widget.parent = None
Container.destroy (self)
align = property (lambda self: self._align,
lambda self, var: self.set_align (var),
doc = "The alignment to use for the children.")
border = property (lambda self: self._border,
lambda self, var: self.set_border (var),
doc = "The border style to set for the Frame.")
widget = property (lambda self: self._widget,
lambda self, var: self.set_title_widget (var),
doc = "The title widget to set for the Frame.")
class HFrame (Frame):
"""HFrame (widget=None) -> HFrame
    A Frame widget class, which places its children horizontally.
The HFrame class places its attached children in a horizontal manner
and supports an alignment at the top or bottom of its edges.
Left or right alignment settings will be ignored by it.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
"""
def __init__ (self, widget=None):
Frame.__init__ (self, widget)
def calculate_size (self):
"""H.calculate_size () -> int, int
Calculates the size needed by the children.
Calculates the size needed by the children and returns the
resulting width and height.
"""
border = base.GlobalStyle.get_border_size (self.__class__, self.style,
self.border)
width = 2 * (self.padding + border)
height = 0
add_width = 0
add_top = border
spacing = self.spacing
# Calculate the widget sizes.
if self.widget:
add_width = self.widget.width
if self.widget.height > border:
add_top = self.widget.height
for widget in self.children:
width += widget.width + spacing
if widget.height > height:
height = widget.height
if len (self.children) != 0:
width -= spacing # The last one adds unnecessary spacing.
if width <= add_width:
width = add_width + 2 * (self.padding + border)
# Only one border, the other one was added in add_top, if
# necessary
height += add_top + 2 * self.padding + border
return width, height
def dispose_widgets (self):
"""H.dispose_widgets (...) -> None
Moves the children of the HFrame to their correct positions.
"""
height = self.image.get_rect ().height
border = base.GlobalStyle.get_border_size (self.__class__,self.style,
self.border)
spacing = self.spacing
padding = self.padding
x = border + padding
y = 0
add_height = border
if self.widget:
self.widget.topleft = (x, y)
y += self.widget.height + padding
add_height = self.widget.height
# Check the alignment.
top = self.align & ALIGN_TOP == ALIGN_TOP
bottom = self.align & ALIGN_BOTTOM == ALIGN_BOTTOM
nothing = not top and not bottom
centery = (height + add_height - border) / 2
if nothing:
for widget in self.children:
y = centery - widget.height / 2
widget.topleft = x, y
x += widget.width + spacing
elif bottom:
for widget in self.children:
y = (height - border - padding - widget.height)
widget.topleft = x, y
x += widget.width + spacing
else:
for widget in self.children:
widget.topleft = x, y
x += widget.width + spacing
def draw_bg (self):
"""H.draw_bg () -> None
Draws the HFrame background surface and returns it.
Creates the visible surface of the HFrame and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_frame (self)
def draw (self):
"""H.draw () -> None
Draws the HFrame surface and places its children on it.
"""
Frame.draw (self)
self.dispose_widgets ()
blit = self.image.blit
if self.widget:
blit (self.widget.image, self.widget.rect)
for widget in self.children:
blit (widget.image, widget.rect)
class VFrame (Frame):
"""
A Frame widget class, which place its children vertically.
The VFrame class places its attached children in a vertical manner
and supports an alignment at the left or right of its edges. Top
or bottom alignment settings will be ignored by it.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
"""
def __init__ (self, widget=None):
Frame.__init__ (self, widget)
def calculate_size (self):
"""V.calculate_size () -> int, int.
Calculates the size needed by the children.
Calculates the size needed by the children and returns the
resulting width and height.
"""
border = base.GlobalStyle.get_border_size (self.__class__, self.style,
self.border)
width = 0
height = 2 * self.padding + border
add_width = 0
add_top = border
spacing = self.spacing
# Calculate the widget sizes.
if self.widget:
add_width = self.widget.width
if self.widget.height > border:
add_top = self.widget.height
height += add_top
for widget in self.children:
height += widget.height + spacing
if widget.width > width:
width = widget.width
width += 2 * (self.padding + border)
if width <= add_width:
width = add_width + 2 * (self.padding + border)
# Last one adds too much spacing.
if len (self.children) != 0:
height -= spacing # The last one adds unnecessary spacing.
return width, height
def dispose_widgets (self):
"""V.dispose_widgets (...) -> None
Moves the children of the VFrame to their correct positions.
"""
width = self.image.get_rect ().width
border = base.GlobalStyle.get_border_size (self.__class__, self.style,
self.border)
spacing = self.spacing
padding = self.padding
x = padding + border
y = 0
add_height = border
if self.widget:
self.widget.topleft = x, y
add_height = self.widget.height
y += add_height + padding
# Check the alignment.
left = self.align & ALIGN_LEFT == ALIGN_LEFT
right = self.align & ALIGN_RIGHT == ALIGN_RIGHT
nothing = not left and not right
centerx = width / 2
if nothing:
for widget in self.children:
x = centerx - widget.width / 2
widget.topleft = x, y
y += widget.height + spacing
elif right:
for widget in self.children:
x = (width - border - padding - widget.width)
widget.topleft = x, y
y += widget.height + spacing
else:
for widget in self.children:
widget.topleft = x, y
y += widget.height + spacing
def draw_bg (self):
"""V.draw_bg () -> None
Draws the VFrame background surface and returns it.
Creates the visible surface of the VFrame and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_frame (self)
def draw (self):
"""V.draw () -> None
Draws the VFrame surface and places its children on it.
"""
Frame.draw (self)
self.dispose_widgets ()
blit = self.image.blit
if self.widget:
blit (self.widget.image, self.widget.rect)
for widget in self.children:
blit (widget.image, widget.rect)
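# A minimal usage sketch (not part of the original module). Label, the
# add_child() method and the align/spacing attributes are assumed from
# the surrounding OcempGUI widget conventions:
#
# >>> frame = VFrame ()
# >>> frame.align = ALIGN_LEFT # left-align children; top/bottom ignored
# >>> frame.spacing, frame.padding = 4, 2
# >>> frame.add_child (Label ('First'), Label ('Second'))
# >>> frame.calculate_size () # -> (width, height) needed by the children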
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Frame.py",
"copies": "1",
"size": "13967",
"license": "bsd-2-clause",
"hash": -3982400045983180000,
"line_mean": 33.6575682382,
"line_max": 78,
"alpha_frac": 0.595403451,
"autogenerated": false,
"ratio": 4.463726430169383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00866217044361272,
"num_lines": 403
} |
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
(``object.attribute``). Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.
Also exports the following functions:
* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators: `validate_encoding`,
`validate_encoding_error_handler`,
`validate_encoding_and_error_handler`, `validate_boolean`,
`validate_threshold`, `validate_colon_separated_string_list`,
`validate_dependency_file`.
* `make_paths_absolute`.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import warnings
import ConfigParser as CP
import codecs
import docutils
import docutils.utils
import docutils.nodes
import optparse
from optparse import SUPPRESS_HELP
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value)
def read_config_file(option, opt, value, parser):
"""
Read a configuration file during option processing. (Option callback.)
"""
try:
new_settings = parser.get_config_file_settings(value)
except ValueError, error:
parser.error(error)
parser.values.update(new_settings, parser)
def validate_encoding(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup(value)
except LookupError:
raise LookupError('setting "%s": unknown encoding: "%s"'
% (setting, value)), None, sys.exc_info()[2]
return value
def validate_encoding_error_handler(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup_error(value)
except AttributeError: # TODO: remove (only needed prior to Python 2.3)
if value not in ('strict', 'ignore', 'replace', 'xmlcharrefreplace'):
raise LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", or "xmlcharrefreplace")'
% value), None, sys.exc_info()[2]
except LookupError:
raise LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", "backslashreplace", '
'"xmlcharrefreplace", and possibly others; see documentation for '
'the Python ``codecs`` module)' % value), None, sys.exc_info()[2]
return value
def validate_encoding_and_error_handler(
setting, value, option_parser, config_parser=None, config_section=None):
"""
Side-effect: if an error handler is included in the value, it is inserted
into the appropriate place as if it were a separate setting/option.
"""
if ':' in value:
encoding, handler = value.split(':')
validate_encoding_error_handler(
setting + '_error_handler', handler, option_parser,
config_parser, config_section)
if config_parser:
config_parser.set(config_section, setting + '_error_handler',
handler)
else:
setattr(option_parser.values, setting + '_error_handler', handler)
else:
encoding = value
validate_encoding(setting, encoding, option_parser,
config_parser, config_section)
return encoding
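# Hedged illustration (not in the original source) of the combined
# validator; `parser` stands for any docutils OptionParser mid-parse:
#
# >>> validate_encoding_and_error_handler(
# ... 'output_encoding', 'utf-8:replace', parser)
# 'utf-8'
#
# As a side effect, parser.values.output_encoding_error_handler is set
# to 'replace', exactly as if it had been given as a separate option.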
def validate_boolean(setting, value, option_parser,
config_parser=None, config_section=None):
if isinstance(value, unicode):
try:
return option_parser.booleans[value.strip().lower()]
except KeyError:
raise LookupError('unknown boolean value: "%s"'
% value), None, sys.exc_info()[2]
return value
def validate_nonnegative_int(setting, value, option_parser,
config_parser=None, config_section=None):
value = int(value)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def validate_threshold(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return int(value)
except ValueError:
try:
return option_parser.thresholds[value.lower()]
except (KeyError, AttributeError):
raise LookupError('unknown threshold: %r.'
% value), None, sys.exc_info()[2]
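# Hedged example (not in the original source): numeric strings and the
# symbolic names from `thresholds` are both accepted; `parser` is any
# docutils OptionParser instance.
#
# >>> validate_threshold('report_level', '2', parser)
# 2
# >>> validate_threshold('report_level', 'warning', parser)
# 2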
def validate_colon_separated_string_list(
setting, value, option_parser, config_parser=None, config_section=None):
if isinstance(value, unicode):
value = value.split(':')
else:
last = value.pop()
value.extend(last.split(':'))
return value
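# Hedged example (not in the original source; `p` is any OptionParser):
# a string is split on colons, while a list built up from repeated
# options has only its last element re-split.
#
# >>> validate_colon_separated_string_list('expose_internals', u'a:b', p)
# [u'a', u'b']
# >>> validate_colon_separated_string_list('expose_internals',
# ... [u'a', u'b:c'], p)
# [u'a', u'b', u'c']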
def validate_url_trailing_slash(
setting, value, option_parser, config_parser=None, config_section=None):
if not value:
return './'
elif value.endswith('/'):
return value
else:
return value + '/'
def validate_dependency_file(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return docutils.utils.DependencyList(value)
except IOError:
return docutils.utils.DependencyList(None)
def validate_strip_class(setting, value, option_parser,
config_parser=None, config_section=None):
if config_parser: # validate all values
class_values = value
else: # just validate the latest value
class_values = [value[-1]]
for class_value in class_values:
normalized = docutils.nodes.make_id(class_value)
if class_value != normalized:
raise ValueError('invalid class value %r (perhaps %r?)'
% (class_value, normalized))
return value
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from
`OptionParser.relative_path_settings`.
"""
if base_path is None:
base_path = os.getcwd()
for key in keys:
if key in pathdict:
value = pathdict[key]
if isinstance(value, list):
value = [make_one_path_absolute(base_path, path)
for path in value]
elif value:
value = make_one_path_absolute(base_path, value)
pathdict[key] = value
def make_one_path_absolute(base_path, path):
return os.path.abspath(os.path.join(base_path, path))
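# Hedged example (not in the original source); the setting name is taken
# from relative_path_settings below, the paths are hypothetical:
#
# >>> pathdict = {'warning_stream': 'warnings.log', 'title': 'untouched'}
# >>> make_paths_absolute(pathdict, ('warning_stream',), '/project')
# >>> pathdict['warning_stream']
# '/project/warnings.log'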
class Values(optparse.Values):
"""
Updates list attributes by extension rather than by replacement.
Works in conjunction with the `OptionParser.lists` instance attribute.
"""
def __init__(self, *args, **kwargs):
optparse.Values.__init__(self, *args, **kwargs)
if (not hasattr(self, 'record_dependencies')
or self.record_dependencies is None):
# Set up dependency list, in case it is needed.
self.record_dependencies = docutils.utils.DependencyList()
def update(self, other_dict, option_parser):
if isinstance(other_dict, Values):
other_dict = other_dict.__dict__
other_dict = other_dict.copy()
for setting in option_parser.lists.keys():
if (hasattr(self, setting) and setting in other_dict):
value = getattr(self, setting)
if value:
value += other_dict[setting]
del other_dict[setting]
self._update_loose(other_dict)
def copy(self):
"""Return a shallow copy of `self`."""
return self.__class__(defaults=self.__dict__)
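# Hedged sketch (not in the original source) of the cumulative-list
# behaviour, assuming 'strip_classes' is registered in parser.lists:
#
# >>> v = Values({'strip_classes': ['a']})
# >>> v.update({'strip_classes': ['b'], 'title': 'T'}, parser)
# >>> v.strip_classes # extended, not replaced
# ['a', 'b']
# >>> v.title
# 'T'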
class Option(optparse.Option):
ATTRS = optparse.Option.ATTRS + ['validator', 'overrides']
def process(self, opt, value, values, parser):
"""
Call the validator function on applicable settings and
evaluate the 'overrides' option.
Extends `optparse.Option.process`.
"""
result = optparse.Option.process(self, opt, value, values, parser)
setting = self.dest
if setting:
if self.validator:
value = getattr(values, setting)
try:
new_value = self.validator(setting, value, parser)
except Exception, error:
raise optparse.OptionValueError(
'Error in option "%s":\n %s: %s'
% (opt, error.__class__.__name__, error)), None, sys.exc_info()[2]
setattr(values, setting, new_value)
if self.overrides:
setattr(values, self.overrides, None)
return result
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
"""
Parser for command-line and library use. The `settings_spec`
specifications here and in other Docutils components are merged to build
the set of command-line options and runtime settings for this process.
Common settings (defined below) and component-specific settings must not
conflict. Short options are reserved for common settings, and components
are restricted to using long options.
"""
standard_config_files = [
'/etc/docutils.conf', # system-wide
'./docutils.conf', # project-specific
'~/.docutils'] # user-specific
"""Docutils configuration files, using ConfigParser syntax. Filenames
will be tilde-expanded later. Later files override earlier ones."""
threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
"""Possible inputs for for --report and --halt threshold values."""
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
booleans = {'1': 1, 'on': 1, 'yes': 1, 'true': 1,
'0': 0, 'off': 0, 'no': 0, 'false': 0, '': 0}
"""Lookup table for boolean configuration file settings."""
try:
default_error_encoding = sys.stderr.encoding or 'ascii'
except AttributeError:
default_error_encoding = 'ascii'
# TODO: variable no longer needed since 'backslashreplace' is
# part of Python >= 2.3 (required since Docutils 0.6)
if hasattr(codecs, 'backslashreplace_errors'):
default_error_encoding_error_handler = 'backslashreplace'
else:
default_error_encoding_error_handler = 'replace'
settings_spec = (
'General Docutils Options',
None,
(('Specify the document title as metadata.',
['--title'], {}),
('Include a "Generated by Docutils" credit and link.',
['--generator', '-g'], {'action': 'store_true',
'validator': validate_boolean}),
('Do not include a generator credit.',
['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
('Include the date at the end of the document (UTC).',
['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
'dest': 'datestamp'}),
('Include the time & date (UTC).',
['--time', '-t'], {'action': 'store_const',
'const': '%Y-%m-%d %H:%M UTC',
'dest': 'datestamp'}),
('Do not include a datestamp of any kind.',
['--no-datestamp'], {'action': 'store_const', 'const': None,
'dest': 'datestamp'}),
('Include a "View document source" link.',
['--source-link', '-s'], {'action': 'store_true',
'validator': validate_boolean}),
('Use <URL> for a source link; implies --source-link.',
['--source-url'], {'metavar': '<URL>'}),
('Do not include a "View document source" link.',
['--no-source-link'],
{'action': 'callback', 'callback': store_multiple,
'callback_args': ('source_link', 'source_url')}),
('Link from section headers to TOC entries. (default)',
['--toc-entry-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
'default': 'entry'}),
('Link from section headers to the top of the TOC.',
['--toc-top-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
('Disable backlinks to the table of contents.',
['--no-toc-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_false'}),
('Link from footnotes/citations to references. (default)',
['--footnote-backlinks'],
{'action': 'store_true', 'default': 1,
'validator': validate_boolean}),
('Disable backlinks from footnotes and citations.',
['--no-footnote-backlinks'],
{'dest': 'footnote_backlinks', 'action': 'store_false'}),
('Enable section numbering by Docutils. (default)',
['--section-numbering'],
{'action': 'store_true', 'dest': 'sectnum_xform',
'default': 1, 'validator': validate_boolean}),
('Disable section numbering by Docutils.',
['--no-section-numbering'],
{'action': 'store_false', 'dest': 'sectnum_xform'}),
('Remove comment elements from the document tree.',
['--strip-comments'],
{'action': 'store_true', 'validator': validate_boolean}),
('Leave comment elements in the document tree. (default)',
['--leave-comments'],
{'action': 'store_false', 'dest': 'strip_comments'}),
('Remove all elements with classes="<class>" from the document tree. '
'Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-elements-with-class'],
{'action': 'append', 'dest': 'strip_elements_with_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Remove all classes="<class>" attributes from elements in the '
'document tree. Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-class'],
{'action': 'append', 'dest': 'strip_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Report system messages at or higher than <level>: "info" or "1", '
'"warning"/"2" (default), "error"/"3", "severe"/"4", "none"/"5"',
['--report', '-r'], {'choices': threshold_choices, 'default': 2,
'dest': 'report_level', 'metavar': '<level>',
'validator': validate_threshold}),
('Report all system messages. (Same as "--report=1".)',
['--verbose', '-v'], {'action': 'store_const', 'const': 1,
'dest': 'report_level'}),
('Report no system messages. (Same as "--report=5".)',
['--quiet', '-q'], {'action': 'store_const', 'const': 5,
'dest': 'report_level'}),
('Halt execution at system messages at or above <level>. '
'Levels as in --report. Default: 4 (severe).',
['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
'default': 4, 'metavar': '<level>',
'validator': validate_threshold}),
('Halt at the slightest problem. Same as "--halt=info".',
['--strict'], {'action': 'store_const', 'const': 1,
'dest': 'halt_level'}),
('Enable a non-zero exit status for non-halting system messages at '
'or above <level>. Default: 5 (disabled).',
['--exit-status'], {'choices': threshold_choices,
'dest': 'exit_status_level',
'default': 5, 'metavar': '<level>',
'validator': validate_threshold}),
('Enable debug-level system messages and diagnostics.',
['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
('Disable debug output. (default)',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
('Enable Python tracebacks when Docutils is halted.',
['--traceback'], {'action': 'store_true', 'default': None,
'validator': validate_boolean}),
('Disable Python tracebacks. (default)',
['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
('Specify the encoding and optionally the '
'error handler of input text. Default: <locale-dependent>:strict.',
['--input-encoding', '-i'],
{'metavar': '<name[:handler]>',
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for undecodable characters. '
'Choices: "strict" (default), "ignore", and "replace".',
['--input-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify the text encoding and optionally the error handler for '
'output. Default: UTF-8:strict.',
['--output-encoding', '-o'],
{'metavar': '<name[:handler]>', 'default': 'utf-8',
'validator': validate_encoding_and_error_handler}),
('Specify error handler for unencodable output characters; '
'"strict" (default), "ignore", "replace", '
'"xmlcharrefreplace", "backslashreplace".',
['--output-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify text encoding and error handler for error output. '
'Default: %s:%s.'
% (default_error_encoding, default_error_encoding_error_handler),
['--error-encoding', '-e'],
{'metavar': '<name[:handler]>', 'default': default_error_encoding,
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for unencodable characters in '
'error output. Default: %s.'
% default_error_encoding_error_handler,
['--error-encoding-error-handler'],
{'default': default_error_encoding_error_handler,
'validator': validate_encoding_error_handler}),
('Specify the language (as 2-letter code). Default: en.',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
'metavar': '<name>'}),
('Write output file dependencies to <file>.',
['--record-dependencies'],
{'metavar': '<file>', 'validator': validate_dependency_file,
'default': None}), # default set in Values class
('Read configuration settings from <file>, if it exists.',
['--config'], {'metavar': '<file>', 'type': 'string',
'action': 'callback', 'callback': read_config_file}),
("Show this program's version number and exit.",
['--version', '-V'], {'action': 'version'}),
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
# Typically not useful for non-programmatic use:
(SUPPRESS_HELP, ['--id-prefix'], {'default': ''}),
(SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}),
# Hidden options, for development use only:
(SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--expose-internal-attribute'],
{'action': 'append', 'dest': 'expose_internals',
'validator': validate_colon_separated_string_list}),
(SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}),
))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
settings_defaults = {'_disable_config': None,
'_source': None,
'_destination': None,
'_config_files': None}
"""Defaults for settings that don't have command-line option equivalents."""
relative_path_settings = ('warning_stream',)
config_section = 'general'
version_template = ('%%prog (Docutils %s [%s], Python %s, on %s)'
% (docutils.__version__, docutils.__version_details__,
sys.version.split()[0], sys.platform))
"""Default version message."""
def __init__(self, components=(), defaults=None, read_config_files=None,
*args, **kwargs):
"""
`components` is a list of Docutils components each containing a
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
self.lists = {}
"""Set of list-type settings."""
self.config_files = []
"""List of paths of applied configuration files."""
optparse.OptionParser.__init__(
self, option_class=Option, add_help_option=None,
formatter=optparse.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
self.components = (self,) + tuple(components)
self.populate_from_components(self.components)
self.set_defaults_from_dict(defaults or {})
if read_config_files and not self.defaults['_disable_config']:
try:
config_settings = self.get_standard_config_settings()
except ValueError, error:
self.error(error)
self.set_defaults_from_dict(config_settings.__dict__)
def populate_from_components(self, components):
"""
For each component, first populate from the `SettingsSpec.settings_spec`
structure, then from the `SettingsSpec.settings_defaults` dictionary.
After all components have been processed, check for and populate from
each component's `SettingsSpec.settings_default_overrides` dictionary.
"""
for component in components:
if component is None:
continue
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
for i in range(0, len(settings_spec), 3):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optparse.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
option = group.add_option(help=help_text, *option_strings,
**kwargs)
if kwargs.get('action') == 'append':
self.lists[option.dest] = 1
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides)
def get_standard_config_files(self):
"""Return list of config files, from environment or standard."""
try:
config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
except KeyError:
config_files = self.standard_config_files
# If 'HOME' is not set, expandvars() requires the 'pwd' module which is
# not available under certain environments, for example, within
# mod_python. The publisher ends up in here, and we need to publish
# from within mod_python. Therefore we need to avoid expanding when we
# are in those environments.
expand = os.path.expanduser
if 'HOME' not in os.environ:
try:
import pwd
except ImportError:
expand = lambda x: x
return [expand(f) for f in config_files if f.strip()]
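# Hedged example (not in the original source): with the environment
# variable DOCUTILSCONFIG='/etc/docutils.conf:~/project.conf' (POSIX
# path separator), this returns
# ['/etc/docutils.conf', '/home/user/project.conf'] (tilde-expanded,
# assuming HOME=/home/user); an empty DOCUTILSCONFIG disables all
# configuration files.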
def get_standard_config_settings(self):
settings = Values()
for filename in self.get_standard_config_files():
settings.update(self.get_config_file_settings(filename), self)
return settings
def get_config_file_settings(self, config_file):
"""Returns a dictionary containing appropriate config file settings."""
parser = ConfigParser()
parser.read(config_file, self)
self.config_files.extend(parser._files)
base_path = os.path.dirname(config_file)
applied = {}
settings = Values()
for component in self.components:
if not component:
continue
for section in (tuple(component.config_section_dependencies or ())
+ (component.config_section,)):
if section in applied:
continue
applied[section] = 1
settings.update(parser.get_section(section), self)
make_paths_absolute(
settings.__dict__, self.relative_path_settings, base_path)
return settings.__dict__
def check_values(self, values, args):
"""Store positional arguments as runtime settings."""
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings,
os.getcwd())
values._config_files = self.config_files
return values
def check_args(self, args):
source = destination = None
if args:
source = args.pop(0)
if source == '-': # means stdin
source = None
if args:
destination = args.pop(0)
if destination == '-': # means stdout
destination = None
if args:
self.error('Maximum 2 arguments allowed.')
if source and source == destination:
self.error('Do not specify the same file for both source and '
'destination. It will clobber the source file.')
return source, destination
def set_defaults_from_dict(self, defaults):
self.defaults.update(defaults)
def get_default_values(self):
"""Needed to get custom `Values` instances."""
defaults = Values(self.defaults)
defaults._config_files = self.config_files
return defaults
def get_option_by_dest(self, dest):
"""
Get an option by its dest.
If the supplied dest is shared by several options, it is
undefined which of those options is returned.
A KeyError is raised if there is no option with the supplied
dest.
"""
for group in self.option_groups + [self]:
for option in group.option_list:
if option.dest == dest:
return option
raise KeyError('No option with dest == %r.' % dest)
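# A minimal usage sketch (not part of the original source); the argument
# values are hypothetical and no components are registered:
#
# >>> parser = OptionParser(components=(), read_config_files=1)
# >>> settings = parser.parse_args(['--report=error', 'in.txt'])
# >>> settings.report_level # 'error' mapped through validate_threshold
# 3
# >>> settings._source
# 'in.txt'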
class ConfigParser(CP.ConfigParser):
old_settings = {
'pep_stylesheet': ('pep_html writer', 'stylesheet'),
'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
'pep_template': ('pep_html writer', 'template')}
"""{old setting: (new section, new setting)} mapping, used by
`handle_old_config`, to convert settings from the old [options] section."""
old_warning = """
The "[option]" section is deprecated. Support for old-format configuration
files may be removed in a future Docutils release. Please revise your
configuration files. See <http://docutils.sf.net/docs/user/config.html>,
section "Old-Format Configuration Files".
"""
not_utf8_error = """\
Unable to read configuration file "%s": content not encoded as UTF-8.
Skipping "%s" configuration file.
"""
def __init__(self, *args, **kwargs):
CP.ConfigParser.__init__(self, *args, **kwargs)
self._files = []
"""List of paths of configuration files read."""
def read(self, filenames, option_parser):
if type(filenames) in (str, unicode):
filenames = [filenames]
for filename in filenames:
try:
# Config files must be UTF-8-encoded:
fp = codecs.open(filename, 'r', 'utf-8')
except IOError:
continue
try:
CP.ConfigParser.readfp(self, fp, filename)
except UnicodeDecodeError:
sys.stderr.write(self.not_utf8_error % (filename, filename))
fp.close()
continue
fp.close()
self._files.append(filename)
if self.has_section('options'):
self.handle_old_config(filename)
self.validate_settings(filename, option_parser)
def handle_old_config(self, filename):
warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
filename, 0)
options = self.get_section('options')
if not self.has_section('general'):
self.add_section('general')
for key, value in options.items():
if key in self.old_settings:
section, setting = self.old_settings[key]
if not self.has_section(section):
self.add_section(section)
else:
section = 'general'
setting = key
if not self.has_option(section, setting):
self.set(section, setting, value)
self.remove_section('options')
def validate_settings(self, filename, option_parser):
"""
Call the validator function and implement overrides on all applicable
settings.
"""
for section in self.sections():
for setting in self.options(section):
try:
option = option_parser.get_option_by_dest(setting)
except KeyError:
continue
if option.validator:
value = self.get(section, setting, raw=1)
try:
new_value = option.validator(
setting, value, option_parser,
config_parser=self, config_section=section)
except Exception, error:
raise ValueError(
'Error in config file "%s", section "[%s]":\n'
' %s: %s\n %s = %s'
% (filename, section, error.__class__.__name__,
error, setting, value)), None, sys.exc_info()[2]
self.set(section, setting, new_value)
if option.overrides:
self.set(section, option.overrides, None)
def optionxform(self, optionstr):
"""
Transform '-' to '_' so the cmdline form of option names can be used.
"""
return optionstr.lower().replace('-', '_')
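# Hedged example (not in the original source): a config file line
# "input-encoding: utf-8" is read back under the key 'input_encoding',
# matching the command-line option's dest name.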
def get_section(self, section):
"""
Return a given section as a dictionary (empty if the section
doesn't exist).
"""
section_dict = {}
if self.has_section(section):
for option in self.options(section):
section_dict[option] = self.get(section, option, raw=1)
return section_dict
class ConfigDeprecationWarning(DeprecationWarning):
"""Warning for deprecated configuration file features."""
| {
"repo_name": "bobthecow/ManipulateCoda",
"path": "src/Support/Library/docutils/frontend.py",
"copies": "6",
"size": "33065",
"license": "mit",
"hash": -2262406258164184600,
"line_mean": 42.4494086728,
"line_max": 82,
"alpha_frac": 0.5704218963,
"autogenerated": false,
"ratio": 4.44302606826122,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.801344796456122,
"avg_score": null,
"num_lines": null
} |
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class TitlePromoter(Transform):
"""
Abstract base class for DocTitle and SectionSubTitle transforms.
"""
def promote_title(self, node):
"""
Transform the following tree::
<node>
<section>
<title>
...
into ::
<node>
<title>
...
`node` is normally a document.
"""
# `node` must not have a title yet.
assert not (len(node) and isinstance(node[0], nodes.title))
section, index = self.candidate_index(node)
if index is None:
return None
# Transfer the section's attributes to the node:
node.attributes.update(section.attributes)
# setup_child is called automatically for all nodes.
node[:] = (section[:1] # section title
+ node[:index] # everything that was in the
# node before the section
+ section[1:]) # everything that was in the section
assert isinstance(node[0], nodes.title)
return 1
def promote_subtitle(self, node):
"""
Transform the following node tree::
<node>
<title>
<section>
<title>
...
into ::
<node>
<title>
<subtitle>
...
"""
subsection, index = self.candidate_index(node)
if index is None:
return None
subtitle = nodes.subtitle()
# Transfer the subsection's attributes to the new subtitle:
# This causes trouble with list attributes! To do: Write a
# test case which catches direct access to the `attributes`
# dictionary and/or write a test case which shows problems in
# this particular case.
subtitle.attributes.update(subsection.attributes)
# We're losing the subtitle's attributes here! To do: Write a
# test case which shows this behavior.
# Transfer the contents of the subsection's title to the
# subtitle:
subtitle[:] = subsection[0][:]
node[:] = (node[:1] # title
+ [subtitle]
# everything that was before the section:
+ node[1:index]
# everything that was in the subsection:
+ subsection[1:])
return 1
def candidate_index(self, node):
"""
Find and return the promotion candidate and its index.
Return (None, None) if no valid candidate was found.
"""
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index], index
class DocTitle(TitlePromoter):
"""
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
two-step transform to "raise" or "promote" the title(s) (and their
corresponding section contents) to the document level.
1. If the document contains a single top-level section as its
first non-comment element, the top-level section's title
becomes the document's title, and the top-level section's
contents become the document's immediate contents. The lone
top-level section header must be the first non-comment element
in the document.
For example, take this input text::
=================
Top-Level Title
=================
A paragraph.
Once parsed, it looks like this::
<document>
<section names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
After running the DocTitle transform, we have::
<document names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
2. If step 1 successfully determines the document title, we
continue by checking for a subtitle.
If the lone top-level section itself contains a single
second-level section as its first non-comment element, that
section's title is promoted to the document's subtitle, and
that section's contents become the document's immediate
contents. Given this input text::
=================
Top-Level Title
=================
Second-Level Title
~~~~~~~~~~~~~~~~~~
A paragraph.
After parsing and running the Section Promotion transform, the
result is::
<document names="top-level title">
<title>
Top-Level Title
<subtitle names="second-level title">
Second-Level Title
<paragraph>
A paragraph.
(Note that the implicit hyperlink target generated by the
"Second-Level Title" is preserved on the "subtitle" element
itself.)
Any comment elements occurring before the document title or
subtitle are accumulated and inserted as the first body elements
after the title(s).
This transform also sets the document's metadata title
(document['title']).
.. _reStructuredText: http://docutils.sf.net/rst.html
"""
default_priority = 320
def set_metadata(self):
"""
Set document['title'] metadata title from the following
sources, listed in order of priority:
* Existing document['title'] attribute.
* "title" setting.
* Document title node (as promoted by promote_title).
"""
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['title'] = self.document[0].astext()
def apply(self):
if getattr(self.document.settings, 'doctitle_xform', 1):
# promote_(sub)title defined in TitlePromoter base class.
if self.promote_title(self.document):
# If a title has been promoted, also try to promote a
# subtitle.
self.promote_subtitle(self.document)
# Set document['title'].
self.set_metadata()
class SectionSubTitle(TitlePromoter):
"""
This works like document subtitles, but for sections. For example, ::
<section>
<title>
Title
<section>
<title>
Subtitle
...
is transformed into ::
<section>
<title>
Title
<subtitle>
Subtitle
...
For details refer to the docstring of DocTitle.
"""
default_priority = 350
def apply(self):
if not getattr(self.document.settings, 'sectsubtitle_xform', 1):
return
for section in self.document.traverse(nodes.section):
# On our way through the node tree, we are deleting
# sections, but we call self.promote_subtitle for those
# sections nonetheless. To do: Write a test case which
# shows the problem and discuss on Docutils-develop.
self.promote_subtitle(section)
class DocInfo(Transform):
"""
This transform is specific to the reStructuredText_ markup syntax;
see "Bibliographic Fields" in the `reStructuredText Markup
Specification`_ for a high-level description. This transform
should be run *after* the `DocTitle` transform.
Given a field list as the first non-comment element after the
document title and subtitle (if present), registered bibliographic
field names are transformed to the corresponding DTD elements,
becoming child elements of the "docinfo" element (except for a
dedication and/or an abstract, which become "topic" elements after
"docinfo").
For example, given this document fragment after parsing::
<document>
<title>
Document Title
<field_list>
<field>
<field_name>
Author
<field_body>
<paragraph>
A. Name
<field>
<field_name>
Status
<field_body>
<paragraph>
$RCSfile$
...
After running the bibliographic field list transform, the
resulting document tree would look like this::
<document>
<title>
Document Title
<docinfo>
<author>
A. Name
<status>
frontmatter.py
...
The "Status" field contained an expanded RCS keyword, which is
normally (but optionally) cleaned up by the transform. The sole
contents of the field body must be a paragraph containing an
expanded RCS keyword of the form "$keyword: expansion text $". Any
RCS keyword can be processed in any bibliographic field. The
dollar signs and leading RCS keyword name are removed. Extra
processing is done for the following RCS keywords:
- "RCSfile" expands to the name of the file in the RCS or CVS
repository, which is the name of the source file with a ",v"
suffix appended. The transform will remove the ",v" suffix.
- "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
time zone). The RCS Keywords transform will extract just the
date itself and transform it to an ISO 8601 format date, as in
"2000-12-31".
(Since the source file for this text is itself stored under CVS,
we can't show an example of the "Date" RCS keyword because we
can't prevent any RCS keywords used in this explanation from
being expanded. Only the "RCSfile" keyword is stable; its
expansion text changes only if the file name changes.)
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _reStructuredText Markup Specification:
http://docutils.sf.net/docs/ref/rst/restructuredtext.html
"""
default_priority = 340
biblio_nodes = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Canonical field name (lowcased) to node class name mapping for
bibliographic fields (field_list)."""
def apply(self):
if not getattr(self.document.settings, 'docinfo_xform', 1):
return
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None:
return
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
(nodes.Titular, nodes.Decorative))
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
bibliofields = self.language.bibliographic_fields
labels = self.language.labels
topics = {'dedication': None, 'abstract': None}
for field in field_list:
try:
name = field[0][0].astext()
normedname = nodes.fully_normalize_name(name)
if not (len(field) == 2 and normedname in bibliofields
and self.check_empty_biblio_field(field, name)):
raise TransformError
canonical = bibliofields[normedname]
biblioclass = self.biblio_nodes[canonical]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
elif issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[canonical]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
'', title, classes=[canonical], *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
docinfo.append(field)
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
for name in ('dedication', 'abstract'):
if topics[name]:
nodelist.append(topics[name])
return nodelist
def check_empty_biblio_field(self, field, name):
if len(field[-1]) < 1:
field[-1] += self.document.reporter.warning(
'Cannot extract empty bibliographic field "%s".' % name,
base_node=field)
return None
return 1
def check_compound_biblio_field(self, field, name):
if len(field[-1]) > 1:
field[-1] += self.document.reporter.warning(
'Cannot extract compound bibliographic field "%s".' % name,
base_node=field)
return None
if not isinstance(field[-1][0], nodes.paragraph):
field[-1] += self.document.reporter.warning(
'Cannot extract bibliographic field "%s" containing '
'anything other than a single paragraph.' % name,
base_node=field)
return None
return 1
rcs_keyword_substitutions = [
(re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
(re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
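# Hedged illustration (not in the original source) of the substitutions
# above; the keyword strings are split so they are not expanded here:
#
# >>> subs = DocInfo.rcs_keyword_substitutions
# >>> subs[0][0].sub(subs[0][1], '$' 'Date: 2000/12/31 23:59:59 $')
# '2000-12-31'
# >>> subs[1][0].sub(subs[1][1], '$' 'RCSfile: frontmatter.py,v $')
# 'frontmatter.py'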
def extract_authors(self, field, name, docinfo):
try:
if len(field[1]) == 1:
if isinstance(field[1][0], nodes.paragraph):
authors = self.authors_from_one_paragraph(field)
elif isinstance(field[1][0], nodes.bullet_list):
authors = self.authors_from_bullet_list(field)
else:
raise TransformError
else:
authors = self.authors_from_paragraphs(field)
authornodes = [nodes.author('', '', *author)
for author in authors if author]
if len(authornodes) >= 1:
docinfo.append(nodes.authors('', *authornodes))
else:
raise TransformError
except TransformError:
field[-1] += self.document.reporter.warning(
'Bibliographic field "%s" incompatible with extraction: '
'it must contain either a single paragraph (with authors '
'separated by one of "%s"), multiple paragraphs (one per '
'author), or a bullet list with one paragraph (one author) '
'per item.'
% (name, ''.join(self.language.author_separators)),
base_node=field)
raise
def authors_from_one_paragraph(self, field):
text = field[1][0].astext().strip()
if not text:
raise TransformError
for authorsep in self.language.author_separators:
authornames = text.split(authorsep)
if len(authornames) > 1:
break
authornames = [author.strip() for author in authornames]
authors = [[nodes.Text(author)] for author in authornames if author]
return authors
def authors_from_bullet_list(self, field):
authors = []
for item in field[1][0]:
if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
raise TransformError
authors.append(item[0].children)
if not authors:
raise TransformError
return authors
def authors_from_paragraphs(self, field):
for item in field[1]:
if not isinstance(item, nodes.paragraph):
raise TransformError
authors = [item.children for item in field[1]]
return authors
| {
"repo_name": "PatrickKennedy/Sybil",
"path": "docutils/transforms/frontmatter.py",
"copies": "2",
"size": "18886",
"license": "bsd-2-clause",
"hash": -1540852886770211000,
"line_mean": 35.88671875,
"line_max": 82,
"alpha_frac": 0.5627978397,
"autogenerated": false,
"ratio": 4.696841581696096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6259639421396096,
"avg_score": null,
"num_lines": null
} |
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class TitlePromoter(Transform):
"""
Abstract base class for DocTitle and SectionSubTitle transforms.
"""
def promote_title(self, node):
"""
Transform the following tree::
<node>
<section>
<title>
...
into ::
<node>
<title>
...
`node` is normally a document.
"""
# `node` must not have a title yet.
assert not (len(node) and isinstance(node[0], nodes.title))
section, index = self.candidate_index(node)
if index is None:
return None
# Transfer the section's attributes to the node:
node.attributes.update(section.attributes)
# setup_child is called automatically for all nodes.
node[:] = (section[:1] # section title
+ node[:index] # everything that was in the
# node before the section
+ section[1:]) # everything that was in the section
assert isinstance(node[0], nodes.title)
return 1
def promote_subtitle(self, node):
"""
Transform the following node tree::
<node>
<title>
<section>
<title>
...
into ::
<node>
<title>
<subtitle>
...
"""
subsection, index = self.candidate_index(node)
if index is None:
return None
subtitle = nodes.subtitle()
# Transfer the subsection's attributes to the new subtitle:
# This causes trouble with list attributes! To do: Write a
# test case which catches direct access to the `attributes`
# dictionary and/or write a test case which shows problems in
# this particular case.
subtitle.attributes.update(subsection.attributes)
# We're losing the subtitle's attributes here! To do: Write a
# test case which shows this behavior.
# Transfer the contents of the subsection's title to the
# subtitle:
subtitle[:] = subsection[0][:]
node[:] = (node[:1] # title
+ [subtitle]
# everything that was before the section:
+ node[1:index]
# everything that was in the subsection:
+ subsection[1:])
return 1
def candidate_index(self, node):
"""
Find and return the promotion candidate and its index.
Return (None, None) if no valid candidate was found.
"""
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index], index
class DocTitle(TitlePromoter):
"""
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
two-step transform to "raise" or "promote" the title(s) (and their
corresponding section contents) to the document level.
1. If the document contains a single top-level section as its
first non-comment element, the top-level section's title
becomes the document's title, and the top-level section's
contents become the document's immediate contents. The lone
top-level section header must be the first non-comment element
in the document.
For example, take this input text::
=================
Top-Level Title
=================
A paragraph.
Once parsed, it looks like this::
<document>
<section names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
After running the DocTitle transform, we have::
<document names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
2. If step 1 successfully determines the document title, we
continue by checking for a subtitle.
If the lone top-level section itself contains a single
second-level section as its first non-comment element, that
section's title is promoted to the document's subtitle, and
that section's contents become the document's immediate
contents. Given this input text::
=================
Top-Level Title
=================
Second-Level Title
~~~~~~~~~~~~~~~~~~
A paragraph.
After parsing and running the Section Promotion transform, the
result is::
<document names="top-level title">
<title>
Top-Level Title
<subtitle names="second-level title">
Second-Level Title
<paragraph>
A paragraph.
(Note that the implicit hyperlink target generated by the
"Second-Level Title" is preserved on the "subtitle" element
itself.)
Any comment elements occurring before the document title or
subtitle are accumulated and inserted as the first body elements
after the title(s).
This transform also sets the document's metadata title
(document['title']).
.. _reStructuredText: http://docutils.sf.net/rst.html
"""
default_priority = 320
def set_metadata(self):
"""
Set document['title'] metadata title from the following
sources, listed in order of priority:
* Existing document['title'] attribute.
* "title" setting.
* Document title node (as promoted by promote_title).
"""
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['title'] = self.document[0].astext()
def apply(self):
if getattr(self.document.settings, 'doctitle_xform', 1):
# promote_(sub)title defined in TitlePromoter base class.
if self.promote_title(self.document):
# If a title has been promoted, also try to promote a
# subtitle.
self.promote_subtitle(self.document)
# Set document['title'].
self.set_metadata()
class SectionSubTitle(TitlePromoter):
"""
This works like document subtitles, but for sections. For example, ::
<section>
<title>
Title
<section>
<title>
Subtitle
...
is transformed into ::
<section>
<title>
Title
<subtitle>
Subtitle
...
For details refer to the docstring of DocTitle.
"""
default_priority = 350
def apply(self):
if not getattr(self.document.settings, 'sectsubtitle_xform', 1):
return
for section in self.document.traverse(nodes.section):
# On our way through the node tree, we are deleting
# sections, but we call self.promote_subtitle for those
# sections nonetheless. To do: Write a test case which
# shows the problem and discuss on Docutils-develop.
self.promote_subtitle(section)
class DocInfo(Transform):
"""
This transform is specific to the reStructuredText_ markup syntax;
see "Bibliographic Fields" in the `reStructuredText Markup
Specification`_ for a high-level description. This transform
should be run *after* the `DocTitle` transform.
Given a field list as the first non-comment element after the
document title and subtitle (if present), registered bibliographic
field names are transformed to the corresponding DTD elements,
becoming child elements of the "docinfo" element (except for a
dedication and/or an abstract, which become "topic" elements after
"docinfo").
For example, given this document fragment after parsing::
<document>
<title>
Document Title
<field_list>
<field>
<field_name>
Author
<field_body>
<paragraph>
A. Name
<field>
<field_name>
Status
<field_body>
<paragraph>
$RCSfile$
...
After running the bibliographic field list transform, the
resulting document tree would look like this::
<document>
<title>
Document Title
<docinfo>
<author>
A. Name
<status>
frontmatter.py
...
The "Status" field contained an expanded RCS keyword, which is
normally (but optionally) cleaned up by the transform. The sole
contents of the field body must be a paragraph containing an
expanded RCS keyword of the form "$keyword: expansion text $". Any
RCS keyword can be processed in any bibliographic field. The
dollar signs and leading RCS keyword name are removed. Extra
processing is done for the following RCS keywords:
- "RCSfile" expands to the name of the file in the RCS or CVS
repository, which is the name of the source file with a ",v"
suffix appended. The transform will remove the ",v" suffix.
- "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
time zone). The RCS Keywords transform will extract just the
date itself and transform it to an ISO 8601 format date, as in
"2000-12-31".
(Since the source file for this text is itself stored under CVS,
we can't show an example of the "Date" RCS keyword because we
can't prevent any RCS keywords used in this explanation from
being expanded. Only the "RCSfile" keyword is stable; its
expansion text changes only if the file name changes.)
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _reStructuredText Markup Specification:
http://docutils.sf.net/docs/ref/rst/restructuredtext.html
"""
default_priority = 340
biblio_nodes = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Canonical field name (lowcased) to node class name mapping for
bibliographic fields (field_list)."""
def apply(self):
if not getattr(self.document.settings, 'docinfo_xform', 1):
return
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None:
return
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
(nodes.Titular, nodes.Decorative))
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
bibliofields = self.language.bibliographic_fields
labels = self.language.labels
topics = {'dedication': None, 'abstract': None}
for field in field_list:
try:
name = field[0][0].astext()
normedname = nodes.fully_normalize_name(name)
if not (len(field) == 2 and normedname in bibliofields
and self.check_empty_biblio_field(field, name)):
raise TransformError
canonical = bibliofields[normedname]
biblioclass = self.biblio_nodes[canonical]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
elif issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[canonical]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
'', title, classes=[canonical], *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
docinfo.append(field)
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
for name in ('dedication', 'abstract'):
if topics[name]:
nodelist.append(topics[name])
return nodelist
def check_empty_biblio_field(self, field, name):
if len(field[-1]) < 1:
field[-1] += self.document.reporter.warning(
'Cannot extract empty bibliographic field "%s".' % name,
base_node=field)
return None
return 1
def check_compound_biblio_field(self, field, name):
if len(field[-1]) > 1:
field[-1] += self.document.reporter.warning(
'Cannot extract compound bibliographic field "%s".' % name,
base_node=field)
return None
if not isinstance(field[-1][0], nodes.paragraph):
field[-1] += self.document.reporter.warning(
'Cannot extract bibliographic field "%s" containing '
'anything other than a single paragraph.' % name,
base_node=field)
return None
return 1
rcs_keyword_substitutions = [
(re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
(re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
def extract_authors(self, field, name, docinfo):
try:
if len(field[1]) == 1:
if isinstance(field[1][0], nodes.paragraph):
authors = self.authors_from_one_paragraph(field)
elif isinstance(field[1][0], nodes.bullet_list):
authors = self.authors_from_bullet_list(field)
else:
raise TransformError
else:
authors = self.authors_from_paragraphs(field)
authornodes = [nodes.author('', '', *author)
for author in authors if author]
if len(authornodes) >= 1:
docinfo.append(nodes.authors('', *authornodes))
else:
raise TransformError
except TransformError:
field[-1] += self.document.reporter.warning(
'Bibliographic field "%s" incompatible with extraction: '
'it must contain either a single paragraph (with authors '
'separated by one of "%s"), multiple paragraphs (one per '
'author), or a bullet list with one paragraph (one author) '
'per item.'
% (name, ''.join(self.language.author_separators)),
base_node=field)
raise
def authors_from_one_paragraph(self, field):
text = field[1][0].astext().strip()
if not text:
raise TransformError
for authorsep in self.language.author_separators:
authornames = text.split(authorsep)
if len(authornames) > 1:
break
authornames = [author.strip() for author in authornames]
authors = [[nodes.Text(author)] for author in authornames if author]
return authors
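# Hedged example (not in the original source): assuming ';' is among the
# language's author_separators, a one-paragraph field body reading
# "A. Name; B. Name" splits into two entries, each a list with a single
# Text node: [[Text('A. Name')], [Text('B. Name')]].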
def authors_from_bullet_list(self, field):
authors = []
for item in field[1][0]:
if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
raise TransformError
authors.append(item[0].children)
if not authors:
raise TransformError
return authors
def authors_from_paragraphs(self, field):
for item in field[1]:
if not isinstance(item, nodes.paragraph):
raise TransformError
authors = [item.children for item in field[1]]
return authors
| {
"repo_name": "rimbalinux/LMD3",
"path": "docutils/transforms/frontmatter.py",
"copies": "2",
"size": "19391",
"license": "bsd-3-clause",
"hash": -5619662392838649000,
"line_mean": 35.873046875,
"line_max": 82,
"alpha_frac": 0.5478830385,
"autogenerated": false,
"ratio": 4.778462296697881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008947814039408867,
"num_lines": 512
} |
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class TitlePromoter(Transform):
"""
Abstract base class for DocTitle and SectionSubTitle transforms.
"""
def promote_title(self, node):
"""
Transform the following tree::
<node>
<section>
<title>
...
into ::
<node>
<title>
...
`node` is normally a document.
"""
# Type check
if not isinstance(node, nodes.Element):
raise TypeError('node must be of Element-derived type.')
# `node` must not have a title yet.
assert not (len(node) and isinstance(node[0], nodes.title))
section, index = self.candidate_index(node)
if index is None:
return None
# Transfer the section's attributes to the node:
# NOTE: Change second parameter to False to NOT replace
# attributes that already exist in node with those in
# section
# NOTE: Remove third parameter to NOT copy the 'source'
# attribute from section
node.update_all_atts_concatenating(section, True, True)
# setup_child is called automatically for all nodes.
node[:] = (section[:1] # section title
+ node[:index] # everything that was in the
# node before the section
+ section[1:]) # everything that was in the section
assert isinstance(node[0], nodes.title)
return 1
def promote_subtitle(self, node):
"""
Transform the following node tree::
<node>
<title>
<section>
<title>
...
into ::
<node>
<title>
<subtitle>
...
"""
# Type check
if not isinstance(node, nodes.Element):
raise TypeError('node must be of Element-derived type.')
subsection, index = self.candidate_index(node)
if index is None:
return None
subtitle = nodes.subtitle()
# Transfer the subsection's attributes to the new subtitle
# NOTE: Change second parameter to False to NOT replace
# attributes that already exist in node with those in
# section
# NOTE: Remove third parameter to NOT copy the 'source'
# attribute from section
subtitle.update_all_atts_concatenating(subsection, True, True)
# Transfer the contents of the subsection's title to the
# subtitle:
subtitle[:] = subsection[0][:]
node[:] = (node[:1] # title
+ [subtitle]
# everything that was before the section:
+ node[1:index]
# everything that was in the subsection:
+ subsection[1:])
return 1
def candidate_index(self, node):
"""
Find and return the promotion candidate and its index.
Return (None, None) if no valid candidate was found.
"""
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index], index
class DocTitle(TitlePromoter):
"""
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
two-step transform to "raise" or "promote" the title(s) (and their
corresponding section contents) to the document level.
1. If the document contains a single top-level section as its
first non-comment element, the top-level section's title
becomes the document's title, and the top-level section's
contents become the document's immediate contents. The lone
top-level section header must be the first non-comment element
in the document.
For example, take this input text::
=================
Top-Level Title
=================
A paragraph.
Once parsed, it looks like this::
<document>
<section names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
After running the DocTitle transform, we have::
<document names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
2. If step 1 successfully determines the document title, we
continue by checking for a subtitle.
If the lone top-level section itself contains a single
second-level section as its first non-comment element, that
section's title is promoted to the document's subtitle, and
that section's contents become the document's immediate
contents. Given this input text::
=================
Top-Level Title
=================
Second-Level Title
~~~~~~~~~~~~~~~~~~
A paragraph.
After parsing and running the Section Promotion transform, the
result is::
<document names="top-level title">
<title>
Top-Level Title
<subtitle names="second-level title">
Second-Level Title
<paragraph>
A paragraph.
(Note that the implicit hyperlink target generated by the
"Second-Level Title" is preserved on the "subtitle" element
itself.)
Any comment elements occurring before the document title or
subtitle are accumulated and inserted as the first body elements
after the title(s).
This transform also sets the document's metadata title
(document['title']).
.. _reStructuredText: http://docutils.sf.net/rst.html
"""
default_priority = 320
def set_metadata(self):
"""
Set document['title'] metadata title from the following
sources, listed in order of priority:
* Existing document['title'] attribute.
* "title" setting.
* Document title node (as promoted by promote_title).
"""
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['title'] = self.document[0].astext()
def apply(self):
if getattr(self.document.settings, 'doctitle_xform', 1):
# promote_(sub)title defined in TitlePromoter base class.
if self.promote_title(self.document):
# If a title has been promoted, also try to promote a
# subtitle.
self.promote_subtitle(self.document)
# Set document['title'].
self.set_metadata()
class SectionSubTitle(TitlePromoter):
"""
This works like document subtitles, but for sections. For example, ::
<section>
<title>
Title
<section>
<title>
Subtitle
...
is transformed into ::
<section>
<title>
Title
<subtitle>
Subtitle
...
For details refer to the docstring of DocTitle.
"""
default_priority = 350
def apply(self):
if not getattr(self.document.settings, 'sectsubtitle_xform', 1):
return
for section in self.document.traverse(nodes.section):
# On our way through the node tree, we are deleting
# sections, but we call self.promote_subtitle for those
# sections nonetheless. To do: Write a test case which
# shows the problem and discuss on Docutils-develop.
self.promote_subtitle(section)
class DocInfo(Transform):
"""
This transform is specific to the reStructuredText_ markup syntax;
see "Bibliographic Fields" in the `reStructuredText Markup
Specification`_ for a high-level description. This transform
should be run *after* the `DocTitle` transform.
Given a field list as the first non-comment element after the
document title and subtitle (if present), registered bibliographic
field names are transformed to the corresponding DTD elements,
becoming child elements of the "docinfo" element (except for a
dedication and/or an abstract, which become "topic" elements after
"docinfo").
For example, given this document fragment after parsing::
<document>
<title>
Document Title
<field_list>
<field>
<field_name>
Author
<field_body>
<paragraph>
A. Name
<field>
<field_name>
Status
<field_body>
<paragraph>
$RCSfile$
...
After running the bibliographic field list transform, the
resulting document tree would look like this::
<document>
<title>
Document Title
<docinfo>
<author>
A. Name
<status>
frontmatter.py
...
The "Status" field contained an expanded RCS keyword, which is
normally (but optionally) cleaned up by the transform. The sole
contents of the field body must be a paragraph containing an
expanded RCS keyword of the form "$keyword: expansion text $". Any
RCS keyword can be processed in any bibliographic field. The
dollar signs and leading RCS keyword name are removed. Extra
processing is done for the following RCS keywords:
- "RCSfile" expands to the name of the file in the RCS or CVS
repository, which is the name of the source file with a ",v"
suffix appended. The transform will remove the ",v" suffix.
- "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
time zone). The RCS Keywords transform will extract just the
date itself and transform it to an ISO 8601 format date, as in
"2000-12-31".
(Since the source file for this text is itself stored under CVS,
we can't show an example of the "Date" RCS keyword because we
can't prevent any RCS keywords used in this explanation from
being expanded. Only the "RCSfile" keyword is stable; its
expansion text changes only if the file name changes.)
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _reStructuredText Markup Specification:
http://docutils.sf.net/docs/ref/rst/restructuredtext.html
"""
default_priority = 340
biblio_nodes = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Canonical field name (lowcased) to node class name mapping for
bibliographic fields (field_list)."""
def apply(self):
if not getattr(self.document.settings, 'docinfo_xform', 1):
return
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None:
return
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
(nodes.Titular, nodes.Decorative))
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
bibliofields = self.language.bibliographic_fields
labels = self.language.labels
topics = {'dedication': None, 'abstract': None}
for field in field_list:
try:
name = field[0][0].astext()
normedname = nodes.fully_normalize_name(name)
if not (len(field) == 2 and normedname in bibliofields
and self.check_empty_biblio_field(field, name)):
raise TransformError
canonical = bibliofields[normedname]
biblioclass = self.biblio_nodes[canonical]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
elif issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[canonical]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
'', title, classes=[canonical], *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
docinfo.append(field)
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
for name in ('dedication', 'abstract'):
if topics[name]:
nodelist.append(topics[name])
return nodelist
def check_empty_biblio_field(self, field, name):
if len(field[-1]) < 1:
field[-1] += self.document.reporter.warning(
'Cannot extract empty bibliographic field "%s".' % name,
base_node=field)
return None
return 1
def check_compound_biblio_field(self, field, name):
if len(field[-1]) > 1:
field[-1] += self.document.reporter.warning(
'Cannot extract compound bibliographic field "%s".' % name,
base_node=field)
return None
if not isinstance(field[-1][0], nodes.paragraph):
field[-1] += self.document.reporter.warning(
'Cannot extract bibliographic field "%s" containing '
'anything other than a single paragraph.' % name,
base_node=field)
return None
return 1
rcs_keyword_substitutions = [
(re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
(re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
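# Illustrative check (assumed, not part of the original module): the first
# pattern reduces an expanded RCS Date keyword to a plain ISO 8601 date.
#   pat, repl = rcs_keyword_substitutions[0]
#   pat.sub(repl, '$Date: 2000/12/31 23:59:59 $')  ->  '2000-12-31'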
def extract_authors(self, field, name, docinfo):
try:
if len(field[1]) == 1:
if isinstance(field[1][0], nodes.paragraph):
authors = self.authors_from_one_paragraph(field)
elif isinstance(field[1][0], nodes.bullet_list):
authors = self.authors_from_bullet_list(field)
else:
raise TransformError
else:
authors = self.authors_from_paragraphs(field)
authornodes = [nodes.author('', '', *author)
for author in authors if author]
if len(authornodes) >= 1:
docinfo.append(nodes.authors('', *authornodes))
else:
raise TransformError
except TransformError:
field[-1] += self.document.reporter.warning(
'Bibliographic field "%s" incompatible with extraction: '
'it must contain either a single paragraph (with authors '
'separated by one of "%s"), multiple paragraphs (one per '
'author), or a bullet list with one paragraph (one author) '
'per item.'
% (name, ''.join(self.language.author_separators)),
base_node=field)
raise
def authors_from_one_paragraph(self, field):
text = field[1][0].astext().strip()
if not text:
raise TransformError
for authorsep in self.language.author_separators:
authornames = text.split(authorsep)
if len(authornames) > 1:
break
authornames = [author.strip() for author in authornames]
authors = [[nodes.Text(author)] for author in authornames if author]
return authors
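# Illustrative behaviour (assumed, not part of the original module): with
# docutils' English author separators (';' and ','), a field body reading
# "A. Name; B. Name" yields [[Text('A. Name')], [Text('B. Name')]].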
def authors_from_bullet_list(self, field):
authors = []
for item in field[1][0]:
if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
raise TransformError
authors.append(item[0].children)
if not authors:
raise TransformError
return authors
def authors_from_paragraphs(self, field):
for item in field[1]:
if not isinstance(item, nodes.paragraph):
raise TransformError
authors = [item.children for item in field[1]]
return authors
| {
"repo_name": "retomerz/intellij-community",
"path": "python/helpers/py3only/docutils/transforms/frontmatter.py",
"copies": "44",
"size": "19348",
"license": "apache-2.0",
"hash": -7820445597537339000,
"line_mean": 35.5746691871,
"line_max": 82,
"alpha_frac": 0.5625387637,
"autogenerated": false,
"ratio": 4.710981251521792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Miscellaneous functions for use with Python code,
mostly based on Haskell library elements.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from operator import concat, and_, or_
def concatMap(f,vs):
"""
Map function over list and concatenate results.
"""
return reduce( concat, map (f, vs), "")
def fst((a,b)):
"""
First element of a pair.
"""
return a
def snd((a,b)):
"""
Second element of a pair.
"""
return b
def iterAll(c,sentinel=None):
"""
Like the built-in 'iter' function, except that when the supplied container
has no more objects to return, an indefinite sequence of 'sentinel' values is
returned. (This is almost the converse of built-in iter(c,sentinel).)
"""
i = iter(c)
try:
while True: yield i.next()
except StopIteration:
while True: yield sentinel
def zipAll(*lists):
"""
A zip-iterator that, unlike the built-in zip function, keeps on returning
tuples until all elements of the supplied lists have been returned. When
the values from any list have been exhausted, None values are returned.
The iterator stops when all lists have been exhausted.
"""
iters = map(iterAll,lists)
while True:
result = [i.next() for i in iters]
if allEq(None,result): break
yield tuple(result)
return
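# Illustrative behaviour (examples assumed, not part of the original module):
#   list(zipAll([1, 2, 3], ['a']))  ->  [(1, 'a'), (2, None), (3, None)]
# whereas the built-in zip would stop after [(1, 'a')].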
def isEq(v):
"""
Return a function that tests for equality with the supplied value.
(Curried equality function.)
"""
return (lambda v2: v==v2)
def isNe(v):
"""
Return a function that tests for inequality with the supplied value.
(Curried inequality function.)
"""
return (lambda v2: v!=v2)
def all_orig(f,c):
"""
Do all members of c satisfy f?
"""
for i in c:
if not f(i): return False
return True
def all(p, *lsargs):
"""
Test if all sets of members from supplied lists satisfy predicate p
"""
return reduce(and_, map(p, *lsargs), True)
def any(p, *lsargs):
"""
Test if any set of members from the supplied lists satisfies predicate p
"""
return reduce(or_, map(p, *lsargs), False)
def allEq(v,c):
"""
Are all members of c equal to v?
"""
return all(isEq(v),c)
def allNe(v,c):
"""
Are all members of c not equal to v?
"""
return all(isNe(v),c)
def filterSplit(p, values):
"""
Function filters a list into two sub-lists, the first containing entries
satisfying the supplied predicate p, and the second containing those that do not.
"""
satp = []
notp = []
for v in values:
if p(v):
satp.append(v)
else:
notp.append(v)
return (satp,notp)
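# Illustrative behaviour (example assumed, not part of the original module):
#   filterSplit(lambda v: v % 2 == 0, [1, 2, 3, 4])  ->  ([2, 4], [1, 3])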
def cond(cond,v1,v2):
"""
Conditional expression.
"""
if cond:
return v1
else:
return v2
def interleave(l1,l2):
"""
Interleave lists.
"""
if not l1: return l2
if not l2: return l1
return [l1[0],l2[0]]+interleave(l1[1:],l2[1:])
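# Illustrative behaviour (example assumed, not part of the original module):
#   interleave([1, 3, 5], [2, 4])  ->  [1, 2, 3, 4, 5]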
def endsWith(base,suff):
"""
Test if list (sequence) ends with given suffix
"""
return base[-len(suff):] == suff
def formatIntList(ints, sep=",", intfmt=str):
"""
Format list of integers, using a supplied function to format each value,
and inserting a supplied separator between each.
Default comma-separated list of decimals.
"""
return sep.join(map(intfmt, ints))
def formatInt(fmt):
"""
Returns a function to format a single integer value using the supplied
format string.
"""
def dofmt(n): return fmt % (n,)
return dofmt
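# Illustrative behaviour (example assumed, not part of the original module):
#   formatIntList([1, 2, 3], sep=";", intfmt=formatInt("%03d"))
#     ->  "001;002;003"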
def formatList(lst,left=0,right=0):
"""
Format a list over one or more lines, using the supplied margins.
Left margin padding is *not* added to the first line of output,
and no final newline is included following the last line of output.
"""
# Try for format on one line
out = formatList1(lst,right-left)
if not out:
# format over multiple lines
out = "("
pre = " "
pad = "\n"+left*" "
for i in lst:
out += pre
if isinstance(i,list) or isinstance(i,tuple):
out += formatList(i, left+2, right)
elif isinstance(i,dict):
out += formatDict(i, left+2, right, left+2)
else:
out += repr(i)
pre = pad+", "
out += pad + ")"
return out
def formatList1(lst,width):
"""
Attempt to format a list on a single line, within supplied width,
or return None if the list does not fit.
"""
out = "("
pre = ""
ol = 2
for i in lst:
o = pre+repr(i)
ol += len(o)
if ol > width: return None
pre = ", "
out += o
return out+")"
def formatDict(dic,left=0,right=0,pos=0):
"""
Format a dictionary over one or more lines, using the supplied margins.
Left margin padding is *not* added to the first line of output,
and no final newline is included following the last line of output.
"""
# Try for format on one line
out = formatDict1(dic,right-pos)
if not out:
# format over multiple lines
out = "{"
pre = " "
pad = "\n"+left*" "
for k in dic.keys():
out += pre
v = dic[k]
ks = repr(k)+': '
p = pos+2+len(ks)
if isinstance(v,dict):
o = formatDict1(v, right-p)
if not o:
o = pad + " " + formatDict(v, left+2, right, left+2)
out += ks + o
elif isinstance(v,list) or isinstance(v,tuple):
o = formatList1(v, right-p)
if not o:
o = pad + " " + formatList(v, left+2, right)
out += ks + o
else:
out += ks + repr(v)
pre = pad+", "
pos = left+2
out += pad + "}"
return out
def formatDict1(dic,width):
"""
Attempt to format a dictionary on a single line, within the supplied width,
or return None if it does not fit.
"""
out = "{"
pre = ""
ol = 2
for k in dic.keys():
v = dic[k]
o = pre + repr(k)+': '
if isinstance(v,dict):
vs = formatDict1(v,width)
if not vs: return None
o += vs
elif isinstance(v,list) or isinstance(v,tuple):
vs = formatList1(v,width)
if not vs: return None
o += vs
else:
o += repr(v)
ol += len(o)
if ol > width: return None
pre = ", "
out += o
return out+"}"
def compareLists(c1,c2):
"""
Compare a pair of lists, returning None if the lists are identical,
or a pair of lists containing:
(1) elements of first list not in second, and
(2) elements of second list not in first list.
"""
c1 = c1 or []
c2 = c2 or []
c1d = []
c2d = []
for c in c1:
if not (c in c2): c1d.append(c)
for c in c2:
if not (c in c1): c2d.append(c)
if c1d or c2d: return (c1d,c2d)
return None
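# Illustrative behaviour (examples assumed, not part of the original module):
#   compareLists([1, 2, 3], [2, 3, 4])  ->  ([1], [4])
#   compareLists([1, 2], [2, 1])        ->  None (same members)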
def compareDicts(d1,d2):
"""
Return None if dictionaries are identical, or pair of lists containing
entries in d1 not in d2, and entries in d2 not in d1.
"""
dif1 = diffDicts(d1,d2)
dif2 = diffDicts(d2,d1)
if dif1 or dif2:
return (dif1,dif2)
else:
return None
def diffDicts(d1,d2):
"""
Return dictionary of entries in d1 that are not in d2.
"""
difs = {}
for (k,v1) in d1.iteritems():
if v1:
if k not in d2:
difs[k] = v1
else:
d = diffPair(v1,d2[k])
if nonEmpty(d):
difs[k] = d
return difs
def diffLists(t1,t2):
"""
Compares pairwise elements of 2 lists, and returns a list of elements
in the first that are not in the second.
Where the elements are dictionaries or tuples, the element difference is
determined recursively, otherwise the value is treated atomically.
"""
ps = zipAll(t1,t2)
ds = filter(nonEmpty, [diffPair(a,b) for (a,b) in ps])
return ds
def diffTuples(t1,t2):
"""
Compares pairwise elements of 2 tuples, and returns a list of elements
in the first that are not in the second.
Where the elements are dictionaries or tuples, the element difference is
determined recursively, otherwise the value is treated atomically.
"""
return tuple(diffLists(t1,t2))
def diffPair(v1,v2):
"""
Return the part of v1 that is not present in v2.
Returns None if v1 and v2 are equal, or if every element of v1 is
also present in v2.
"""
if isinstance(v1,tuple) and isinstance(v2,tuple):
return diffTuples(v1,v2)
if isinstance(v1,list) and isinstance(v2,list):
return diffLists(v1,v2)
if isinstance(v1,dict) and isinstance(v2,dict):
return diffDicts(v1,v2)
if v1!=v2:
return v1
return None
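# Illustrative behaviour (examples assumed, not part of the original module):
# differences are computed recursively for containers, atomically otherwise:
#   diffPair({'a': 1, 'b': 2}, {'a': 1, 'b': 3})  ->  {'b': 2}
#   diffPair((1, 2), (1, 9))                      ->  (2,)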
def nonEmpty(v):
"""
If v is a container (tuple, list or dictionary), return None if it is empty,
otherwise return v itself.
"""
if isinstance(v,(tuple,list,dict)):
if len(v) == 0: return None
return v
# End.
| {
"repo_name": "wf4ever/ro-manager",
"path": "src/MiscUtils/Functions.py",
"copies": "1",
"size": "9492",
"license": "mit",
"hash": 8216408904901107000,
"line_mean": 26.0427350427,
"line_max": 84,
"alpha_frac": 0.5685840708,
"autogenerated": false,
"ratio": 3.552395209580838,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9542431493080957,
"avg_score": 0.01570955745997611,
"num_lines": 351
} |
# $Id: Functions.py 1047 2009-01-15 14:48:58Z graham $
#
"""
Miscellaneous functions for use with Python code,
mostly based on Haskell library elements.
"""
from operator import concat, and_, or_
def concatMap(f,vs):
"""
Map function over list and concatenate results.
"""
return reduce( concat, map (f, vs), "")
def fst((a,b)):
"""
First element of a pair.
"""
return a
def snd((a,b)):
"""
Second element of a pair.
"""
return b
def iterAll(c,sentinel=None):
"""
Like the built-in 'iter' function, except that when the supplied container
has no more objects to return an indefinite sequence of 'sentinel' values is
returned. (This is almost the converse of built-in iter(c,sentinel).)
"""
i = iter(c)
try:
while True: yield i.next()
except StopIteration:
while True: yield sentinel
def zipAll(*lists):
"""
A zip-iterator that, unlike the built-in zip function, keeps on returning
tuples until all elements of the supplied lists have been returned. When
the values from any list have been exhausted, None values are returned.
The iterator stops when all lists have been exhausted.
"""
iters = map(iterAll,lists)
while True:
result = [i.next() for i in iters]
if allEq(None,result): break
yield tuple(result)
return
def isEq(v):
"""
Return a function that tests for equality with the supplied value.
(Curried equality function.)
"""
return (lambda v2: v==v2)
def isNe(v):
"""
Return a function that tests for inequality with the supplied value.
(Curried inequality function.)
"""
return (lambda v2: v!=v2)
def all_orig(f,c):
"""
Do all members of c satisfy f?
"""
for i in c:
if not f(i): return False
return True
def all(p, *lsargs):
"""
Test if all sets of members from supplied lists satisfy predicate p
"""
return reduce(and_, map(p, *lsargs), True)
def any(p, *lsargs):
"""
Test if all sets of members from supplied lists satisfy predicate p
"""
return reduce(or_, map(p, *lsargs), False)
def allEq(v,c):
"""
Are all members of c equal to v?
"""
return all(isEq(v),c)
def allNe(v,c):
"""
Are all members of c not equal to v?
"""
return all(isNe(v),c)
def filterSplit(p, values):
"""
Function filters a list into two sub-lists, the first containing entries
satisfying the supplied predicate p, and the second containing those that do not.
"""
satp = []
notp = []
for v in values:
if p(v):
satp.append(v)
else:
notp.append(v)
return (satp,notp)
def cond(cond,v1,v2):
"""
Conditional expression.
"""
if cond:
return v1
else:
return v2
def interleave(l1,l2):
"""
Interleave lists.
"""
if not l1: return l2
if not l2: return l1
return [l1[0],l2[0]]+interleave(l1[1:],l2[1:])
def endsWith(base,suff):
"""
Test if list (sequence) ends with given suffix
"""
return base[-len(suff):] == suff
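# Illustrative behaviour (examples assumed, not part of the original module):
#   endsWith([1, 2, 3, 4], [3, 4])    ->  True
#   endsWith("filename.txt", ".txt")  ->  True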
def formatIntList(ints, sep=",", intfmt=str):
"""
Format list of integers, using a supplied function to format each value,
and inserting a supplied separator between each.
Default comma-separated list of decimals.
"""
return sep.join(map(intfmt, ints))
def formatInt(fmt):
"""
Returns a function to format a single integer value using the supplied
format string.
"""
def dofmt(n): return fmt % (n,)
return dofmt
def formatList(lst,left=0,right=0):
"""
Format a list over one or more lines, using the supplied margins.
Left margin padding is *not* added to the first line of output,
and no final newline is included following the last line of output.
"""
# Try for format on one line
out = formatList1(lst,right-left)
if not out:
# format over multiple lines
out = "("
pre = " "
pad = "\n"+left*" "
for i in lst:
out += pre
if isinstance(i,list) or isinstance(i,tuple):
out += formatList(i, left+2, right)
elif isinstance(i,dict):
out += formatDict(i, left+2, right, left+2)
else:
out += repr(i)
pre = pad+", "
out += pad + ")"
return out
def formatList1(lst,width):
"""
Attempt to format a list on a single line, within supplied width,
or return None if the list does not fit.
"""
out = "("
pre = ""
ol = 2
for i in lst:
o = pre+repr(i)
ol += len(o)
if ol > width: return None
pre = ", "
out += o
return out+")"
def formatDict(dic,left=0,right=0,pos=0):
"""
Format a dictionary over one or more lines, using the supplied margins.
Left margin padding is *not* added to the first line of output,
and no final newline is included following the last line of output.
"""
# Try for format on one line
out = formatDict1(dic,right-pos)
if not out:
# format over multiple lines
out = "{"
pre = " "
pad = "\n"+left*" "
for k in dic.keys():
out += pre
v = dic[k]
ks = repr(k)+': '
p = pos+2+len(ks)
if isinstance(v,dict):
o = formatDict1(v, right-p)
if not o:
o = pad + " " + formatDict(v, left+2, right, left+2)
out += ks + o
elif isinstance(v,list) or isinstance(v,tuple):
o = formatList1(v, right-p)
if not o:
o = pad + " " + formatList(v, left+2, right)
out += ks + o
else:
out += ks + repr(v)
pre = pad+", "
pos = left+2
out += pad + "}"
return out
def formatDict1(dic,width):
"""
Attempt to format a dictionary on a single line, within the supplied width,
or return None if it does not fit.
"""
out = "{"
pre = ""
ol = 2
for k in dic.keys():
v = dic[k]
o = pre + repr(k)+': '
if isinstance(v,dict):
vs = formatDict1(v,width)
if not vs: return None
o += vs
elif isinstance(v,list) or isinstance(v,tuple):
vs = formatList1(v,width)
if not vs: return None
o += vs
else:
o += repr(v)
ol += len(o)
if ol > width: return None
pre = ", "
out += o
return out+"}"
def compareLists(c1,c2):
"""
Compare a pair of lists, returning None if the lists are identical,
or a pair of lists containing:
(1) elements of first list not in second, and
(2) elements of second list not in first list.
"""
c1 = c1 or []
c2 = c2 or []
c1d = []
c2d = []
for c in c1:
if not (c in c2): c1d.append(c)
for c in c2:
if not (c in c1): c2d.append(c)
if c1d or c2d: return (c1d,c2d)
return None
def compareDicts(d1,d2):
"""
Return None if dictionaries are identical, or pair of lists containing
entries in d1 not in d2, and entries in d2 not in d1.
"""
dif1 = diffDicts(d1,d2)
dif2 = diffDicts(d2,d1)
if dif1 or dif2:
return (dif1,dif2)
else:
return None
def diffDicts(d1,d2):
"""
Return dictionary of entries in d1 that are not in d2.
"""
difs = {}
for (k,v1) in d1.iteritems():
if v1:
if k not in d2:
difs[k] = v1
else:
d = diffPair(v1,d2[k])
if nonEmpty(d):
difs[k] = d
return difs
def diffLists(t1,t2):
"""
Compares pairwise elements of 2 lists, and returns a list of elements
in the first that are not in the second.
Where the elements are dictionaries or tuples, the element difference is
determined recursively, otherwise the value is treated atomically.
"""
ps = zipAll(t1,t2)
ds = filter(nonEmpty, [diffPair(a,b) for (a,b) in ps])
return ds
def diffTuples(t1,t2):
"""
Compares pairwise elements of 2 tuples, and returns a list of elements
in the first that are not in the second.
Where the elements are dictionaries or tuples, the element difference is
determined recursively, otherwise the value is treated atomically.
"""
return tuple(diffLists(t1,t2))
def diffPair(v1,v2):
"""
Return the part of v1 that is not present in v2.
Returns None if v1 and v2 are equal, or if every element of v1 is
also present in v2.
"""
if isinstance(v1,tuple) and isinstance(v2,tuple):
return diffTuples(v1,v2)
if isinstance(v1,list) and isinstance(v2,list):
return diffLists(v1,v2)
if isinstance(v1,dict) and isinstance(v2,dict):
return diffDicts(v1,v2)
if v1!=v2:
return v1
return None
def nonEmpty(v):
"""
If v is a container (tuple, list or dictionary), return None if it is empty,
otherwise return v itself.
"""
if isinstance(v,(tuple,list,dict)):
if len(v) == 0: return None
return v
# End.
| {
"repo_name": "bhavanaananda/DataStage",
"path": "src/AdminUIHandler/MiscLib/Functions.py",
"copies": "8",
"size": "9308",
"license": "mit",
"hash": 7179975320672336000,
"line_mean": 25.8242074928,
"line_max": 84,
"alpha_frac": 0.5671465406,
"autogenerated": false,
"ratio": 3.5703874184886844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8137533959088684,
"avg_score": null,
"num_lines": null
} |
"""An universal dialog window class with additional buttons."""
from DialogWindow import DialogWindow
from Frame import VFrame, HFrame
from Button import Button
from Constants import *
class GenericDialog (DialogWindow):
"""GenericDialog (title, buttons, results) -> GenericDialog
A generic dialog window, which supports result values.
The GenericDialog widget class is suitable to create dialog windows
which need to check for certain dialog results in an easy
way. Specific result values can be bound to the buttons of the
dialog and will be emitted upon clicking the buttons.
The creation of the GenericDialog is a bit different from other
widgets. Besides the title, it receives two lists for the buttons to
display and the results they have to send. Both lists need to have
the same length.
dialog = GenericDialog ('Test', [Button ('OK'), Button ('Cancel')],
[DLGRESULT_OK, DLGRESULT_CANCEL])
The index of the result values need to match the index of the
button, that should emit them. Of course the buttons can be changed
at runtime programmatically with the set_buttons() method, which is
similar to the constructor.
dialog.set_buttons ([Button ('Close'), Button ('ClickMe')],
[DLGRESULT_CLOSE, DLGRESULT_USER])
The result values to set must match a valid value from the
DLGRESULT_TYPES tuple.
Given the above set_buttons() example, a callback for the
SIG_DIALOGRESPONSE signal will receive either the DLGRESULT_CLOSE or
DLGRESULT_USER value upon which certain actions can take place.
def dialog_callback (result, dialog):
if result == DLGRESULT_CLOSE:
dialog.destroy ()
elif result == DLGRESULT_USER:
...
mydialog.connect_signal (SIG_DIALOGRESPONSE, dialog_callback, mydialog)
The GenericDialog is separated in several frames, to which user
content can be added. The main frame, which holds anything else, can
be accessed through the 'main' attribute. While it is possible to
change most of its attributes without unwanted side effects, its
children should not be modified or deleted, to prevent misbehaviour of
the dialog.
dialog.main.spacing = 10
The second frame, which is usually the most interesting is the
content frame, packed into the main frame. It can be accessed
through the 'content' attribute and should (only) be used to add own
widgets. Those can be deleted and modified, too.
label = Label ('Label on the dialog')
dialog.content.add_child (label)
dialog.content.add_child (Button ('Button after label'))
dialog.content.remove_child (label)
Default action (invoked by activate()):
See the DialogWindow class.
Mnemonic action (invoked by activate_mnemonic()):
See the DialogWindow class.
Signals:
SIG_DIALOGRESPONSE - Invoked, when an attached button is pressed.
"""
def __init__ (self, title, buttons, results):
DialogWindow.__init__ (self, title)
# The main frame holds the whole content for the window.
# The basic design idea of the dialog looks like:
# +----------------------+
# | Title caption |
# +----------------------+
# | main frame |
# | ++++++++++++++++++++ |
# | + + |
# | + with + |
# | + + |
# | + user content + |
# | + + |
# | +------------------+ |
# | + Action frame + |
# | ++++++++++++++++++++ |
# +----------------------+
self.padding = 5
self._mainframe = VFrame ()
self._mainframe.border = BORDER_NONE
self._mainframe.spacing = 5
self._mainframe.padding = 0
self._contentframe = VFrame ()
self._contentframe.border = BORDER_NONE
self._contentframe.spacing = 0
self._contentframe.padding = 0
self._mainframe.add_child (self._contentframe)
# Buttons will be placed in the action frame.
self._actionframe = HFrame ()
self._actionframe.border = BORDER_NONE
self._actionframe.padding = 0
self._mainframe.add_child (self._actionframe)
self.set_buttons (buttons, results)
self._signals[SIG_DIALOGRESPONSE] = []
self.set_child (self._mainframe)
def set_buttons (self, buttons, results):
"""G.set_buttons (buttons. results) -> None
Sets the buttons for the dialog and their wanted result values.
"""
for widget in self._actionframe.children:
widget.destroy ()
for i in xrange (len (buttons)):
if not isinstance (buttons[i], Button):
raise TypeError ("All elements in buttons must inherit "
"from Button")
if results[i] not in DLGRESULT_TYPES:
raise TypeError ("All elements in results must be values "
"of DLGRESULT_TYPES")
buttons[i].connect_signal (SIG_CLICKED, self._button_clicked,
results[i])
self._actionframe.add_child (buttons[i])
self.dirty = True
def _button_clicked (self, result):
"""G._button_clicked (...) -> None
Callback for the buttons in the action frame.
The _button_clicked method will run the SIG_DIALOGRESPONSE
signal handlers with the passed result.
"""
self.run_signal_handlers (SIG_DIALOGRESPONSE, result)
content = property (lambda self: self._contentframe,
doc = "The content frame to add widgets to.")
main = property (lambda self: self._mainframe,
doc = "The main frame of the dialog.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/GenericDialog.py",
"copies": "1",
"size": "7328",
"license": "bsd-2-clause",
"hash": -7258221487424784000,
"line_mean": 40.1685393258,
"line_max": 78,
"alpha_frac": 0.6375545852,
"autogenerated": false,
"ratio": 4.509538461538462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5647093046738462,
"avg_score": null,
"num_lines": null
} |
"""
This file retrieves deployments from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())
try:
octopus_url = protocol + "://" + hostname + "/api/deployments/"
last_deployment_id = octopus_common.readCheckPoint("deployments")
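  # The checkpoint helpers are defined elsewhere (octopus_common.py); a minimal
  # file-based sketch of what they might look like (hypothetical, shown here
  # only to clarify the read/write contract used below):
  #
  #   def readCheckPoint(name):
  #       try:
  #           with open("octopus_" + name + ".chk") as f:
  #               return f.read().strip() or "0"
  #       except IOError:
  #           return "0"
  #
  #   def writeCheckPoint(name, value):
  #       with open("octopus_" + name + ".chk", "w") as f:
  #           f.write(str(value))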
while True:
# Setup response object and execute GET request
response = requests.get(
url = octopus_url,
headers = {
"X-Octopus-ApiKey": apikey,
},
)
response.raise_for_status()
# Handle response
json_response = json.loads(response.content)
# Get deployment ID from first deployment returned by the API which is the most recent deployment
try:
if json_response['Links']['Page.Current'].split('=')[1][:1] == '0':
deployment_id = json_response['Items'][0]['Id'].split('-')[1]
octopus_common.writeCheckPoint("deployments", deployment_id)
except Exception:
break
# Iterate deployments and print each one to Splunk if it hasn't been printed before
for deployment in json_response['Items']:
# Get deployment ID
deployment_id = deployment['Id'].split('-')[1]
if int(deployment_id) > int(last_deployment_id):
print json.dumps(deployment)
# Try to get next page if available, else stop (the checkpoint was already written above)
try:
octopus_url = protocol + "://" + hostname + json_response['Links']['Page.Next']
except Exception:
break
sys.exit(0)
# Catch exceptions if needed
except Exception as e:
logger.exception("Exception: " + str(e))
isp.generateErrorResults(str(e)) | {
"repo_name": "cmeerbeek/splunk-addon-octopus-deploy",
"path": "TA-OctopusNIX-Fwd/bin/get_deployments.py",
"copies": "2",
"size": "2057",
"license": "mit",
"hash": 8702554263712645000,
"line_mean": 29.2647058824,
"line_max": 101,
"alpha_frac": 0.6796305299,
"autogenerated": false,
"ratio": 3.6732142857142858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5352844815614286,
"avg_score": null,
"num_lines": null
} |
"""
This file retrieves environments from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())
try:
octopus_url = protocol + "://" + hostname + "/api/environments/all"
# Setup response object and execute GET request
response = requests.get(
url = octopus_url,
headers = {
"X-Octopus-ApiKey": apikey,
},
)
response.raise_for_status()
# Handle response
json_response = json.loads(response.content)
# Iterate environments and print results to Splunk
for environment in json_response:
print json.dumps(environment)
sys.exit(0)
# Catch exceptions if needed
except Exception as e:
logger.exception("Exception: " + str(e))
isp.generateErrorResults(str(e)) | {
"repo_name": "cmeerbeek/splunk-addon-octopus-deploy",
"path": "TA-OctopusNIX-Fwd/bin/get_environments.py",
"copies": "2",
"size": "1187",
"license": "mit",
"hash": -1971521100591519500,
"line_mean": 23.75,
"line_max": 69,
"alpha_frac": 0.7270429655,
"autogenerated": false,
"ratio": 3.410919540229885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137962505729885,
"avg_score": null,
"num_lines": null
} |
"""
This file retrieves events from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())
try:
octopus_url = protocol + "://" + hostname + "/api/events/"
last_event_id = octopus_common.readCheckPoint("events")
while True:
# Setup response object and execute GET request
response = requests.get(
url = octopus_url,
headers = {
"X-Octopus-ApiKey": apikey,
},
)
response.raise_for_status()
# Handle response
json_response = json.loads(response.content)
# Get event ID from first event returned by the API which is the most recent event
try:
if json_response['Links']['Page.Current'].split('=')[1][:1] == '0':
event_id = json_response['Items'][0]['Id'].split('-')[1]
octopus_common.writeCheckPoint("events", event_id)
except Exception:
break
# Iterate events and print each one to Splunk if it hasn't been printed before
for event in json_response['Items']:
event_id = event['Id'].split('-')[1]
# If event_id is smaller than or equal to the checkpoint, stop processing this page
if int(event_id) <= int(last_event_id):
break
print json.dumps(event)
# Try to get next page if available, else exit
try:
octopus_url = protocol + "://" + hostname + json_response['Links']['Page.Next']
except Exception:
break
sys.exit(0)
# Catch exceptions if needed
except Exception as e:
logger.exception("Exception: " + str(e))
isp.generateErrorResults(str(e)) | {
"repo_name": "cmeerbeek/splunk-addon-octopus-deploy",
"path": "TA-OctopusNT-Fwd/bin/get_events.py",
"copies": "2",
"size": "1966",
"license": "mit",
"hash": -354156397359731140,
"line_mean": 27.1,
"line_max": 86,
"alpha_frac": 0.6658189217,
"autogenerated": false,
"ratio": 3.5296229802513466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5195441901951346,
"avg_score": null,
"num_lines": null
} |
"""
This file retrieves machines from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())
try:
octopus_url = protocol + "://" + hostname + "/api/machines/all"
# Setup response object and execute GET request
response = requests.get(
url = octopus_url,
headers = {
"X-Octopus-ApiKey": apikey,
},
)
response.raise_for_status()
# Handle response
json_response = json.loads(response.content)
# Iterate machines and print results to Splunk
for machine in json_response:
print json.dumps(machine)
sys.exit(0)
# Catch exceptions if needed
except Exception as e:
logger.exception("Exception: " + str(e))
isp.generateErrorResults(str(e)) | {
"repo_name": "cmeerbeek/splunk-addon-octopus-deploy",
"path": "TA-OctopusNIX-Fwd/bin/get_machines.py",
"copies": "2",
"size": "1163",
"license": "mit",
"hash": 6930123404532113000,
"line_mean": 23.25,
"line_max": 65,
"alpha_frac": 0.7214101462,
"autogenerated": false,
"ratio": 3.3419540229885056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063364169188506,
"avg_score": null,
"num_lines": null
} |
"""
This file retrieves projects from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())
try:
octopus_url = protocol + "://" + hostname + "/api/projects/all"
# Setup response object and execute GET request
response = requests.get(
url = octopus_url,
headers = {
"X-Octopus-ApiKey": apikey,
},
)
response.raise_for_status()
# Handle response
json_response = json.loads(response.content)
# Iterate projects and print results to Splunk
for project in json_response:
print json.dumps(project)
sys.exit(0)
# Catch exceptions if needed
except Exception as e:
logger.exception("Exception: " + str(e))
isp.generateErrorResults(str(e)) | {
"repo_name": "cmeerbeek/splunk-addon-octopus-deploy",
"path": "TA-OctopusNT-Fwd/bin/get_projects.py",
"copies": "2",
"size": "1165",
"license": "mit",
"hash": -6451146240734415000,
"line_mean": 23.2916666667,
"line_max": 65,
"alpha_frac": 0.7201716738,
"autogenerated": false,
"ratio": 3.3477011494252875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9965083424900081,
"avg_score": 0.020557879665041048,
"num_lines": 48
} |
"""
This file retrieves releases from an Octopus application
"""
import logging, json, requests, time, sys
import ConfigParser
import splunk.Intersplunk as isp
import octopus_common
# Parse octopus.conf for configuration settings
stanza = octopus_common.getSelfConfStanza("octopus")
protocol = stanza['protocol']
hostname = stanza['hostname']
apikey = stanza['apikey']
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())
try:
octopus_url = protocol + "://" + hostname + "/api/releases/"
last_release_id = octopus_common.readCheckPoint("releases")
while True:
# Setup response object and execute GET request
response = requests.get(
url = octopus_url,
headers = {
"X-Octopus-ApiKey": apikey,
},
)
response.raise_for_status()
# Handle response
json_response = json.loads(response.content)
# Get release ID from first release returned by the API which is the most recent release
try:
if json_response['Links']['Page.Current'].split('=')[1][:1] == '0':
release_id = json_response['Items'][0]['Id'].split('-')[1]
octopus_common.writeCheckPoint("releases", release_id)
except Exception:
break
# Iterate releases and print each one to Splunk if it hasn't been printed before
for release in json_response['Items']:
release_id = release['Id'].split('-')[1]
if int(release_id) <= int(last_release_id):
break
print json.dumps(release)
# Try to get next page if available, else stop (the checkpoint was already written above)
try:
octopus_url = protocol + "://" + hostname + json_response['Links']['Page.Next']
except Exception:
break
sys.exit(0)
# Catch exceptions if needed
except Exception as e:
logger.exception("Exception: " + str(e))
isp.generateErrorResults(str(e)) | {
"repo_name": "cmeerbeek/splunk-addon-octopus-deploy",
"path": "TA-OctopusNIX-Fwd/bin/get_releases.py",
"copies": "2",
"size": "1974",
"license": "mit",
"hash": 617843590866885800,
"line_mean": 27.6231884058,
"line_max": 92,
"alpha_frac": 0.6742654509,
"autogenerated": false,
"ratio": 3.5956284153005464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016687414350458646,
"num_lines": 69
} |