repo_name | path | copies | size | content | license
---|---|---|---|---|---|
msvbhat/distaf | distaf/util.py | 1 | 4872 | # This file is part of DiSTAF
# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from types import FunctionType
from distaf.client_rpyc import BigBang
from distaf.config_parser import get_global_config, get_testcase_config
testcases = {}
test_list = {}
test_seq = []
test_mounts = {}
globl_configs = {}
global_mode = None
tc = None
def distaf_init(config_file_string="config.yml"):
"""
The distaf init function which calls the BigBang
"""
config_files = config_file_string.split()
global globl_configs, global_mode, tc
globl_configs = get_global_config(config_files)
global_mode = globl_configs['global_mode']
tc = BigBang(globl_configs)
return globl_configs
def inject_gluster_logs(label, servers=''):
"""
Injects the label into gluster-related logs.
This is mainly to help identify what was going
on during the test case.
@parameter: A label string which will be injected into gluster logs.
A list of servers on which this log injection should be
done.
@returns: None
"""
if servers == '':
servers = tc.all_nodes
cmd = "for file in `find $(gluster --print-logdir) -type f " \
"-name '*.log'`; do echo \"%s\" >> $file; done" % label
tc.run_servers(cmd, servers=servers, verbose=False)
return None
def testcase(name):
def decorator(func):
tc_config = get_testcase_config(func.__doc__)
def wrapper(self):
tc.logger.info("Starting the test: %s" % name)
voltype, mount_proto = test_seq.pop(0)
inject_gluster_logs("%s_%s" % (voltype, name))
_ret = True
globl_configs['reuse_setup'] = tc_config['reuse_setup']
globl_configs.update(tc_config)
globl_configs['voltype'] = voltype
globl_configs['mount_proto'] = mount_proto
if isinstance(func, FunctionType):
_ret = func()
else:
try:
func_obj = func(globl_configs)
ret = func_obj.setup()
if not ret:
tc.logger.error("The setup of %s failed" % name)
_ret = False
if _ret:
ret = func_obj.run()
if not ret:
tc.logger.error("The execution of testcase %s " \
"failed" % name)
_ret = False
ret = func_obj.teardown()
if not ret:
tc.logger.error("The teardown of %s failed" % name)
_ret = False
if len(test_seq) == 0 or voltype != test_seq[0][0]:
tc.logger.info("Last test case to use %s volume type" \
% voltype)
ret = func_obj.cleanup()
if not ret:
tc.logger.error("The cleanup of volume %s failed" \
% name)
_ret = False
except:
tc.logger.exception("Exception while running %s" % name)
_ret = False
self.assertTrue(_ret, "Testcase %s failed" % name)
inject_gluster_logs("%s_%s" % (voltype, name))
tc.logger.info("Ending the test: %s" % name)
return _ret
testcases[name] = wrapper
if not global_mode and tc_config is not None:
for voltype in tc_config['runs_on_volumes']:
if voltype not in test_list:
test_list[voltype] = []
if not tc_config['reuse_setup']:
test_list[voltype].insert(0, name)
else:
test_list[voltype].append(name)
test_mounts[name] = tc_config['runs_on_protocol']
return wrapper
return decorator
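# Added illustration (not part of the original module): a minimal sketch of a
# class-based test using the @testcase decorator above. The class name and
# docstring contents are assumptions; the keys mirror what this module reads
# from tc_config (runs_on_volumes, runs_on_protocol, reuse_setup), and
# get_testcase_config() is assumed to parse them out of the docstring.
#
# @testcase("example_volume_test")
# class ExampleVolumeTest:
#     """
#     runs_on_volumes: [ distribute ]
#     runs_on_protocol: [ glusterfs ]
#     reuse_setup: True
#     """
#     def __init__(self, configs):
#         self.configs = configs
#     def setup(self):
#         return True
#     def run(self):
#         return True
#     def teardown(self):
#         return True
#     def cleanup(self):
#         return True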
def distaf_finii():
"""
The fini() function which closes all connections to the servers
"""
tc.fini()
| gpl-2.0 |
pombredanne/psd-tools | tests/test_pixels.py | 8 | 5675 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from psd_tools import PSDImage, Layer, Group
from .utils import full_name
PIXEL_COLORS = (
# filename probe point pixel value
('1layer.psd', (5, 5), (0x27, 0xBA, 0x0F)),
('group.psd', (10, 20), (0xFF, 0xFF, 0xFF)),
('hidden-groups.psd', (60, 100), (0xE1, 0x0B, 0x0B)),
('hidden-layer.psd', (0, 0), (0xFF, 0xFF, 0xFF)),
# ('note.psd', (30, 30), (0, 0, 0)), # what is it?
('smart-object-slice.psd', (70, 80), (0xAC, 0x19, 0x19)), # XXX: what is this test about?
)
TRANSPARENCY_PIXEL_COLORS = (
('transparentbg-gimp.psd', (14, 14), (0xFF, 0xFF, 0xFF, 0x13)),
('2layers.psd', (70, 30), (0xF1, 0xF3, 0xC1)), # why gimp shows it as F2F4C2 ?
)
MASK_PIXEL_COLORS = (
('clipping-mask.psd', (182, 68), (0xDA, 0xE6, 0xF7)), # this is a clipped point
('mask.psd', (87, 7), (0xFF, 0xFF, 0xFF)), # mask truncates the layer here
)
NO_LAYERS_PIXEL_COLORS = (
('history.psd', (70, 85), (0x24, 0x26, 0x29)),
)
PIXEL_COLORS_8BIT = (PIXEL_COLORS + NO_LAYERS_PIXEL_COLORS +
MASK_PIXEL_COLORS + TRANSPARENCY_PIXEL_COLORS)
PIXEL_COLORS_32BIT = (
('32bit.psd', (75, 15), (136, 139, 145)),
('32bit.psd', (95, 15), (0, 0, 0)),
('300dpi.psd', (70, 30), (0, 0, 0)),
('300dpi.psd', (50, 60), (214, 59, 59)),
('gradient fill.psd', (10, 15), (235, 241, 250)), # background
('gradient fill.psd', (70, 50), (0, 0, 0)), # black circle
('gradient fill.psd', (50, 50), (205, 144, 110)), # filled ellipse
('pen-text.psd', (50, 50), (229, 93, 93)),
('pen-text.psd', (170, 40), (0, 0, 0)),
('vector mask.psd', (10, 15), (255, 255, 255)),
('vector mask.psd', (50, 90), (221, 227, 236)),
('transparentbg.psd', (0, 0), (255, 255, 255, 0)),
('transparentbg.psd', (50, 50), (0, 0, 0, 255)),
('32bit5x5.psd', (0, 0), (235, 241, 250)), # why not equal to 16bit5x5.psd?
('32bit5x5.psd', (4, 0), (0, 0, 0)),
('32bit5x5.psd', (1, 3), (46, 196, 104)),
)
PIXEL_COLORS_16BIT = (
('16bit5x5.psd', (0, 0), (236, 242, 251)),
('16bit5x5.psd', (4, 0), (0, 0, 0)),
('16bit5x5.psd', (1, 3), (46, 196, 104)),
)
LAYER_COLORS = (
('1layer.psd', 0, (5, 5), (0x27, 0xBA, 0x0F)),
('2layers.psd', 1, (5, 5), (0x27, 0xBA, 0x0F)),
('2layers.psd', 1, (70, 30), (0x27, 0xBA, 0x0F)),
('2layers.psd', 0, (0, 0), (0, 0, 0, 0)),
('2layers.psd', 0, (62, 26), (0xF2, 0xF4, 0xC2, 0xFE)),
)
LAYER_COLORS_MULTIBYTE = (
('16bit5x5.psd', 1, (0, 0), (236, 242, 251, 255)),
('16bit5x5.psd', 1, (1, 3), (46, 196, 104, 255)),
('32bit5x5.psd', 1, (0, 0), (235, 241, 250, 255)), # why not equal to 16bit5x5.psd?
('32bit5x5.psd', 1, (1, 3), (46, 196, 104, 255)),
)
def color_PIL(psd, point):
im = psd.as_PIL()
return im.getpixel(point)
def color_pymaging(psd, point):
im = psd.as_pymaging()
return tuple(im.get_pixel(*point))
BACKENDS = [[color_PIL], [color_pymaging]]
@pytest.mark.parametrize(["get_color"], BACKENDS)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_8BIT)
def test_composite(filename, point, color, get_color):
psd = PSDImage.load(full_name(filename))
assert color == get_color(psd, point)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_32BIT)
def test_composite_32bit(filename, point, color):
psd = PSDImage.load(full_name(filename))
assert color == color_PIL(psd, point)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS_16BIT)
def test_composite_16bit(filename, point, color):
psd = PSDImage.load(full_name(filename))
assert color == color_PIL(psd, point)
@pytest.mark.parametrize(["filename", "layer_num", "point", "color"], LAYER_COLORS_MULTIBYTE)
def test_layer_colors_multibyte(filename, layer_num, point, color):
psd = PSDImage.load(full_name(filename))
layer = psd.layers[layer_num]
assert color == color_PIL(layer, point)
@pytest.mark.parametrize(["get_color"], BACKENDS)
@pytest.mark.parametrize(["filename", "layer_num", "point", "color"], LAYER_COLORS)
def test_layer_colors(filename, layer_num, point, color, get_color):
psd = PSDImage.load(full_name(filename))
layer = psd.layers[layer_num]
assert color == get_color(layer, point)
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS + MASK_PIXEL_COLORS + TRANSPARENCY_PIXEL_COLORS)
def test_layer_merging_size(filename, point, color):
psd = PSDImage.load(full_name(filename))
merged_image = psd.as_PIL_merged()
assert merged_image.size == psd.as_PIL().size
@pytest.mark.parametrize(["filename", "point", "color"], PIXEL_COLORS)
def test_layer_merging_pixels(filename, point, color):
psd = PSDImage.load(full_name(filename))
merged_image = psd.as_PIL_merged()
assert color[:3] == merged_image.getpixel(point)[:3]
assert merged_image.getpixel(point)[3] == 255 # alpha channel
@pytest.mark.xfail
@pytest.mark.parametrize(["filename", "point", "color"], TRANSPARENCY_PIXEL_COLORS)
def test_layer_merging_pixels_transparency(filename, point, color):
psd = PSDImage.load(full_name(filename))
merged_image = psd.as_PIL_merged()
assert color == merged_image.getpixel(point)
| mit |
jazkarta/edx-platform-for-isc | common/djangoapps/track/tests/test_util.py | 239 | 1203 | from datetime import datetime
import json
from pytz import UTC
from django.test import TestCase
from track.utils import DateTimeJSONEncoder
class TestDateTimeJSONEncoder(TestCase):
def test_datetime_encoding(self):
a_naive_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000)
a_tz_datetime = datetime(2012, 05, 01, 07, 27, 10, 20000, tzinfo=UTC)
a_date = a_naive_datetime.date()
an_iso_datetime = '2012-05-01T07:27:10.020000+00:00'
an_iso_date = '2012-05-01'
obj = {
'number': 100,
'string': 'hello',
'object': {'a': 1},
'a_datetime': a_naive_datetime,
'a_tz_datetime': a_tz_datetime,
'a_date': a_date,
}
to_json = json.dumps(obj, cls=DateTimeJSONEncoder)
from_json = json.loads(to_json)
self.assertEqual(from_json['number'], 100)
self.assertEqual(from_json['string'], 'hello')
self.assertEqual(from_json['object'], {'a': 1})
self.assertEqual(from_json['a_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_tz_datetime'], an_iso_datetime)
self.assertEqual(from_json['a_date'], an_iso_date)
| agpl-3.0 |
Red-M/CloudBot-legacy | plugins/tell.py | 2 | 3615 | """ tell.py: written by sklnd in July 2009
2010.01.25 - modified by Scaevolus"""
import time
import re
from util import hook, timesince
db_ready = []
def db_init(db, conn):
"""Check that our db has the tell table, create it if not."""
global db_ready
if not conn.name in db_ready:
db.execute("create table if not exists tell"
"(user_to, user_from, message, chan, time,"
"primary key(user_to, message))")
db.commit()
db_ready.append(conn.name)
def get_tells(db, user_to):
return db.execute("select user_from, message, time, chan from tell where"
" user_to=lower(?) order by time",
(user_to.lower(),)).fetchall()
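# Added note (values below are made up): each row returned by get_tells() has
# the shape (user_from, message, time, chan), ordered by time, e.g.
# ('alice', 'see you at 5', 1438000000.0, '#channel'); tellinput() and
# showtells() unpack the tuples in exactly that order.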
@hook.singlethread
@hook.event('PRIVMSG')
def tellinput(inp, input=None, notice=None, db=None, nick=None, conn=None):
if 'showtells' in input.msg.lower():
return
db_init(db, conn)
tells = get_tells(db, nick)
if tells:
user_from, message, time, chan = tells[0]
reltime = timesince.timesince(time)
reply = "{} sent you a message {} ago from {}: {}".format(user_from, reltime, chan,
message)
if len(tells) > 1:
reply += " (+{} more, {}showtells to view)".format(len(tells) - 1, conn.conf["command_prefix"])
db.execute("delete from tell where user_to=lower(?) and message=?",
(nick, message))
db.commit()
notice(reply)
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', notice=None, db=None, conn=None):
"""showtells -- View all pending tell messages (sent in a notice)."""
db_init(db, conn)
tells = get_tells(db, nick)
if not tells:
notice("You have no pending tells.")
return
for tell in tells:
user_from, message, time, chan = tell
past = timesince.timesince(time)
notice("{} sent you a message {} ago from {}: {}".format(user_from, past, chan, message))
db.execute("delete from tell where user_to=lower(?)",
(nick,))
db.commit()
@hook.command
def tell(inp, nick='', chan='', db=None, input=None, notice=None, conn=None):
"""tell <nick> <message> -- Relay <message> to <nick> when <nick> is around."""
query = inp.split(' ', 1)
if len(query) != 2:
notice(tell.__doc__)
return
user_to = query[0].lower()
message = query[1].strip()
user_from = nick
if chan.lower() == user_from.lower():
chan = 'a pm'
if user_to == user_from.lower():
notice("Have you looked in a mirror lately?")
return
if user_to.lower() == input.conn.nick.lower():
# user is looking for us, being a smart-ass
notice("Thanks for the message, {}!".format(user_from))
return
if not re.match("^[A-Za-z0-9_|.\-\]\[]*$", user_to.lower()):
notice("I can't send a message to that user!")
return
db_init(db, conn)
if db.execute("select count() from tell where user_to=?",
(user_to,)).fetchone()[0] >= 10:
notice("That person has too many messages queued.")
return
try:
db.execute("insert into tell(user_to, user_from, message, chan,"
"time) values(?,?,?,?,?)", (user_to, user_from, message,
chan, time.time()))
db.commit()
except db.IntegrityError:
notice("Message has already been queued.")
return
notice("Your message has been sent!")
| gpl-3.0 |
swenson/sagewiki | unidecode/unidecode/x025.py | 252 | 3871 | data = (
'-', # 0x00
'-', # 0x01
'|', # 0x02
'|', # 0x03
'-', # 0x04
'-', # 0x05
'|', # 0x06
'|', # 0x07
'-', # 0x08
'-', # 0x09
'|', # 0x0a
'|', # 0x0b
'+', # 0x0c
'+', # 0x0d
'+', # 0x0e
'+', # 0x0f
'+', # 0x10
'+', # 0x11
'+', # 0x12
'+', # 0x13
'+', # 0x14
'+', # 0x15
'+', # 0x16
'+', # 0x17
'+', # 0x18
'+', # 0x19
'+', # 0x1a
'+', # 0x1b
'+', # 0x1c
'+', # 0x1d
'+', # 0x1e
'+', # 0x1f
'+', # 0x20
'+', # 0x21
'+', # 0x22
'+', # 0x23
'+', # 0x24
'+', # 0x25
'+', # 0x26
'+', # 0x27
'+', # 0x28
'+', # 0x29
'+', # 0x2a
'+', # 0x2b
'+', # 0x2c
'+', # 0x2d
'+', # 0x2e
'+', # 0x2f
'+', # 0x30
'+', # 0x31
'+', # 0x32
'+', # 0x33
'+', # 0x34
'+', # 0x35
'+', # 0x36
'+', # 0x37
'+', # 0x38
'+', # 0x39
'+', # 0x3a
'+', # 0x3b
'+', # 0x3c
'+', # 0x3d
'+', # 0x3e
'+', # 0x3f
'+', # 0x40
'+', # 0x41
'+', # 0x42
'+', # 0x43
'+', # 0x44
'+', # 0x45
'+', # 0x46
'+', # 0x47
'+', # 0x48
'+', # 0x49
'+', # 0x4a
'+', # 0x4b
'-', # 0x4c
'-', # 0x4d
'|', # 0x4e
'|', # 0x4f
'-', # 0x50
'|', # 0x51
'+', # 0x52
'+', # 0x53
'+', # 0x54
'+', # 0x55
'+', # 0x56
'+', # 0x57
'+', # 0x58
'+', # 0x59
'+', # 0x5a
'+', # 0x5b
'+', # 0x5c
'+', # 0x5d
'+', # 0x5e
'+', # 0x5f
'+', # 0x60
'+', # 0x61
'+', # 0x62
'+', # 0x63
'+', # 0x64
'+', # 0x65
'+', # 0x66
'+', # 0x67
'+', # 0x68
'+', # 0x69
'+', # 0x6a
'+', # 0x6b
'+', # 0x6c
'+', # 0x6d
'+', # 0x6e
'+', # 0x6f
'+', # 0x70
'/', # 0x71
'\\', # 0x72
'X', # 0x73
'-', # 0x74
'|', # 0x75
'-', # 0x76
'|', # 0x77
'-', # 0x78
'|', # 0x79
'-', # 0x7a
'|', # 0x7b
'-', # 0x7c
'|', # 0x7d
'-', # 0x7e
'|', # 0x7f
'#', # 0x80
'#', # 0x81
'#', # 0x82
'#', # 0x83
'#', # 0x84
'#', # 0x85
'#', # 0x86
'#', # 0x87
'#', # 0x88
'#', # 0x89
'#', # 0x8a
'#', # 0x8b
'#', # 0x8c
'#', # 0x8d
'#', # 0x8e
'#', # 0x8f
'#', # 0x90
'#', # 0x91
'#', # 0x92
'#', # 0x93
'-', # 0x94
'|', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'#', # 0xa0
'#', # 0xa1
'#', # 0xa2
'#', # 0xa3
'#', # 0xa4
'#', # 0xa5
'#', # 0xa6
'#', # 0xa7
'#', # 0xa8
'#', # 0xa9
'#', # 0xaa
'#', # 0xab
'#', # 0xac
'#', # 0xad
'#', # 0xae
'#', # 0xaf
'#', # 0xb0
'#', # 0xb1
'^', # 0xb2
'^', # 0xb3
'^', # 0xb4
'^', # 0xb5
'>', # 0xb6
'>', # 0xb7
'>', # 0xb8
'>', # 0xb9
'>', # 0xba
'>', # 0xbb
'V', # 0xbc
'V', # 0xbd
'V', # 0xbe
'V', # 0xbf
'<', # 0xc0
'<', # 0xc1
'<', # 0xc2
'<', # 0xc3
'<', # 0xc4
'<', # 0xc5
'*', # 0xc6
'*', # 0xc7
'*', # 0xc8
'*', # 0xc9
'*', # 0xca
'*', # 0xcb
'*', # 0xcc
'*', # 0xcd
'*', # 0xce
'*', # 0xcf
'*', # 0xd0
'*', # 0xd1
'*', # 0xd2
'*', # 0xd3
'*', # 0xd4
'*', # 0xd5
'*', # 0xd6
'*', # 0xd7
'*', # 0xd8
'*', # 0xd9
'*', # 0xda
'*', # 0xdb
'*', # 0xdc
'*', # 0xdd
'*', # 0xde
'*', # 0xdf
'*', # 0xe0
'*', # 0xe1
'*', # 0xe2
'*', # 0xe3
'*', # 0xe4
'*', # 0xe5
'*', # 0xe6
'#', # 0xe7
'#', # 0xe8
'#', # 0xe9
'#', # 0xea
'#', # 0xeb
'^', # 0xec
'^', # 0xed
'^', # 0xee
'O', # 0xef
'#', # 0xf0
'#', # 0xf1
'#', # 0xf2
'#', # 0xf3
'#', # 0xf4
'#', # 0xf5
'#', # 0xf6
'#', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
jakub-d/kubernetes | hack/lookup_pull.py | 246 | 1299 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to print out PR info in release note format.
import json
import sys
import urllib2
PULLQUERY=("https://api.github.com/repos/"
"GoogleCloudPlatform/kubernetes/pulls/{pull}")
LOGIN="login"
TITLE="title"
USER="user"
def print_pulls(pulls):
for pull in pulls:
d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())
print "* {title} #{pull} ({author})".format(
title=d[TITLE], pull=pull, author=d[USER][LOGIN])
if __name__ == "__main__":
if len(sys.argv) < 2:
print ("Usage: {cmd} <pulls>...: Prints out short " +
"markdown description for PRs appropriate for release notes.")
sys.exit(1)
print_pulls(sys.argv[1:])
| apache-2.0 |
snowballstem/snowball | python/stemwords.py | 1 | 3437 | import sys
import codecs
import snowballstemmer
def usage():
print('''usage: %s [-l <language>] [-i <input file>] [-o <output file>] [-c <character encoding>] [-p[2]] [-h]
The input file consists of a list of words to be stemmed, one per
line. Words should be in lower case, but (for English) A-Z letters
are mapped to their a-z equivalents anyway. If omitted, stdin is
used.
If -c is given, the argument is the character encoding of the input
and output files. If it is omitted, the UTF-8 encoding is used.
If -p is given the output file consists of each word of the input
file followed by \"->\" followed by its stemmed equivalent.
If -p2 is given the output file is a two column layout containing
the input words in the first column and the stemmed equivalents in
the second column.
Otherwise, the output file consists of the stemmed words, one per
line.
-h displays this help''' % sys.argv[0])
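# Illustrative invocation (added comment; the file names are hypothetical):
#   python stemwords.py -l english -i vocabulary.txt -o stemmed.txt -p2
# reads one word per line from vocabulary.txt and writes a two-column
# word/stem listing to stemmed.txt using the snowballstemmer English stemmer.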
def main():
argv = sys.argv[1:]
if len(argv) < 5:
usage()
else:
pretty = 0
input = ''
output = ''
encoding = 'utf_8'
language = 'English'
show_help = False
while len(argv):
arg = argv[0]
argv = argv[1:]
if arg == '-h':
show_help = True
break
elif arg == "-p":
pretty = 1
elif arg == "-p2":
pretty = 2
elif arg == "-l":
if len(argv) == 0:
show_help = True
break
language = argv[0]
argv = argv[1:]
elif arg == "-i":
if len(argv) == 0:
show_help = True
break
input = argv[0]
argv = argv[1:]
elif arg == "-o":
if len(argv) == 0:
show_help = True
break
output = argv[0]
argv = argv[1:]
elif arg == "-c":
if len(argv) == 0:
show_help = True
break
encoding = argv[0]
if show_help or input == '' or output == '':
usage()
else:
stemming(language, input, output, encoding, pretty)
def stemming(lang, input, output, encoding, pretty):
stemmer = snowballstemmer.stemmer(lang)
with codecs.open(output, "w", encoding) as outfile:
with codecs.open(input, "r", encoding) as infile:
for original in infile.readlines():
original = original.strip()
# Convert only ASCII-letters to lowercase, to match C behavior
original = ''.join((c.lower() if 'A' <= c <= 'Z' else c for c in original))
stemmed = stemmer.stemWord(original)
if pretty == 0:
if stemmed != "":
outfile.write(stemmed)
elif pretty == 1:
outfile.write(original + " -> " + stemmed)
elif pretty == 2:
outfile.write(original)
if len(original) < 30:
outfile.write(" " * (30 - len(original)))
else:
outfile.write("\n")
outfile.write(" " * 30)
outfile.write(stemmed)
outfile.write('\n')
main()
| bsd-3-clause |
markYoungH/chromium.src | media/tools/constrained_network_server/traffic_control.py | 186 | 12569 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Traffic control library for constraining the network configuration on a port.
The traffic controller sets up a constrained network configuration on a port.
Traffic to the constrained port is forwarded to a specified server port.
"""
import logging
import os
import re
import subprocess
# The maximum bandwidth limit.
_DEFAULT_MAX_BANDWIDTH_KBIT = 1000000
class TrafficControlError(BaseException):
"""Exception raised for errors in traffic control library.
Attributes:
msg: User defined error message.
cmd: Command for which the exception was raised.
returncode: Return code of running the command.
stdout: Output of running the command.
stderr: Error output of running the command.
"""
def __init__(self, msg, cmd=None, returncode=None, output=None,
error=None):
BaseException.__init__(self, msg)
self.msg = msg
self.cmd = cmd
self.returncode = returncode
self.output = output
self.error = error
def CheckRequirements():
"""Checks if permissions are available to run traffic control commands.
Raises:
TrafficControlError: If permissions to run traffic control commands are not
available.
"""
if os.geteuid() != 0:
_Exec(['sudo', '-n', 'tc', '-help'],
msg=('Cannot run \'tc\' command. Traffic Control must be run as root '
'or have password-less sudo access to this command.'))
_Exec(['sudo', '-n', 'iptables', '-help'],
msg=('Cannot run \'iptables\' command. Traffic Control must be run '
'as root or have password-less sudo access to this command.'))
def CreateConstrainedPort(config):
"""Creates a new constrained port.
Imposes packet level constraints such as bandwidth, latency, and packet loss
on a given port using the specified configuration dictionary. Traffic to that
port is forwarded to a specified server port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
loss: Percentage of packets to drop (integer 0-100).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
_AddRootQdisc(config['interface'])
try:
_ConfigureClass('add', config)
_AddSubQdisc(config)
_AddFilter(config['interface'], config['port'])
_AddIptableRule(config['interface'], config['port'], config['server_port'])
except TrafficControlError as e:
logging.debug('Error creating constrained port %d.\nError: %s\n'
'Deleting constrained port.', config['port'], e.error)
DeleteConstrainedPort(config)
raise e
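# Added usage sketch (values are illustrative, not from the original file):
# the keys mirror the configuration dictionary documented in
# CreateConstrainedPort() above.
#
# example_config = {
#     'interface': 'eth0',
#     'port': 9000,
#     'server_port': 8080,
#     'bandwidth': 2000,  # kbit/s
#     'latency': 100,     # ms
#     'loss': 5,          # percent of packets to drop
# }
# CheckRequirements()
# CreateConstrainedPort(example_config)
# # ... run the constrained-network test ...
# DeleteConstrainedPort(example_config)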
def DeleteConstrainedPort(config):
"""Deletes an existing constrained port.
Deletes constraints set on a given port and the traffic forwarding rule from
the constrained port to a specified server port.
The original constrained network configuration used to create the constrained
port must be passed in.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
server_port: Port to redirect traffic on [port] to (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface', 'port', 'server_port')
try:
# Delete filters first so it frees the class.
_DeleteFilter(config['interface'], config['port'])
finally:
try:
# Deleting the class deletes attached qdisc as well.
_ConfigureClass('del', config)
finally:
_DeleteIptableRule(config['interface'], config['port'],
config['server_port'])
def TearDown(config):
"""Deletes the root qdisc and all iptables rules.
Args:
config: Constraint configuration dictionary, format:
interface: Network interface name (string).
Raises:
TrafficControlError: If any operation fails. The message in the exception
describes what failed.
"""
_CheckArgsExist(config, 'interface')
command = ['sudo', 'tc', 'qdisc', 'del', 'dev', config['interface'], 'root']
try:
_Exec(command, msg='Could not delete root qdisc.')
finally:
_DeleteAllIpTableRules()
def _CheckArgsExist(config, *args):
"""Check that the args exist in config dictionary and are not None.
Args:
config: Any dictionary.
*args: The list of key names to check.
Raises:
TrafficControlError: If any key name does not exist in config or is None.
"""
for key in args:
if key not in config.keys() or config[key] is None:
raise TrafficControlError('Missing "%s" parameter.' % key)
def _AddRootQdisc(interface):
"""Sets up the default root qdisc.
Args:
interface: Network interface name.
Raises:
TrafficControlError: If adding the root qdisc fails for a reason other than
it already exists.
"""
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle',
'1:', 'htb']
try:
_Exec(command, msg=('Error creating root qdisc. '
'Make sure you have root access'))
except TrafficControlError as e:
# Ignore the error if root already exists.
if not 'File exists' in e.error:
raise e
def _ConfigureClass(option, config):
"""Adds or deletes a class and qdisc attached to the root.
The class specifies bandwidth, and qdisc specifies delay and packet loss. The
class ID is based on the config port.
Args:
option: Adds or deletes a class option [add|del].
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
bandwidth: Maximum allowed upload bandwidth (integer in kbit/s).
"""
# Use constrained port as class ID so we can attach the qdisc and filter to
# it, as well as delete the class, using only the port number.
class_id = '1:%x' % config['port']
if 'bandwidth' not in config.keys() or not config['bandwidth']:
bandwidth = _DEFAULT_MAX_BANDWIDTH_KBIT
else:
bandwidth = config['bandwidth']
bandwidth = '%dkbit' % bandwidth
command = ['sudo', 'tc', 'class', option, 'dev', config['interface'],
'parent', '1:', 'classid', class_id, 'htb', 'rate', bandwidth,
'ceil', bandwidth]
_Exec(command, msg=('Error configuring class ID %s using "%s" command.' %
(class_id, option)))
def _AddSubQdisc(config):
"""Adds a qdisc attached to the class identified by the config port.
Args:
config: Constraint configuration dictionary, format:
port: Port to constrain (integer 1-65535).
interface: Network interface name (string).
latency: Delay added on each packet sent (integer in ms).
loss: Percentage of packets to drop (integer 0-100).
"""
port_hex = '%x' % config['port']
class_id = '1:%x' % config['port']
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', config['interface'], 'parent',
class_id, 'handle', port_hex + ':0', 'netem']
# Check if packet-loss is set in the configuration.
if 'loss' in config.keys() and config['loss']:
loss = '%d%%' % config['loss']
command.extend(['loss', loss])
# Check if latency is set in the configuration.
if 'latency' in config.keys() and config['latency']:
latency = '%dms' % config['latency']
command.extend(['delay', latency])
_Exec(command, msg='Could not attach qdisc to class ID %s.' % class_id)
def _AddFilter(interface, port):
"""Redirects packets coming to a specified port into the constrained class.
Args:
interface: Interface name to attach the filter to (string).
port: Port number to filter packets with (integer 1-65535).
"""
class_id = '1:%x' % port
command = ['sudo', 'tc', 'filter', 'add', 'dev', interface, 'protocol', 'ip',
'parent', '1:', 'prio', '1', 'u32', 'match', 'ip', 'sport', port,
'0xffff', 'flowid', class_id]
_Exec(command, msg='Error adding filter on port %d.' % port)
def _DeleteFilter(interface, port):
"""Deletes the filter attached to the configured port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
"""
handle_id = _GetFilterHandleId(interface, port)
command = ['sudo', 'tc', 'filter', 'del', 'dev', interface, 'protocol', 'ip',
'parent', '1:0', 'handle', handle_id, 'prio', '1', 'u32']
_Exec(command, msg='Error deleting filter on port %d.' % port)
def _GetFilterHandleId(interface, port):
"""Searches for the handle ID of the filter identified by the config port.
Args:
interface: Interface name the filter is attached to (string).
port: Port number being filtered (integer 1-65535).
Returns:
The handle ID.
Raises:
TrafficControlError: If handle ID was not found.
"""
command = ['sudo', 'tc', 'filter', 'list', 'dev', interface, 'parent', '1:']
output = _Exec(command, msg='Error listing filters.')
# Search for the filter handle ID associated with class ID '1:port'.
handle_id_re = re.search(
'([0-9a-fA-F]{3}::[0-9a-fA-F]{3}).*(?=flowid 1:%x\s)' % port, output)
if handle_id_re:
return handle_id_re.group(1)
raise TrafficControlError(('Could not find filter handle ID for class ID '
'1:%x.') % port)
def _AddIptableRule(interface, port, server_port):
"""Forwards traffic from constrained port to a specified server port.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port to forward the packets to (integer 1-65535).
"""
# Preroute rules for accessing the port through external connections.
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
# Output rules for accessing the rule through localhost or 127.0.0.1
command = ['sudo', 'iptables', '-t', 'nat', '-A', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteIptableRule(interface, port, server_port):
"""Deletes the iptable rule associated with specified port number.
Args:
interface: Interface name to attach the filter to (string).
port: Port of incoming packets (integer 1-65535).
server_port: Server port packets are forwarded to (integer 1-65535).
"""
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'PREROUTING', '-i',
interface, '-p', 'tcp', '--dport', port, '-j', 'REDIRECT',
'--to-port', server_port]
_Exec(command, msg='Error deleting iptables rule for port %d.' % port)
command = ['sudo', 'iptables', '-t', 'nat', '-D', 'OUTPUT', '-p', 'tcp',
'--dport', port, '-j', 'REDIRECT', '--to-port', server_port]
_Exec(command, msg='Error adding iptables rule for port %d.' % port)
def _DeleteAllIpTableRules():
"""Deletes all iptables rules."""
command = ['sudo', 'iptables', '-t', 'nat', '-F']
_Exec(command, msg='Error deleting all iptables rules.')
def _Exec(command, msg=None):
"""Executes a command.
Args:
command: Command list to execute.
msg: Message describing the error in case the command fails.
Returns:
The standard output from running the command.
Raises:
TrafficControlError: If command fails. Message is set by the msg parameter.
"""
cmd_list = [str(x) for x in command]
cmd = ' '.join(cmd_list)
logging.debug('Running command: %s', cmd)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
raise TrafficControlError(msg, cmd, p.returncode, output, error)
return output.strip()
| bsd-3-clause |
marcelocure/django | tests/gis_tests/utils.py | 327 | 1377 | from unittest import skip
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
def no_backend(test_func, backend):
"Use this decorator to disable test on specified backend."
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend:
@skip("This test is skipped on '%s' backend" % backend)
def inner():
pass
return inner
else:
return test_func
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
return no_backend(func, 'oracle')
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
# MySQL spatial indices can't handle NULL geometries.
gisfield_may_be_null = not mysql
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys as SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import PostGISSpatialRefSys as SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys as SpatialRefSys
else:
SpatialRefSys = None
| bsd-3-clause |
chromium/chromium | components/policy/tools/generate_policy_source.py | 1 | 66390 | #!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''python3 %(prog)s [options]
Pass at least:
--chrome-version-file <path to src/chrome/VERSION> or --all-chrome-versions
--target-platform <which platform the target code will be generated for and can
be one of (win, mac, linux, chromeos, ios)>
--policy-templates-file <path to the policy_templates.json input file>.'''
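# Illustrative invocation (added comment; paths are hypothetical):
#   python3 generate_policy_source.py \
#       --chrome-version-file=../../chrome/VERSION \
#       --target-platform=win \
#       --policy-templates-file=policy_templates.json \
#       --policy-constants-header=gen/policy_constants.h \
#       --policy-constants-source=gen/policy_constants.cc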
from argparse import ArgumentParser
from collections import namedtuple
from collections import OrderedDict
from functools import partial
import ast
import codecs
import json
import os
import re
import sys
import textwrap
sys.path.insert(
0,
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
'third_party', 'six', 'src'))
import six
from xml.sax.saxutils import escape as xml_escape
if sys.version_info.major == 2:
string_type = basestring
else:
string_type = str
CHROME_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Google\\\\Chrome'
CHROMIUM_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Chromium'
PLATFORM_STRINGS = {
'chrome_frame': ['win'],
'chrome_os': ['chrome_os'],
'android': ['android'],
'webview_android': ['android'],
'ios': ['ios'],
'chrome.win': ['win'],
'chrome.linux': ['linux'],
'chrome.mac': ['mac'],
'chrome.*': ['win', 'mac', 'linux'],
'chrome.win7': ['win']
}
class PolicyDetails:
"""Parses a policy template and caches all its details."""
# Maps policy types to a tuple with 4 other types:
# - the equivalent base::Value::Type or 'TYPE_EXTERNAL' if the policy
# references external data
# - the equivalent Protobuf field type
# - the name of one of the protobufs for shared policy types
# - the equivalent type in Android's App Restriction Schema
# TODO(joaodasilva): refactor the 'dict' type into a more generic 'json' type
# that can also be used to represent lists of other JSON objects.
TYPE_MAP = {
'dict': ('Type::DICTIONARY', 'string', 'String', 'string'),
'external': ('TYPE_EXTERNAL', 'string', 'String', 'invalid'),
'int': ('Type::INTEGER', 'int64', 'Integer', 'integer'),
'int-enum': ('Type::INTEGER', 'int64', 'Integer', 'choice'),
'list': ('Type::LIST', 'StringList', 'StringList', 'string'),
'main': ('Type::BOOLEAN', 'bool', 'Boolean', 'bool'),
'string': ('Type::STRING', 'string', 'String', 'string'),
'string-enum': ('Type::STRING', 'string', 'String', 'choice'),
'string-enum-list': ('Type::LIST', 'StringList', 'StringList',
'multi-select'),
}
class EnumItem:
def __init__(self, item):
self.caption = PolicyDetails._RemovePlaceholders(item['caption'])
self.value = item['value']
def _ConvertPlatform(self, platform):
'''Converts product platform string in policy_templates.json to platform
string that is defined in build config.'''
if platform not in PLATFORM_STRINGS:
raise RuntimeError('Platform "%s" is not supported' % platform)
return PLATFORM_STRINGS[platform]
def __init__(self, policy, chrome_major_version, target_platform, valid_tags):
self.id = policy['id']
self.name = policy['name']
self.tags = policy.get('tags', None)
self._CheckTagsValidity(valid_tags)
features = policy.get('features', {})
self.can_be_recommended = features.get('can_be_recommended', False)
self.can_be_mandatory = features.get('can_be_mandatory', True)
self.internal_only = features.get('internal_only', False)
self.is_deprecated = policy.get('deprecated', False)
self.is_device_only = policy.get('device_only', False)
self.is_future = policy.get('future', False)
self.per_profile = features.get('per_profile', False)
self.supported_chrome_os_management = policy.get(
'supported_chrome_os_management', ['active_directory', 'google_cloud'])
self.schema = policy['schema']
self.validation_schema = policy.get('validation_schema')
self.has_enterprise_default = 'default_for_enterprise_users' in policy
if self.has_enterprise_default:
self.enterprise_default = policy['default_for_enterprise_users']
self.platforms = set()
self.future_on = set()
for platform, version_range in map(lambda s: s.split(':'),
policy.get('supported_on', [])):
split_result = version_range.split('-')
if len(split_result) != 2:
raise RuntimeError('supported_on must have exactly one dash: "%s"' % version_range)
(version_min, version_max) = split_result
if version_min == '':
raise RuntimeError('supported_on must define a start version: "%s"' % version_range)
# Skip if filtering by Chromium version and the current Chromium version
# does not support the policy.
if chrome_major_version:
if (int(version_min) > chrome_major_version or
version_max != '' and int(version_max) < chrome_major_version):
continue
self.platforms.update(self._ConvertPlatform(platform))
for platform in policy.get('future_on', []):
self.future_on.update(self._ConvertPlatform(platform))
if self.is_device_only and self.platforms.union(self.future_on) > set(
['chrome_os']):
raise RuntimeError('device_only is only allowed for Chrome OS: "%s"' %
self.name)
self.is_supported = (target_platform in self.platforms
or target_platform in self.future_on)
self.is_future_on = target_platform in self.future_on
self.is_future = self.is_future or self.is_future_on
if policy['type'] not in PolicyDetails.TYPE_MAP:
raise NotImplementedError(
'Unknown policy type for %s: %s' % (policy['name'], policy['type']))
self.policy_type, self.protobuf_type, self.policy_protobuf_type, \
self.restriction_type = PolicyDetails.TYPE_MAP[policy['type']]
self.desc = '\n'.join(
map(str.strip,
PolicyDetails._RemovePlaceholders(policy['desc']).splitlines()))
self.caption = PolicyDetails._RemovePlaceholders(policy['caption'])
self.max_size = policy.get('max_size', 0)
items = policy.get('items')
if items is None:
self.items = None
else:
self.items = [PolicyDetails.EnumItem(entry) for entry in items]
PH_PATTERN = re.compile('<ph[^>]*>([^<]*|[^<]*<ex>([^<]*)</ex>[^<]*)</ph>')
def _CheckTagsValidity(self, valid_tags):
if self.tags == None:
raise RuntimeError('Policy ' + self.name + ' has to contain a list of '
'tags!\n An empty list is also valid but means '
'setting this policy can never harm the user\'s '
'privacy or security.\n')
for tag in self.tags:
if not tag in valid_tags:
raise RuntimeError('Invalid Tag:' + tag + '!\n'
'Chose a valid tag from \'risk_tag_definitions\' (a '
'subproperty of root in policy_templates.json)!')
# Simplistic grit placeholder stripper.
@staticmethod
def _RemovePlaceholders(text):
result = ''
pos = 0
for m in PolicyDetails.PH_PATTERN.finditer(text):
result += text[pos:m.start(0)]
result += m.group(2) or m.group(1)
pos = m.end(0)
result += text[pos:]
return result
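# Added example (not in the original file) of what _RemovePlaceholders() does:
# the grit markup
#   'Enable <ph name="PRODUCT_NAME">$1<ex>Chromium</ex></ph> updates'
# is reduced to
#   'Enable Chromium updates'
# because the <ex> example text is preferred when present; otherwise the raw
# placeholder body is kept.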
class PolicyAtomicGroup:
"""Parses a policy atomic group and caches its name and policy names"""
def __init__(self, policy_group, available_policies,
policies_already_in_group):
self.id = policy_group['id']
self.name = policy_group['name']
self.policies = policy_group.get('policies', None)
self._CheckPoliciesValidity(available_policies, policies_already_in_group)
def _CheckPoliciesValidity(self, available_policies,
policies_already_in_group):
if self.policies == None or len(self.policies) <= 0:
raise RuntimeError('Atomic policy group ' + self.name +
' has to contain a list of '
'policies!\n')
for policy in self.policies:
if policy in policies_already_in_group:
raise RuntimeError('Policy: ' + policy +
' cannot be in more than one atomic group '
'in policy_templates.json)!')
policies_already_in_group.add(policy)
if not policy in available_policies:
raise RuntimeError('Invalid policy: ' + policy + ' in atomic group ' +
self.name + '.\n')
def ParseVersionFile(version_path):
chrome_major_version = None
for line in open(version_path, 'r').readlines():
key, val = line.rstrip('\r\n').split('=', 1)
if key == 'MAJOR':
chrome_major_version = val
break
if chrome_major_version is None:
raise RuntimeError('VERSION file does not contain major version.')
return int(chrome_major_version)
def main():
parser = ArgumentParser(usage=__doc__)
parser.add_argument(
'--pch',
'--policy-constants-header',
dest='header_path',
help='generate header file of policy constants',
metavar='FILE')
parser.add_argument(
'--pcc',
'--policy-constants-source',
dest='source_path',
help='generate source file of policy constants',
metavar='FILE')
parser.add_argument(
'--cpp',
'--cloud-policy-protobuf',
dest='cloud_policy_proto_path',
help='generate cloud policy protobuf file',
metavar='FILE')
parser.add_argument(
'--cpfrp',
'--cloud-policy-full-runtime-protobuf',
dest='cloud_policy_full_runtime_proto_path',
help='generate cloud policy full runtime protobuf',
metavar='FILE')
parser.add_argument(
'--csp',
'--chrome-settings-protobuf',
dest='chrome_settings_proto_path',
help='generate chrome settings protobuf file',
metavar='FILE')
parser.add_argument(
'--policy-common-definitions-protobuf',
dest='policy_common_definitions_proto_path',
help='policy common definitions protobuf file path',
metavar='FILE')
parser.add_argument(
'--policy-common-definitions-full-runtime-protobuf',
dest='policy_common_definitions_full_runtime_proto_path',
help='generate policy common definitions full runtime protobuf file',
metavar='FILE')
parser.add_argument(
'--csfrp',
'--chrome-settings-full-runtime-protobuf',
dest='chrome_settings_full_runtime_proto_path',
help='generate chrome settings full runtime protobuf',
metavar='FILE')
parser.add_argument(
'--ard',
'--app-restrictions-definition',
dest='app_restrictions_path',
help='generate an XML file as specified by '
'Android\'s App Restriction Schema',
metavar='FILE')
parser.add_argument(
'--rth',
'--risk-tag-header',
dest='risk_header_path',
help='generate header file for policy risk tags',
metavar='FILE')
parser.add_argument(
'--crospch',
'--cros-policy-constants-header',
dest='cros_constants_header_path',
help='generate header file of policy constants for use in '
'Chrome OS',
metavar='FILE')
parser.add_argument(
'--crospcc',
'--cros-policy-constants-source',
dest='cros_constants_source_path',
help='generate source file of policy constants for use in '
'Chrome OS',
metavar='FILE')
parser.add_argument(
'--chrome-version-file',
dest='chrome_version_file',
help='path to src/chrome/VERSION',
metavar='FILE')
parser.add_argument(
'--all-chrome-versions',
action='store_true',
dest='all_chrome_versions',
default=False,
help='do not restrict generated policies by chrome version')
parser.add_argument(
'--target-platform',
dest='target_platform',
help='the platform the generated code should run on - can be one of'
'(win, mac, linux, chromeos, fuchsia)',
metavar='PLATFORM')
parser.add_argument(
'--policy-templates-file',
dest='policy_templates_file',
help='path to the policy_templates.json input file',
metavar='FILE')
args = parser.parse_args()
has_arg_error = False
if not args.target_platform:
print('Error: Missing --target-platform=<platform>')
has_arg_error = True
if not args.policy_templates_file:
print('Error: Missing'
' --policy-templates-file=<path to policy_templates.json>')
has_arg_error = True
if not args.chrome_version_file and not args.all_chrome_versions:
print('Error: Missing'
' --chrome-version-file=<path to src/chrome/VERSION>\n'
' or --all-chrome-versions')
has_arg_error = True
if has_arg_error:
print('')
parser.print_help()
return 2
version_path = args.chrome_version_file
target_platform = args.target_platform
template_file_name = args.policy_templates_file
# --target-platform accepts "chromeos" as its input because that's what is
# used within GN. Within policy templates, "chrome_os" is used instead.
if target_platform == 'chromeos':
target_platform = 'chrome_os'
if args.all_chrome_versions:
chrome_major_version = None
else:
chrome_major_version = ParseVersionFile(version_path)
template_file_contents = _LoadJSONFile(template_file_name)
risk_tags = RiskTags(template_file_contents)
policy_details = [
PolicyDetails(policy, chrome_major_version, target_platform,
risk_tags.GetValidTags())
for policy in template_file_contents['policy_definitions']
if policy['type'] != 'group'
]
risk_tags.ComputeMaxTags(policy_details)
sorted_policy_details = sorted(policy_details, key=lambda policy: policy.name)
policy_details_set = list(map((lambda x: x.name), policy_details))
policies_already_in_group = set()
policy_atomic_groups = [
PolicyAtomicGroup(group, policy_details_set, policies_already_in_group)
for group in template_file_contents['policy_atomic_group_definitions']
]
sorted_policy_atomic_groups = sorted(
policy_atomic_groups, key=lambda group: group.name)
def GenerateFile(path, writer, sorted=False, xml=False):
if path:
with codecs.open(path, 'w', encoding='utf-8') as f:
_OutputGeneratedWarningHeader(f, template_file_name, xml)
writer(sorted and sorted_policy_details or policy_details,
sorted and sorted_policy_atomic_groups or policy_atomic_groups,
target_platform, f, risk_tags)
if args.header_path:
GenerateFile(args.header_path, _WritePolicyConstantHeader, sorted=True)
if args.source_path:
GenerateFile(args.source_path, _WritePolicyConstantSource, sorted=True)
if args.risk_header_path:
GenerateFile(args.risk_header_path, _WritePolicyRiskTagHeader)
if args.cloud_policy_proto_path:
GenerateFile(args.cloud_policy_proto_path, _WriteCloudPolicyProtobuf)
if (args.policy_common_definitions_full_runtime_proto_path and
args.policy_common_definitions_proto_path):
GenerateFile(
args.policy_common_definitions_full_runtime_proto_path,
partial(_WritePolicyCommonDefinitionsFullRuntimeProtobuf,
args.policy_common_definitions_proto_path))
if args.cloud_policy_full_runtime_proto_path:
GenerateFile(args.cloud_policy_full_runtime_proto_path,
_WriteCloudPolicyFullRuntimeProtobuf)
if args.chrome_settings_proto_path:
GenerateFile(args.chrome_settings_proto_path, _WriteChromeSettingsProtobuf)
if args.chrome_settings_full_runtime_proto_path:
GenerateFile(args.chrome_settings_full_runtime_proto_path,
_WriteChromeSettingsFullRuntimeProtobuf)
if target_platform == 'android' and args.app_restrictions_path:
GenerateFile(args.app_restrictions_path, _WriteAppRestrictions, xml=True)
# Generated code for Chrome OS (unused in Chromium).
if args.cros_constants_header_path:
GenerateFile(
args.cros_constants_header_path,
_WriteChromeOSPolicyConstantsHeader,
sorted=True)
if args.cros_constants_source_path:
GenerateFile(
args.cros_constants_source_path,
_WriteChromeOSPolicyConstantsSource,
sorted=True)
return 0
#------------------ shared helpers ---------------------------------#
def _OutputGeneratedWarningHeader(f, template_file_path, xml_style):
left_margin = '//'
if xml_style:
left_margin = ' '
f.write('<?xml version="1.0" encoding="utf-8"?>\n' '<!--\n')
else:
f.write('//\n')
f.write(left_margin + ' DO NOT MODIFY THIS FILE DIRECTLY!\n')
f.write(left_margin + ' IT IS GENERATED BY generate_policy_source.py\n')
f.write(left_margin + ' FROM ' + template_file_path + '\n')
if xml_style:
f.write('-->\n\n')
else:
f.write(left_margin + '\n\n')
COMMENT_WRAPPER = textwrap.TextWrapper()
COMMENT_WRAPPER.width = 80
COMMENT_WRAPPER.initial_indent = '// '
COMMENT_WRAPPER.subsequent_indent = '// '
COMMENT_WRAPPER.replace_whitespace = False
# Writes a comment, each line prefixed by // and wrapped to 80 spaces.
def _OutputComment(f, comment):
for line in six.ensure_text(comment).splitlines():
if len(line) == 0:
f.write('//')
else:
f.write(COMMENT_WRAPPER.fill(line))
f.write('\n')
def _LoadJSONFile(json_file):
with codecs.open(json_file, 'r', encoding='utf-8') as f:
text = f.read()
return ast.literal_eval(text)
#------------------ policy constants header ------------------------#
def _WritePolicyConstantHeader(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write('''#ifndef COMPONENTS_POLICY_POLICY_CONSTANTS_H_
#define COMPONENTS_POLICY_POLICY_CONSTANTS_H_
#include <cstdint>
#include <string>
#include "components/policy/core/common/policy_details.h"
#include "components/policy/core/common/policy_map.h"
#include "components/policy/proto/cloud_policy.pb.h"
namespace policy {
namespace internal {
struct SchemaData;
}
''')
if target_platform == 'win':
f.write('// The windows registry path where Chrome policy '
'configuration resides.\n'
'extern const wchar_t kRegistryChromePolicyKey[];\n')
f.write('''#if defined(OS_CHROMEOS)
// Sets default profile policies values for enterprise users.
void SetEnterpriseUsersProfileDefaults(PolicyMap* policy_map);
// Sets default system-wide policies values for enterprise users.
void SetEnterpriseUsersSystemWideDefaults(PolicyMap* policy_map);
// Sets all default values for enterprise users.
void SetEnterpriseUsersDefaults(PolicyMap* policy_map);
#endif
// Returns the PolicyDetails for |policy| if |policy| is a known
// Chrome policy, otherwise returns nullptr.
const PolicyDetails* GetChromePolicyDetails(
const std::string& policy);
// Returns the schema data of the Chrome policy schema.
const internal::SchemaData* GetChromeSchemaData();
''')
f.write('// Key names for the policy settings.\n' 'namespace key {\n\n')
for policy in policies:
# TODO(joaodasilva): Include only supported policies in
# configuration_policy_handler.cc and configuration_policy_handler_list.cc
# so that these names can be conditional on 'policy.is_supported'.
# http://crbug.com/223616
f.write('extern const char k' + policy.name + '[];\n')
f.write('\n} // namespace key\n\n')
f.write('// Group names for the policy settings.\n' 'namespace group {\n\n')
for group in policy_atomic_groups:
f.write('extern const char k' + group.name + '[];\n')
f.write('\n} // namespace group\n\n')
f.write('struct AtomicGroup {\n'
' const short id;\n'
' const char* policy_group;\n'
' const char* const* policies;\n'
'};\n\n')
f.write('extern const AtomicGroup kPolicyAtomicGroupMappings[];\n\n')
f.write('extern const size_t kPolicyAtomicGroupMappingsLength;\n\n')
f.write('enum class StringPolicyType {\n'
' STRING,\n'
' JSON,\n'
' EXTERNAL,\n'
'};\n\n')
# User policy proto pointers, one struct for each protobuf type.
protobuf_types = _GetProtobufTypes(policies)
for protobuf_type in protobuf_types:
_WriteChromePolicyAccessHeader(f, protobuf_type)
f.write('constexpr int64_t kDevicePolicyExternalDataResourceCacheSize = %d;\n'
% _ComputeTotalDevicePolicyExternalDataMaxSize(policies))
f.write('\n} // namespace policy\n\n'
'#endif // COMPONENTS_POLICY_POLICY_CONSTANTS_H_\n')
def _WriteChromePolicyAccessHeader(f, protobuf_type):
f.write('// Read access to the protobufs of all supported %s user policies.\n'
% protobuf_type.lower())
f.write('struct %sPolicyAccess {\n' % protobuf_type)
f.write(' const char* policy_key;\n'
' bool per_profile;\n'
' bool (enterprise_management::CloudPolicySettings::'
'*has_proto)() const;\n'
' const enterprise_management::%sPolicyProto&\n'
' (enterprise_management::CloudPolicySettings::'
'*get_proto)() const;\n' % protobuf_type)
if protobuf_type == 'String':
f.write(' const StringPolicyType type;\n')
f.write('};\n')
f.write('extern const %sPolicyAccess k%sPolicyAccess[];\n\n' %
(protobuf_type, protobuf_type))
def _ComputeTotalDevicePolicyExternalDataMaxSize(policies):
total_device_policy_external_data_max_size = 0
for policy in policies:
if policy.is_device_only and policy.policy_type == 'TYPE_EXTERNAL':
total_device_policy_external_data_max_size += policy.max_size
return total_device_policy_external_data_max_size
#------------------ policy constants source ------------------------#
SchemaNodeKey = namedtuple('SchemaNodeKey',
'schema_type extra is_sensitive_value')
SchemaNode = namedtuple(
'SchemaNode',
'schema_type extra is_sensitive_value has_sensitive_children comments')
PropertyNode = namedtuple('PropertyNode', 'key schema')
PropertiesNode = namedtuple(
'PropertiesNode',
'begin end pattern_end required_begin required_end additional name')
RestrictionNode = namedtuple('RestrictionNode', 'first second')
# A mapping of the simple schema types to base::Value::Types.
SIMPLE_SCHEMA_NAME_MAP = {
'boolean': 'Type::BOOLEAN',
'integer': 'Type::INTEGER',
'null': 'Type::NONE',
'number': 'Type::DOUBLE',
'string': 'Type::STRING',
}
INVALID_INDEX = -1
MIN_INDEX = -1
MAX_INDEX = (1 << 15) - 1 # signed short in c++
MIN_POLICY_ID = 0
MAX_POLICY_ID = (1 << 16) - 1 # unsigned short
MIN_EXTERNAL_DATA_SIZE = 0
MAX_EXTERNAL_DATA_SIZE = (1 << 32) - 1 # unsigned int32
class SchemaNodesGenerator:
"""Builds the internal structs to represent a JSON schema."""
def __init__(self, shared_strings):
"""Creates a new generator.
|shared_strings| is a map of strings to a C expression that evaluates to
that string at runtime. This mapping can be used to reuse existing string
constants."""
self.shared_strings = shared_strings
self.key_index_map = {} # |SchemaNodeKey| -> index in |schema_nodes|
self.schema_nodes = [] # List of |SchemaNode|s
self.property_nodes = [] # List of |PropertyNode|s
self.properties_nodes = [] # List of |PropertiesNode|s
self.restriction_nodes = [] # List of |RestrictionNode|s
self.required_properties = []
self.int_enums = []
self.string_enums = []
self.ranges = {}
self.id_map = {}
def GetString(self, s):
if s in self.shared_strings:
return self.shared_strings[s]
# Generate JSON escaped string, which is slightly different from desired
# C/C++ escaped string. Known differences includes unicode escaping format.
return json.dumps(s)
def AppendSchema(self, schema_type, extra, is_sensitive_value, comment=''):
# Find existing schema node with same structure.
key_node = SchemaNodeKey(schema_type, extra, is_sensitive_value)
if key_node in self.key_index_map:
index = self.key_index_map[key_node]
if comment:
self.schema_nodes[index].comments.add(comment)
return index
# Create new schema node.
index = len(self.schema_nodes)
comments = {comment} if comment else set()
schema_node = SchemaNode(schema_type, extra, is_sensitive_value, False,
comments)
self.schema_nodes.append(schema_node)
self.key_index_map[key_node] = index
return index
def AppendRestriction(self, first, second):
r = RestrictionNode(str(first), str(second))
if not r in self.ranges:
self.ranges[r] = len(self.restriction_nodes)
self.restriction_nodes.append(r)
return self.ranges[r]
def GetSimpleType(self, name, is_sensitive_value):
return self.AppendSchema(SIMPLE_SCHEMA_NAME_MAP[name], INVALID_INDEX,
is_sensitive_value, 'simple type: ' + name)
def SchemaHaveRestriction(self, schema):
return any(keyword in schema
for keyword in ['minimum', 'maximum', 'enum', 'pattern'])
def IsConsecutiveInterval(self, seq):
sortedSeq = sorted(seq)
return all(
sortedSeq[i] + 1 == sortedSeq[i + 1] for i in range(len(sortedSeq) - 1))
def GetEnumIntegerType(self, schema, is_sensitive_value, name):
assert all(type(x) == int for x in schema['enum'])
possible_values = schema['enum']
if self.IsConsecutiveInterval(possible_values):
index = self.AppendRestriction(max(possible_values), min(possible_values))
return self.AppendSchema(
'Type::INTEGER', index, is_sensitive_value,
'integer with enumeration restriction (use range instead): %s' % name)
offset_begin = len(self.int_enums)
self.int_enums += possible_values
offset_end = len(self.int_enums)
return self.AppendSchema('Type::INTEGER',
self.AppendRestriction(offset_begin, offset_end),
is_sensitive_value,
'integer with enumeration restriction: %s' % name)
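# Added note: an integer enum whose values form a consecutive interval,
# e.g. [1, 2, 3], is encoded above as a single min/max restriction, while a
# non-consecutive enum such as [1, 3, 5] has its values appended to
# |int_enums| and is referenced through an offset range instead.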
def GetEnumStringType(self, schema, is_sensitive_value, name):
assert all(type(x) == str for x in schema['enum'])
offset_begin = len(self.string_enums)
self.string_enums += schema['enum']
offset_end = len(self.string_enums)
return self.AppendSchema('Type::STRING',
self.AppendRestriction(offset_begin, offset_end),
is_sensitive_value,
'string with enumeration restriction: %s' % name)
def GetEnumType(self, schema, is_sensitive_value, name):
if len(schema['enum']) == 0:
raise RuntimeError('Empty enumeration in %s' % name)
elif schema['type'] == 'integer':
return self.GetEnumIntegerType(schema, is_sensitive_value, name)
elif schema['type'] == 'string':
return self.GetEnumStringType(schema, is_sensitive_value, name)
else:
raise RuntimeError('Unknown enumeration type in %s' % name)
def GetPatternType(self, schema, is_sensitive_value, name):
if schema['type'] != 'string':
raise RuntimeError('Unknown pattern type in %s' % name)
pattern = schema['pattern']
# Try to compile the pattern to validate it, note that the syntax used
# here might be slightly different from re2.
# TODO(binjin): Add a python wrapper of re2 and use it here.
re.compile(pattern)
index = len(self.string_enums)
self.string_enums.append(pattern)
return self.AppendSchema('Type::STRING', self.AppendRestriction(
index, index), is_sensitive_value,
'string with pattern restriction: %s' % name)
def GetRangedType(self, schema, is_sensitive_value, name):
if schema['type'] != 'integer':
raise RuntimeError('Unknown ranged type in %s' % name)
min_value_set, max_value_set = False, False
if 'minimum' in schema:
min_value = int(schema['minimum'])
min_value_set = True
if 'maximum' in schema:
max_value = int(schema['maximum'])
max_value_set = True
if min_value_set and max_value_set and min_value > max_value:
raise RuntimeError('Invalid ranged type in %s' % name)
index = self.AppendRestriction(
str(max_value) if max_value_set else 'INT_MAX',
str(min_value) if min_value_set else 'INT_MIN')
return self.AppendSchema('Type::INTEGER', index, is_sensitive_value,
'integer with ranged restriction: %s' % name)
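# Editor's summary (added comment, not part of the original script): a
# RestrictionNode is an overloaded (first, second) pair whose meaning depends
# on how it was appended above:
#   - GetRangedType() and consecutive integer enums store (max, min),
#   - non-consecutive integer enums store (begin, end) offsets into
#     kIntegerEnumerations,
#   - string enums store (begin, end) offsets into kStringEnumerations,
#   - GetPatternType() stores (index, index) of the pattern in
#     kStringEnumerations.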
def Generate(self, schema, name):
"""Generates the structs for the given schema.
|schema|: a valid JSON schema in a dictionary.
|name|: the name of the current node, for the generated comments."""
if '$ref' in schema:
if 'id' in schema:
raise RuntimeError("Schemas with a $ref can't have an id")
if not isinstance(schema['$ref'], string_type):
raise RuntimeError("$ref attribute must be a string")
return schema['$ref']
is_sensitive_value = schema.get('sensitiveValue', False)
assert type(is_sensitive_value) is bool
if schema['type'] in SIMPLE_SCHEMA_NAME_MAP:
if not self.SchemaHaveRestriction(schema):
# Simple types use shared nodes.
return self.GetSimpleType(schema['type'], is_sensitive_value)
elif 'enum' in schema:
return self.GetEnumType(schema, is_sensitive_value, name)
elif 'pattern' in schema:
return self.GetPatternType(schema, is_sensitive_value, name)
else:
return self.GetRangedType(schema, is_sensitive_value, name)
if schema['type'] == 'array':
return self.AppendSchema(
'Type::LIST',
self.GenerateAndCollectID(schema['items'], 'items of ' + name),
is_sensitive_value)
elif schema['type'] == 'object':
# Reserve an index first, so that dictionaries come before their
# properties. This makes sure that the root node is the first in the
# SchemaNodes array.
# This, however, prevents de-duplication for object schemas, since we could
# only determine duplicates after all child schema nodes are generated as
# well and then we couldn't remove the newly created schema node without
# invalidating all child schema indices.
index = len(self.schema_nodes)
self.schema_nodes.append(
SchemaNode('Type::DICTIONARY', INVALID_INDEX, is_sensitive_value,
False, {name}))
if 'additionalProperties' in schema:
additionalProperties = self.GenerateAndCollectID(
schema['additionalProperties'], 'additionalProperties of ' + name)
else:
additionalProperties = INVALID_INDEX
# Properties must be sorted by name, for the binary search lookup.
# Note that |properties| must be evaluated immediately, so that all the
# recursive calls to Generate() append the necessary child nodes; if
# |properties| were a generator then this wouldn't work.
sorted_properties = sorted(schema.get('properties', {}).items())
properties = [
PropertyNode(
self.GetString(key), self.GenerateAndCollectID(subschema, key))
for key, subschema in sorted_properties
]
pattern_properties = []
for pattern, subschema in schema.get('patternProperties', {}).items():
pattern_properties.append(
PropertyNode(
self.GetString(pattern),
self.GenerateAndCollectID(subschema, pattern)))
begin = len(self.property_nodes)
self.property_nodes += properties
end = len(self.property_nodes)
self.property_nodes += pattern_properties
pattern_end = len(self.property_nodes)
if index == 0:
self.root_properties_begin = begin
self.root_properties_end = end
required_begin = len(self.required_properties)
required_properties = schema.get('required', [])
assert type(required_properties) is list
assert all(type(x) == str for x in required_properties)
self.required_properties += required_properties
required_end = len(self.required_properties)
# Check that each string in |required_properties| is in |properties|.
properties = schema.get('properties', {})
for name in required_properties:
assert name in properties
extra = len(self.properties_nodes)
self.properties_nodes.append(
PropertiesNode(begin, end, pattern_end, required_begin, required_end,
additionalProperties, name))
# Update the |extra| field of the node at |index| now, since it was filled
# with a dummy value when the schema node was created.
self.schema_nodes[index] = self.schema_nodes[index]._replace(extra=extra)
return index
else:
assert False
def GenerateAndCollectID(self, schema, name):
"""A wrapper of Generate(), will take the return value, check and add 'id'
attribute to self.id_map. The wrapper needs to be used for every call to
Generate().
"""
index = self.Generate(schema, name)
if 'id' not in schema:
return index
id_str = schema['id']
if id_str in self.id_map:
raise RuntimeError('Duplicated id: ' + id_str)
self.id_map[id_str] = index
return index
def Write(self, f):
"""Writes the generated structs to the given file.
|f| an open file to write to."""
f.write('const internal::SchemaNode kSchemas[] = {\n'
'// Type' + ' ' * 27 +
'Extra IsSensitiveValue HasSensitiveChildren\n')
for schema_node in self.schema_nodes:
assert schema_node.extra >= MIN_INDEX and schema_node.extra <= MAX_INDEX
comment = ('\n' + ' ' * 69 + '// ').join(sorted(schema_node.comments))
f.write(' { base::Value::%-19s %4s %-16s %-5s }, // %s\n' %
(schema_node.schema_type + ',', str(schema_node.extra) + ',',
str(schema_node.is_sensitive_value).lower() + ',',
str(schema_node.has_sensitive_children).lower(), comment))
f.write('};\n\n')
if self.property_nodes:
f.write('const internal::PropertyNode kPropertyNodes[] = {\n'
'// Property' + ' ' * 61 + 'Schema\n')
for property_node in self.property_nodes:
f.write(' { %-64s %6d },\n' % (property_node.key + ',',
property_node.schema))
f.write('};\n\n')
if self.properties_nodes:
f.write('const internal::PropertiesNode kProperties[] = {\n'
'// Begin End PatternEnd RequiredBegin RequiredEnd'
' Additional Properties\n')
for properties_node in self.properties_nodes:
for i in range(0, len(properties_node) - 1):
assert (properties_node[i] >= MIN_INDEX and
properties_node[i] <= MAX_INDEX)
f.write(
' { %5d, %5d, %5d, %5d, %10d, %5d }, // %s\n' % properties_node)
f.write('};\n\n')
if self.restriction_nodes:
f.write('const internal::RestrictionNode kRestrictionNodes[] = {\n')
f.write('// FIRST, SECOND\n')
for restriction_node in self.restriction_nodes:
f.write(' {{ %-8s %4s}},\n' % (restriction_node.first + ',',
restriction_node.second))
f.write('};\n\n')
if self.required_properties:
f.write('const char* const kRequiredProperties[] = {\n')
for required_property in self.required_properties:
f.write(' %s,\n' % self.GetString(required_property))
f.write('};\n\n')
if self.int_enums:
f.write('const int kIntegerEnumerations[] = {\n')
for possible_values in self.int_enums:
f.write(' %d,\n' % possible_values)
f.write('};\n\n')
if self.string_enums:
f.write('const char* const kStringEnumerations[] = {\n')
for possible_values in self.string_enums:
f.write(' %s,\n' % self.GetString(possible_values))
f.write('};\n\n')
f.write('const internal::SchemaData* GetChromeSchemaData() {\n')
f.write(' static const internal::SchemaData kChromeSchemaData = {\n'
' kSchemas,\n')
f.write(' kPropertyNodes,\n' if self.property_nodes else ' nullptr,\n')
f.write(' kProperties,\n' if self.properties_nodes else ' nullptr,\n')
f.write('    kRestrictionNodes,\n' if self.restriction_nodes else '    nullptr,\n')
f.write('    kRequiredProperties,\n' if self.required_properties else '    nullptr,\n')
f.write(' kIntegerEnumerations,\n' if self.int_enums else ' nullptr,\n')
f.write(
' kStringEnumerations,\n' if self.string_enums else ' nullptr,\n')
f.write(' %d, // validation_schema root index\n' %
self.validation_schema_root_index)
f.write(' };\n\n')
f.write(' return &kChromeSchemaData;\n' '}\n\n')
def GetByID(self, id_str):
if not isinstance(id_str, string_type):
return id_str
if id_str not in self.id_map:
raise RuntimeError('Invalid $ref: ' + id_str)
return self.id_map[id_str]
def ResolveID(self, index, tuple_type, params):
simple_tuple = params[:index] + (self.GetByID(
params[index]),) + params[index + 1:]
return tuple_type(*simple_tuple)
def ResolveReferences(self):
"""Resolve reference mapping, required to be called after Generate()
After calling Generate(), the type of indices used in schema structures
might be either int or string. An int type suggests that it's a resolved
index, but for string type it's unresolved. Resolving a reference is as
simple as looking up for corresponding ID in self.id_map, and replace the
old index with the mapped index.
"""
self.schema_nodes = list(
map(partial(self.ResolveID, 1, SchemaNode), self.schema_nodes))
self.property_nodes = list(
map(partial(self.ResolveID, 1, PropertyNode), self.property_nodes))
self.properties_nodes = list(
map(partial(self.ResolveID, 3, PropertiesNode), self.properties_nodes))
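# Example (added comment, not part of the original script): if a schema used
# {'$ref': 'PolicySchemaID'}, Generate() returned the string 'PolicySchemaID'
# and it was stored as an index field; ResolveReferences() replaces it with
# self.id_map['PolicySchemaID'], i.e. the index of the schema node that
# declared "id": "PolicySchemaID". 'PolicySchemaID' is a hypothetical name.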
def FindSensitiveChildren(self):
"""Wrapper function, which calls FindSensitiveChildrenRecursive().
"""
if self.schema_nodes:
self.FindSensitiveChildrenRecursive(0, set())
def FindSensitiveChildrenRecursive(self, index, handled_schema_nodes):
"""Recursively compute |has_sensitive_children| for the schema node at
|index| and all its child elements. A schema has sensitive children if any
of its children has |is_sensitive_value|==True or has sensitive children
itself.
"""
node = self.schema_nodes[index]
if index in handled_schema_nodes:
return node.has_sensitive_children or node.is_sensitive_value
handled_schema_nodes.add(index)
has_sensitive_children = False
if node.schema_type == 'Type::DICTIONARY':
properties_node = self.properties_nodes[node.extra]
# Iterate through properties and patternProperties.
for property_index in range(properties_node.begin,
properties_node.pattern_end - 1):
sub_index = self.property_nodes[property_index].schema
has_sensitive_children |= self.FindSensitiveChildrenRecursive(
sub_index, handled_schema_nodes)
# AdditionalProperties
if properties_node.additional != INVALID_INDEX:
sub_index = properties_node.additional
has_sensitive_children |= self.FindSensitiveChildrenRecursive(
sub_index, handled_schema_nodes)
elif node.schema_type == 'Type::LIST':
sub_index = node.extra
has_sensitive_children |= self.FindSensitiveChildrenRecursive(
sub_index, handled_schema_nodes)
if has_sensitive_children:
self.schema_nodes[index] = self.schema_nodes[index]._replace(
has_sensitive_children=True)
return has_sensitive_children or node.is_sensitive_value
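# Editor's note (added comment, not part of the original script): for example,
# a dictionary schema whose only property is a string with
# sensitiveValue=true keeps is_sensitive_value=False itself, but this pass
# sets has_sensitive_children=True on it, and because the call returns True
# the flag also propagates to any enclosing dictionaries or lists.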
def _GenerateDefaultValue(value):
"""Converts a JSON object into a base::Value entry. Returns a tuple, the first
entry being a list of declaration statements to define the variable, the
second entry being a way to access the variable.
If no definition is needed, the first return value will be an empty list. If
any error occurs, the second return value will be None (ie, no way to fetch
the value).
|value|: The deserialized value to convert to base::Value."""
if type(value) == bool or type(value) == int:
return [], 'base::Value(%s)' % json.dumps(value)
elif type(value) == str:
return [], 'base::Value("%s")' % value
elif type(value) == list:
setup = ['base::Value default_value(base::Value::Type::LIST);']
for entry in value:
decl, fetch = _GenerateDefaultValue(entry)
# Nested lists are not supported.
if decl:
return [], None
setup.append('default_value.Append(%s);' % fetch)
return setup, 'std::move(default_value)'
return [], None
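# Illustrative examples (added comment, not part of the original script) of
# what _GenerateDefaultValue() produces:
#   True     -> ([], 'base::Value(true)')
#   7        -> ([], 'base::Value(7)')
#   'foo'    -> ([], 'base::Value("foo")')
#   [1, 2]   -> (['base::Value default_value(base::Value::Type::LIST);',
#                 'default_value.Append(base::Value(1));',
#                 'default_value.Append(base::Value(2));'],
#                'std::move(default_value)')
#   {'a': 1} -> ([], None)  # dicts and nested lists are not supported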
def _WritePolicyConstantSource(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write('''#include "components/policy/policy_constants.h"
#include <algorithm>
#include <climits>
#include <memory>
#include "base/check_op.h"
#include "base/stl_util.h" // base::size()
#include "base/values.h"
#include "build/branding_buildflags.h"
#include "components/policy/core/common/policy_types.h"
#include "components/policy/core/common/schema_internal.h"
#include "components/policy/proto/cloud_policy.pb.h"
#include "components/policy/risk_tag.h"
namespace em = enterprise_management;
namespace policy {
''')
# Generate the Chrome schema.
chrome_schema = {
'type': 'object',
'properties': {},
}
chrome_validation_schema = {
'type': 'object',
'properties': {},
}
shared_strings = {}
for policy in policies:
shared_strings[policy.name] = "key::k%s" % policy.name
if policy.is_supported:
chrome_schema['properties'][policy.name] = policy.schema
if policy.validation_schema is not None:
(chrome_validation_schema['properties'][policy.name]
) = policy.validation_schema
# Note: this list must be kept in sync with the known property list of the
# Chrome schema, so that binary searching in the PropertyNode array gets the
# right index on this array as well. See the implementation of
# GetChromePolicyDetails() below.
# TODO(crbug.com/1074336): kChromePolicyDetails shouldn't be declared if there
# is no policy.
f.write(
'''const __attribute__((unused)) PolicyDetails kChromePolicyDetails[] = {
// is_deprecated is_future is_device_policy id max_external_data_size, risk tags
''')
for policy in policies:
if policy.is_supported:
assert policy.id >= MIN_POLICY_ID and policy.id <= MAX_POLICY_ID
assert (policy.max_size >= MIN_EXTERNAL_DATA_SIZE and
policy.max_size <= MAX_EXTERNAL_DATA_SIZE)
f.write(' // %s\n' % policy.name)
f.write(' { %-14s%-10s%-17s%4s,%22s, %s },\n' %
('true,' if policy.is_deprecated else 'false,',
'true,' if policy.is_future_on else 'false, ',
'true,' if policy.is_device_only else 'false,', policy.id,
policy.max_size, risk_tags.ToInitString(policy.tags)))
f.write('};\n\n')
schema_generator = SchemaNodesGenerator(shared_strings)
schema_generator.GenerateAndCollectID(chrome_schema, 'root node')
if chrome_validation_schema['properties']:
schema_generator.validation_schema_root_index = \
schema_generator.GenerateAndCollectID(chrome_validation_schema,
'validation_schema root node')
else:
schema_generator.validation_schema_root_index = INVALID_INDEX
schema_generator.ResolveReferences()
schema_generator.FindSensitiveChildren()
schema_generator.Write(f)
f.write('\n')
if schema_generator.property_nodes:
f.write('namespace {\n')
f.write('bool CompareKeys(const internal::PropertyNode& node,\n'
' const std::string& key) {\n'
' return node.key < key;\n'
'}\n\n')
f.write('} // namespace\n\n')
if target_platform == 'win':
f.write('#if BUILDFLAG(GOOGLE_CHROME_BRANDING)\n'
'const wchar_t kRegistryChromePolicyKey[] = '
'L"' + CHROME_POLICY_KEY + '";\n'
'#else\n'
'const wchar_t kRegistryChromePolicyKey[] = '
'L"' + CHROMIUM_POLICY_KEY + '";\n'
'#endif\n\n')
# Setting enterprise defaults code generation.
profile_policy_enterprise_defaults = ""
system_wide_policy_enterprise_defaults = ""
for policy in policies:
if policy.has_enterprise_default and policy.is_supported:
declare_default_stmts, fetch_default = _GenerateDefaultValue(
policy.enterprise_default)
if not fetch_default:
raise RuntimeError('Type %s of policy %s is not supported at '
'enterprise defaults' %
(policy.policy_type, policy.name))
# Convert declare_default_stmts to a string with the correct indentation.
if declare_default_stmts:
declare_default = ' %s\n' % '\n '.join(declare_default_stmts)
else:
declare_default = ''
setting_enterprise_default = ''' if (!policy_map->Get(key::k%s)) {
%s
policy_map->Set(key::k%s,
POLICY_LEVEL_MANDATORY,
POLICY_SCOPE_USER,
POLICY_SOURCE_ENTERPRISE_DEFAULT,
%s,
nullptr);
}
''' % (policy.name, declare_default, policy.name, fetch_default)
if policy.per_profile:
profile_policy_enterprise_defaults += setting_enterprise_default
else:
system_wide_policy_enterprise_defaults += setting_enterprise_default
f.write('#if defined(OS_CHROMEOS)')
f.write('''
void SetEnterpriseUsersProfileDefaults(PolicyMap* policy_map) {
%s
}
''' % profile_policy_enterprise_defaults)
f.write('''
void SetEnterpriseUsersSystemWideDefaults(PolicyMap* policy_map) {
%s
}
''' % system_wide_policy_enterprise_defaults)
f.write('''
void SetEnterpriseUsersDefaults(PolicyMap* policy_map) {
SetEnterpriseUsersProfileDefaults(policy_map);
SetEnterpriseUsersSystemWideDefaults(policy_map);
}
''')
f.write('#endif\n\n')
f.write('const PolicyDetails* GetChromePolicyDetails('
'const std::string& policy) {\n')
if schema_generator.property_nodes:
f.write(' // First index in kPropertyNodes of the Chrome policies.\n'
' static const int begin_index = %s;\n'
' // One-past-the-end of the Chrome policies in kPropertyNodes.\n'
' static const int end_index = %s;\n' %
(schema_generator.root_properties_begin,
schema_generator.root_properties_end))
f.write(''' const internal::PropertyNode* begin =
kPropertyNodes + begin_index;
const internal::PropertyNode* end = kPropertyNodes + end_index;
const internal::PropertyNode* it =
std::lower_bound(begin, end, policy, CompareKeys);
if (it == end || it->key != policy)
return nullptr;
// This relies on kPropertyNodes from begin_index to end_index
// having exactly the same policies (and in the same order) as
// kChromePolicyDetails, so that binary searching on the first
// gets the same results as a binary search on the second would.
// However, kPropertyNodes has the policy names and
// kChromePolicyDetails doesn't, so we obtain the index into
// the second array by searching the first to avoid duplicating
// the policy name pointers.
// Offsetting |it| from |begin| here obtains the index we're
// looking for.
size_t index = it - begin;
CHECK_LT(index, base::size(kChromePolicyDetails));
return kChromePolicyDetails + index;
''')
else:
f.write('return nullptr;')
f.write('}\n\n')
f.write('namespace key {\n\n')
for policy in policies:
# TODO(joaodasilva): Include only supported policies in
# configuration_policy_handler.cc and configuration_policy_handler_list.cc
# so that these names can be conditional on 'policy.is_supported'.
# http://crbug.com/223616
f.write('const char k{name}[] = "{name}";\n'.format(name=policy.name))
f.write('\n} // namespace key\n\n')
f.write('namespace group {\n\n')
for group in policy_atomic_groups:
f.write('const char k{name}[] = "{name}";\n'.format(name=group.name))
f.write('\n')
f.write('namespace {\n\n')
for group in policy_atomic_groups:
f.write('const char* const %s[] = {' % (group.name))
for policy in group.policies:
f.write('key::k%s, ' % (policy))
f.write('nullptr};\n')
f.write('\n} // namespace\n')
f.write('\n} // namespace group\n\n')
atomic_groups_length = 0
f.write('const AtomicGroup kPolicyAtomicGroupMappings[] = {\n')
for group in policy_atomic_groups:
atomic_groups_length += 1
f.write(' {')
f.write(' {id}, group::k{name}, group::{name}'.format(
id=group.id, name=group.name))
f.write(' },\n')
f.write('};\n\n')
f.write('const size_t kPolicyAtomicGroupMappingsLength = %s;\n\n' %
(atomic_groups_length))
supported_user_policies = [
p for p in policies if p.is_supported and not p.is_device_only
]
protobuf_types = _GetProtobufTypes(supported_user_policies)
for protobuf_type in protobuf_types:
_WriteChromePolicyAccessSource(supported_user_policies, f, protobuf_type)
f.write('\n} // namespace policy\n')
# Return the StringPolicyType enum value for a particular policy type.
def _GetStringPolicyType(policy_type):
if policy_type == 'Type::STRING':
return 'StringPolicyType::STRING'
elif policy_type == 'Type::DICTIONARY':
return 'StringPolicyType::JSON'
elif policy_type == 'TYPE_EXTERNAL':
return 'StringPolicyType::EXTERNAL'
raise RuntimeError('Invalid string type: ' + policy_type + '!\n')
# Writes an array that contains the pointers to the proto field for each policy
# in |policies| of the given |protobuf_type|.
def _WriteChromePolicyAccessSource(policies, f, protobuf_type):
f.write('const %sPolicyAccess k%sPolicyAccess[] = {\n' % (protobuf_type,
protobuf_type))
extra_args = ''
for policy in policies:
if policy.policy_protobuf_type == protobuf_type:
name = policy.name
if protobuf_type == 'String':
extra_args = ',\n ' + _GetStringPolicyType(policy.policy_type)
f.write(' {key::k%s,\n'
' %s,\n'
' &em::CloudPolicySettings::has_%s,\n'
' &em::CloudPolicySettings::%s%s},\n' %
(name, str(policy.per_profile).lower(), name.lower(),
name.lower(), extra_args))
# The list is nullptr-terminated.
f.write(' {nullptr, false, nullptr, nullptr},\n' '};\n\n')
#------------------ policy risk tag header -------------------------#
class RiskTags(object):
'''Generates files and strings to translate the parsed risk tags.'''
# TODO(fhorschig|tnagel): Add, Check & Generate translation descriptions.
def __init__(self, template_file_contents):
self.max_tags = None
self.enum_for_tag = OrderedDict() # Ordered by severity as stated in JSON.
self._ReadRiskTagMetaData(template_file_contents)
def GenerateEnum(self):
values = [' ' + self.enum_for_tag[tag] for tag in self.enum_for_tag]
values.append(' RISK_TAG_COUNT')
values.append(' RISK_TAG_NONE')
enum_text = 'enum RiskTag : uint8_t {\n'
enum_text += ',\n'.join(values) + '\n};\n'
return enum_text
def GetMaxTags(self):
return str(self.max_tags)
def GetValidTags(self):
return [tag for tag in self.enum_for_tag]
def ToInitString(self, tags):
all_tags = [self._ToEnum(tag) for tag in tags]
all_tags += ["RISK_TAG_NONE" for missing in range(len(tags), self.max_tags)]
str_tags = "{ " + ", ".join(all_tags) + " }"
return "\n ".join(textwrap.wrap(str_tags, 69))
def ComputeMaxTags(self, policies):
self.max_tags = 0
for policy in policies:
if not policy.is_supported or policy.tags == None:
continue
self.max_tags = max(len(policy.tags), self.max_tags)
def _ToEnum(self, tag):
if tag in self.enum_for_tag:
return self.enum_for_tag[tag]
raise RuntimeError('Invalid Tag:' + tag + '!\n'
'Choose a valid tag from \'risk_tag_definitions\' (a '
'subproperty of root in policy_templates.json)!')
def _ReadRiskTagMetaData(self, template_file_contents):
for tag in template_file_contents['risk_tag_definitions']:
if tag.get('name', None) == None:
raise RuntimeError('Tag in \'risk_tag_definitions\' without '
'name found!')
if tag.get('description', None) == None:
raise RuntimeError('Tag ' + tag['name'] + ' has no description!')
if tag.get('user-description', None) == None:
raise RuntimeError('Tag ' + tag['name'] + ' has no user-description!')
self.enum_for_tag[tag['name']] = "RISK_TAG_" + tag['name'].replace(
"-", "_").upper()
def _WritePolicyRiskTagHeader(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write('''#ifndef CHROME_COMMON_POLICY_RISK_TAG_H_
#define CHROME_COMMON_POLICY_RISK_TAG_H_
#include <stddef.h>
namespace policy {
// The tag of a policy indicates which impact a policy can have on
// a user's privacy and/or security. Ordered descending by
// impact.
// The explanation of the single tags is stated in
// policy_templates.json within the 'risk_tag_definitions' tag.
''')
f.write(risk_tags.GenerateEnum() + '\n')
f.write('// This constant describes how many risk tags were used by the\n'
'// policy which uses the most risk tags.\n'
'const size_t kMaxRiskTagCount = ' + risk_tags.GetMaxTags() + ';\n'
'\n'
'} // namespace policy\n\n'
'\n'
'#endif // CHROME_COMMON_POLICY_RISK_TAG_H_')
#------------------ policy protobufs -------------------------------#
# This code applies to both Active Directory and Google cloud management.
CHROME_SETTINGS_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
// For StringList and PolicyOptions.
import "policy_common_definitions.proto";
'''
CLOUD_POLICY_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
import "policy_common_definitions.proto";
'''
# Field IDs [1..RESERVED_IDS] will not be used in the wrapping protobuf.
RESERVED_IDS = 2
def _WritePolicyProto(f, policy, fields):
_OutputComment(f, policy.caption + '\n\n' + policy.desc)
if policy.items is not None:
_OutputComment(f, '\nValid values:')
for item in policy.items:
_OutputComment(f, ' %s: %s' % (str(item.value), item.caption))
if policy.policy_type == 'Type::DICTIONARY':
_OutputComment(
f, '\nValue schema:\n%s' % json.dumps(
policy.schema, sort_keys=True, indent=4, separators=(',', ': ')))
_OutputComment(
f, '\nSupported on: %s' %
', '.join(sorted(list(policy.platforms.union(policy.future_on)))))
if policy.can_be_recommended and not policy.can_be_mandatory:
_OutputComment(
f, '\nNote: this policy must have a RECOMMENDED ' +
'PolicyMode set in PolicyOptions.')
f.write('message %sProto {\n' % policy.name)
f.write(' optional PolicyOptions policy_options = 1;\n')
f.write(' optional %s %s = 2;\n' % (policy.protobuf_type, policy.name))
f.write('}\n\n')
fields += [
' optional %sProto %s = %s;\n' % (policy.name, policy.name,
policy.id + RESERVED_IDS)
]
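# Sketch of the generated output (added comment, not part of the original
# script) for a hypothetical string policy named 'ExamplePolicy' with id 3
# (wrapper field number 3 + RESERVED_IDS = 5):
#   message ExamplePolicyProto {
#     optional PolicyOptions policy_options = 1;
#     optional string ExamplePolicy = 2;
#   }
# and, in the wrapping message:
#   optional ExamplePolicyProto ExamplePolicy = 5;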
def _WriteChromeSettingsProtobuf(policies, policy_atomic_groups,
target_platform, f, risk_tags):
f.write(CHROME_SETTINGS_PROTO_HEAD)
fields = []
f.write('// PBs for individual settings.\n\n')
for policy in policies:
# Note: This protobuf also gets the unsupported policies, since it's an
# exhaustive list of all the supported user policies on any platform.
if not policy.is_device_only:
_WritePolicyProto(f, policy, fields)
f.write('// --------------------------------------------------\n'
'// Big wrapper PB containing the above groups.\n\n'
'message ChromeSettingsProto {\n')
f.write(''.join(fields))
f.write('}\n\n')
def _WriteChromeSettingsFullRuntimeProtobuf(policies, policy_atomic_groups,
target_platform, f, risk_tags):
# For full runtime, disable LITE_RUNTIME switch and import full runtime
# version of cloud_policy.proto.
f.write(
CHROME_SETTINGS_PROTO_HEAD.replace(
"option optimize_for = LITE_RUNTIME;",
"//option optimize_for = LITE_RUNTIME;").replace(
"import \"cloud_policy.proto\";",
"import \"cloud_policy_full_runtime.proto\";").replace(
"import \"policy_common_definitions.proto\";",
"import \"policy_common_definitions_full_runtime.proto\";"))
fields = []
f.write('// PBs for individual settings.\n\n')
for policy in policies:
# Note: This protobuf also gets the unsupported policies, since it's an
# exhaustive list of all the supported user policies on any platform.
if not policy.is_device_only:
_WritePolicyProto(f, policy, fields)
f.write('// --------------------------------------------------\n'
'// Big wrapper PB containing the above groups.\n\n'
'message ChromeSettingsProto {\n')
f.write(''.join(fields))
f.write('}\n\n')
def _WriteCloudPolicyProtobuf(policies, policy_atomic_groups, target_platform,
f, risk_tags):
f.write(CLOUD_POLICY_PROTO_HEAD)
f.write('message CloudPolicySettings {\n')
for policy in policies:
if policy.is_supported and not policy.is_device_only:
f.write(
' optional %sPolicyProto %s = %s;\n' %
(policy.policy_protobuf_type, policy.name, policy.id + RESERVED_IDS))
f.write('}\n\n')
def _WriteCloudPolicyFullRuntimeProtobuf(policies, policy_atomic_groups,
target_platform, f, risk_tags):
# For full runtime, disable LITE_RUNTIME switch
f.write(
CLOUD_POLICY_PROTO_HEAD.replace(
"option optimize_for = LITE_RUNTIME;",
"//option optimize_for = LITE_RUNTIME;").replace(
"import \"policy_common_definitions.proto\";",
"import \"policy_common_definitions_full_runtime.proto\";"))
f.write('message CloudPolicySettings {\n')
for policy in policies:
if policy.is_supported and not policy.is_device_only:
f.write(
' optional %sPolicyProto %s = %s;\n' %
(policy.policy_protobuf_type, policy.name, policy.id + RESERVED_IDS))
f.write('}\n\n')
def _WritePolicyCommonDefinitionsFullRuntimeProtobuf(
policy_common_definitions_proto_path, policies, policy_atomic_groups,
target_platform, f, risk_tags):
# For full runtime, disable LITE_RUNTIME switch
with open(policy_common_definitions_proto_path, 'r') as proto_file:
policy_common_definitions_proto_code = proto_file.read()
f.write(
policy_common_definitions_proto_code.replace(
"option optimize_for = LITE_RUNTIME;",
"//option optimize_for = LITE_RUNTIME;"))
#------------------ Chrome OS policy constants header --------------#
# This code applies to Active Directory management only.
# Filter for _GetSupportedChromeOSPolicies().
def _IsSupportedChromeOSPolicy(type, policy):
# Filter out unsupported policies.
if not policy.is_supported:
return False
# Filter out device policies if user policies are requested.
if type == 'user' and policy.is_device_only:
return False
# Filter out user policies if device policies are requested.
if type == 'device' and not policy.is_device_only:
return False
# Filter out non-Active-Directory policies.
if 'active_directory' not in policy.supported_chrome_os_management:
return False
return True
# Returns a list of supported user and/or device policies by filtering
# |policies|. |type| may be 'user', 'device' or 'both'.
def _GetSupportedChromeOSPolicies(policies, type):
if (type not in ['user', 'device', 'both']):
raise RuntimeError('Unsupported type "%s"' % type)
return filter(partial(_IsSupportedChromeOSPolicy, type), policies)
# Returns the sorted list of policy protobuf type names. Note that this is
# currently a fixed list and the |policies| argument is not inspected.
def _GetProtobufTypes(policies):
return sorted(['Integer', 'Boolean', 'String', 'StringList'])
# Writes the definition of an array that contains the pointers to the mutable
# proto field for each policy in |policies| of the given |protobuf_type|.
def _WriteChromeOSPolicyAccessHeader(f, protobuf_type):
f.write('// Access to the mutable protobuf function of all supported '
'%s user\n// policies.\n' % protobuf_type.lower())
f.write('struct %sPolicyAccess {\n'
' const char* policy_key;\n'
' bool per_profile;\n'
' enterprise_management::%sPolicyProto*\n'
' (enterprise_management::CloudPolicySettings::'
'*mutable_proto_ptr)();\n'
'};\n' % (protobuf_type, protobuf_type))
f.write('extern const %sPolicyAccess k%sPolicyAccess[];\n\n' %
(protobuf_type, protobuf_type))
# Writes policy_constants.h for use in Chrome OS.
def _WriteChromeOSPolicyConstantsHeader(policies, policy_atomic_groups,
target_platform, f, risk_tags):
f.write('#ifndef __BINDINGS_POLICY_CONSTANTS_H_\n'
'#define __BINDINGS_POLICY_CONSTANTS_H_\n\n')
# Forward declarations.
supported_user_policies = _GetSupportedChromeOSPolicies(policies, 'user')
protobuf_types = _GetProtobufTypes(supported_user_policies)
f.write('namespace enterprise_management {\n' 'class CloudPolicySettings;\n')
for protobuf_type in protobuf_types:
f.write('class %sPolicyProto;\n' % protobuf_type)
f.write('} // namespace enterprise_management\n\n')
f.write('namespace policy {\n\n')
# Policy keys.
all_supported_policies = _GetSupportedChromeOSPolicies(policies, 'both')
f.write('// Registry key names for user and device policies.\n'
'namespace key {\n\n')
for policy in all_supported_policies:
f.write('extern const char k' + policy.name + '[];\n')
f.write('\n} // namespace key\n\n')
# Device policy keys.
f.write('// NULL-terminated list of device policy registry key names.\n')
f.write('extern const char* kDevicePolicyKeys[];\n\n')
# User policy proto pointers, one struct for each protobuf type.
for protobuf_type in protobuf_types:
_WriteChromeOSPolicyAccessHeader(f, protobuf_type)
f.write('} // namespace policy\n\n'
'#endif // __BINDINGS_POLICY_CONSTANTS_H_\n')
#------------------ Chrome OS policy constants source --------------#
# Writes an array that contains the pointers to the mutable proto field for each
# policy in |policies| of the given |protobuf_type|.
def _WriteChromeOSPolicyAccessSource(policies, f, protobuf_type):
f.write('constexpr %sPolicyAccess k%sPolicyAccess[] = {\n' % (protobuf_type,
protobuf_type))
for policy in policies:
if policy.policy_protobuf_type == protobuf_type:
f.write(
' {key::k%s,\n'
' %s,\n'
' &em::CloudPolicySettings::mutable_%s},\n' %
(policy.name, str(policy.per_profile).lower(), policy.name.lower()))
# The list is nullptr-terminated.
f.write(' {nullptr, false, nullptr},\n' '};\n\n')
# Writes policy_constants.cc for use in Chrome OS.
def _WriteChromeOSPolicyConstantsSource(policies, policy_atomic_groups,
target_platform, f, risk_tags):
f.write('#include "bindings/cloud_policy.pb.h"\n'
'#include "bindings/policy_constants.h"\n\n'
'namespace em = enterprise_management;\n\n'
'namespace policy {\n\n')
# Policy keys.
all_supported_policies = _GetSupportedChromeOSPolicies(policies, 'both')
f.write('namespace key {\n\n')
for policy in all_supported_policies:
f.write('const char k{name}[] = "{name}";\n'.format(name=policy.name))
f.write('\n} // namespace key\n\n')
# Device policy keys.
supported_device_policies = _GetSupportedChromeOSPolicies(policies, 'device')
f.write('const char* kDevicePolicyKeys[] = {\n\n')
for policy in supported_device_policies:
f.write(' key::k%s,\n' % policy.name)
f.write(' nullptr};\n\n')
# User policy proto pointers, one struct for each protobuf type.
supported_user_policies = _GetSupportedChromeOSPolicies(policies, 'user')
protobuf_types = _GetProtobufTypes(supported_user_policies)
for protobuf_type in protobuf_types:
_WriteChromeOSPolicyAccessSource(supported_user_policies, f, protobuf_type)
f.write('} // namespace policy\n')
#------------------ app restrictions -------------------------------#
def _WriteAppRestrictions(policies, policy_atomic_groups, target_platform, f,
risk_tags):
def WriteRestrictionCommon(key):
f.write(' <restriction\n' ' android:key="%s"\n' % key)
f.write(' android:title="@string/%sTitle"\n' % key)
f.write(' android:description="@string/%sDesc"\n' % key)
def WriteItemsDefinition(key):
f.write(' android:entries="@array/%sEntries"\n' % key)
f.write(' android:entryValues="@array/%sValues"\n' % key)
def WriteAppRestriction(policy):
policy_name = policy.name
WriteRestrictionCommon(policy_name)
if policy.items is not None:
WriteItemsDefinition(policy_name)
f.write(' android:restrictionType="%s"/>' % policy.restriction_type)
f.write('\n\n')
# _WriteAppRestrictions body
f.write('<restrictions xmlns:android="'
'http://schemas.android.com/apk/res/android">\n\n')
for policy in policies:
if (policy.is_supported and policy.restriction_type != 'invalid'
and not policy.is_deprecated and not policy.is_future
and not policy.internal_only):
WriteAppRestriction(policy)
f.write('</restrictions>')
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
ader1990/linux-kernel-3-13-5-scsi-fix | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
aferr/TimingCompartments | configs/topologies/BaseTopology.py | 15 | 2949 | # Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Power
import m5
class BaseTopology(object):
description = "BaseTopology"
def __init__(self):
""" When overriding place any objects created in
configs/ruby/<protocol>.py that are needed in
makeTopology (below) here. The minimum is usually
all of the controllers created in the above file.
"""
def makeTopology(self, options, IntLink, ExtLink, Router):
""" Called from configs/ruby/Ruby.py
The return value is ( list(Router), list(IntLink), list(ExtLink))
The API of this function cannot change when subclassing!!
Any additional information needed to create this topology should
be passed into the constructor when it's instantiated in
configs/ruby/<protocol>.py
"""
m5.util.fatal("BaseTopology should have been overridden!!")
class SimpleTopology(BaseTopology):
""" Provides methods needed for the topologies included in Ruby before
topology changes.
These topologies are "simple" in the sense that they only use a flat
list of controllers to construct the topology.
"""
description = "SimpleTopology"
def __init__(self, controllers):
self.nodes = controllers
def addController(self, controller):
self.nodes.append(controller)
def __len__(self):
return len(self.nodes)
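# Editor's sketch (not part of the original file): a minimal subclass showing
# how makeTopology() is meant to be overridden. The Router/IntLink/ExtLink
# classes are passed in by configs/ruby/Ruby.py; the keyword arguments used
# below (router_id, link_id, ext_node, int_node, node_a, node_b) are
# assumptions based on the crossbar-style topologies of this gem5 era and may
# differ in other versions.
class ExampleCrossbar(SimpleTopology):
    description = "ExampleCrossbar"

    def makeTopology(self, options, IntLink, ExtLink, Router):
        # One router per controller plus one central crossbar router.
        routers = [Router(router_id=i) for i in range(len(self.nodes) + 1)]
        xbar = routers[-1]
        # External links attach each controller to its own router.
        ext_links = [ExtLink(link_id=i, ext_node=n, int_node=routers[i])
                     for i, n in enumerate(self.nodes)]
        # Internal links connect every per-controller router to the crossbar.
        int_links = [IntLink(link_id=len(self.nodes) + i,
                             node_a=routers[i], node_b=xbar)
                     for i in range(len(self.nodes))]
        return routers, int_links, ext_links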
| bsd-3-clause |
helloworldajou/webserver | demos/classifier_webcam.py | 4 | 7059 | #!/usr/bin/env python2
#
# Example to run classifier on webcam stream.
# Brandon Amos & Vijayenthiran
# 2016/06/21
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contrib: Vijayenthiran
# This example file shows to run a classifier on webcam stream. You need to
# run the classifier.py to generate classifier with your own dataset.
# To run this file from the openface home dir:
# ./demo/classifier_webcam.py <path-to-your-classifier>
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import sys
import numpy as np
np.set_printoptions(precision=2)
from sklearn.mixture import GMM
import openface
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(bgrImg):
start = time.time()
if bgrImg is None:
raise Exception("Unable to load image/frame")
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
# Get the largest face bounding box
# bb = align.getLargestFaceBoundingBox(rgbImg) #Bounding box
# Get all bounding boxes
bb = align.getAllFaceBoundingBoxes(rgbImg)
if bb is None:
# raise Exception("Unable to find a face: {}".format(imgPath))
return None
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFaces = []
for box in bb:
alignedFaces.append(
align.align(
args.imgDim,
rgbImg,
box,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
if alignedFaces is None:
raise Exception("Unable to align the frame")
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
reps = []
for alignedFace in alignedFaces:
reps.append(net.forward(alignedFace))
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
# print (reps)
return reps
def infer(img, args):
with open(args.classifierModel, 'r') as f:
if sys.version_info[0] < 3:
(le, clf) = pickle.load(f)  # le - label and clf - classifier
else:
(le, clf) = pickle.load(f, encoding='latin1')  # le - label and clf - classifier
reps = getRep(img)
persons = []
confidences = []
for rep in reps:
try:
rep = rep.reshape(1, -1)
except:
print ("No Face detected")
return (None, None)
start = time.time()
predictions = clf.predict_proba(rep).ravel()
# print (predictions)
maxI = np.argmax(predictions)
# max2 = np.argsort(predictions)[-3:][::-1][1]
persons.append(le.inverse_transform(maxI))
# print (str(le.inverse_transform(max2)) + ": "+str( predictions [max2]))
# ^ prints the second prediction
confidences.append(predictions[maxI])
if args.verbose:
print("Prediction took {} seconds.".format(time.time() - start))
pass
# print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
pass
return (persons, confidences)
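# Example of infer()'s return shape (added comment, not part of the original
# demo): for a frame with two recognized faces it might return
#   (['alice', 'bob'], [0.83, 0.41])
# and it returns (None, None) when no face representation can be used; the
# names here are purely illustrative.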
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument(
'--captureDevice',
type=int,
default=0,
help='Capture device. 0 for laptop webcam and 1 for usb webcam')
parser.add_argument('--width', type=int, default=320)
parser.add_argument('--height', type=int, default=240)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(
args.networkModel,
imgDim=args.imgDim,
cuda=args.cuda)
# Capture device. Usually 0 will be webcam and 1 will be usb cam.
video_capture = cv2.VideoCapture(args.captureDevice)
video_capture.set(3, args.width)
video_capture.set(4, args.height)
confidenceList = []
while True:
ret, frame = video_capture.read()
persons, confidences = infer(frame, args)
print ("P: " + str(persons) + " C: " + str(confidences))
try:
# append with two floating point precision
confidenceList.append('%.2f' % confidences[0])
except:
# If there is no face detected, confidences matrix will be empty.
# We can simply ignore it.
pass
for i, c in enumerate(confidences):
if c <= args.threshold: # 0.5 is kept as threshold for known face.
persons[i] = "_unknown"
# Print the person name and conf value on the frame
cv2.putText(frame, "P: {} C: {}".format(persons, confidences),
(50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.imshow('', frame)
# quit the program on the press of key 'q'
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| apache-2.0 |
ibinti/intellij-community | python/helpers/pycharm/__jb.for_twisted/twisted/plugins/teamcity_plugin.py | 11 | 1180 | import sys
from teamcity.unittestpy import TeamcityTestResult
from twisted.trial.reporter import Reporter
from twisted.python.failure import Failure
from twisted.plugins.twisted_trial import _Reporter
class FailureWrapper(Failure):
def __getitem__(self, key):
return self.value[key]
class TeamcityReporter(TeamcityTestResult, Reporter):
def __init__(self,
stream=sys.stdout,
tbformat='default',
realtime=False,
publisher=None):
TeamcityTestResult.__init__(self)
Reporter.__init__(self,
stream=stream,
tbformat=tbformat,
realtime=realtime,
publisher=publisher)
def addError(self, test, failure, *k):
super(TeamcityReporter, self).addError(test, FailureWrapper(failure), *k)
Teamcity = _Reporter("Teamcity Reporter",
"twisted.plugins.teamcity_plugin",
description="teamcity messages",
longOpt="teamcity",
shortOpt="teamcity",
klass="TeamcityReporter")
| apache-2.0 |
Lujeni/ansible | lib/ansible/modules/network/nxos/nxos_vrf.py | 5 | 17884 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages global VRF configuration.
description:
- This module provides declarative management of VRFs
on CISCO NXOS network devices.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Cisco NX-OS creates the default VRF by itself. Therefore,
you're not allowed to use default as I(vrf) name in this module.
- C(vrf) name must not be longer than 32 characters.
- VRF names are not case sensitive in NX-OS. However, the name is stored
exactly as entered by the user and will not be changed again
unless the VRF is removed and re-created. i.e. C(vrf=NTC) will create
a VRF named NTC, but running it again with C(vrf=ntc) will not cause
a configuration change.
options:
name:
description:
- Name of VRF to be managed.
required: true
aliases: [vrf]
admin_state:
description:
- Administrative state of the VRF.
default: up
choices: ['up','down']
vni:
description:
- Specify virtual network identifier. Valid values are Integer
or keyword 'default'.
version_added: "2.2"
rd:
description:
- VPN Route Distinguisher (RD). Valid values are a string in
one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or
IPV4:NN); the keyword 'auto', or the keyword 'default'.
version_added: "2.2"
interfaces:
description:
- List of interfaces to check the VRF has been
configured correctly or keyword 'default'.
version_added: 2.5
associated_interfaces:
description:
- This is a intent option and checks the operational state of the for given vrf C(name)
for associated interfaces. If the value in the C(associated_interfaces) does not match with
the operational state of vrf interfaces on device it will result in failure.
version_added: "2.5"
aggregate:
description: List of VRFs definitions.
version_added: 2.5
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
type: bool
default: 'no'
version_added: 2.5
state:
description:
- Manages desired state of the resource.
default: present
choices: ['present','absent']
description:
description:
- Description of the VRF or keyword 'default'.
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
'''
EXAMPLES = '''
- name: Ensure ntc VRF exists on switch
nxos_vrf:
name: ntc
description: testing
state: present
- name: Aggregate definition of VRFs
nxos_vrf:
aggregate:
- { name: test1, description: Testing, admin_state: down }
- { name: test2, interfaces: Ethernet1/2 }
- name: Aggregate definitions of VRFs with Purge
nxos_vrf:
aggregate:
- { name: ntc1, description: purge test1 }
- { name: ntc2, description: purge test2 }
state: present
purge: yes
- name: Delete VRFs exist on switch
nxos_vrf:
aggregate:
- { name: ntc1 }
- { name: ntc2 }
state: absent
- name: Assign interfaces to VRF declaratively
nxos_vrf:
name: test1
interfaces:
- Ethernet2/3
- Ethernet2/5
- name: Check interfaces assigned to VRF
nxos_vrf:
name: test1
associated_interfaces:
- Ethernet2/3
- Ethernet2/5
- name: Ensure VRF is tagged with interface Ethernet2/5 only (Removes from Ethernet2/3)
nxos_vrf:
name: test1
interfaces:
- Ethernet2/5
- name: Delete VRF
nxos_vrf:
name: ntc
state: absent
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample:
- vrf context ntc
- no shutdown
- interface Ethernet1/2
- no switchport
- vrf member test2
'''
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, get_interface_type
from ansible.module_utils.network.common.utils import remove_default_spec
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
def execute_show_command(command, module):
if 'show run' not in command:
output = 'json'
else:
output = 'text'
cmds = [{
'command': command,
'output': output,
}]
body = run_commands(module, cmds)
return body
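# Example (added comment, not part of the original module): 'show vrf all' is
# requested as structured JSON, while 'show run ...' commands are requested
# as plain text, e.g.
#   execute_show_command('show vrf all', module)
#     -> run_commands(module, [{'command': 'show vrf all', 'output': 'json'}])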
def get_existing_vrfs(module):
objs = list()
command = "show vrf all"
try:
body = execute_show_command(command, module)[0]
except IndexError:
return list()
try:
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (TypeError, IndexError, KeyError):
return list()
if isinstance(vrf_table, list):
for vrf in vrf_table:
obj = {}
obj['name'] = vrf['vrf_name']
objs.append(obj)
elif isinstance(vrf_table, dict):
obj = {}
obj['name'] = vrf_table['vrf_name']
objs.append(obj)
return objs
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
purge = module.params['purge']
args = ('rd', 'description', 'vni')
for w in want:
name = w['name']
admin_state = w['admin_state']
vni = w['vni']
interfaces = w.get('interfaces') or []
state = w['state']
del w['state']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent' and obj_in_have:
commands.append('no vrf context {0}'.format(name))
elif state == 'present':
if not obj_in_have:
commands.append('vrf context {0}'.format(name))
for item in args:
candidate = w.get(item)
if candidate and candidate != 'default':
cmd = item + ' ' + str(candidate)
commands.append(cmd)
if admin_state == 'up':
commands.append('no shutdown')
elif admin_state == 'down':
commands.append('shutdown')
commands.append('exit')
if interfaces and interfaces[0] != 'default':
for i in interfaces:
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('vrf member {0}'.format(name))
else:
# If vni is already configured on vrf, unconfigure it first.
if vni:
if obj_in_have.get('vni') and vni != obj_in_have.get('vni'):
commands.append('no vni {0}'.format(obj_in_have.get('vni')))
for item in args:
candidate = w.get(item)
if candidate == 'default':
if obj_in_have.get(item):
cmd = 'no ' + item + ' ' + obj_in_have.get(item)
commands.append(cmd)
elif candidate and candidate != obj_in_have.get(item):
cmd = item + ' ' + str(candidate)
commands.append(cmd)
if admin_state and admin_state != obj_in_have.get('admin_state'):
if admin_state == 'up':
commands.append('no shutdown')
elif admin_state == 'down':
commands.append('shutdown')
if commands:
commands.insert(0, 'vrf context {0}'.format(name))
commands.append('exit')
if interfaces and interfaces[0] != 'default':
if not obj_in_have['interfaces']:
for i in interfaces:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('vrf member {0}'.format(name))
elif set(interfaces) != set(obj_in_have['interfaces']):
missing_interfaces = list(set(interfaces) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('vrf member {0}'.format(name))
superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(interfaces))
for i in superfluous_interfaces:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('no vrf member {0}'.format(name))
elif interfaces and interfaces[0] == 'default':
if obj_in_have['interfaces']:
for i in obj_in_have['interfaces']:
commands.append('vrf context {0}'.format(name))
commands.append('exit')
commands.append('interface {0}'.format(i))
if get_interface_type(i) in ('ethernet', 'portchannel'):
commands.append('no switchport')
commands.append('no vrf member {0}'.format(name))
if purge:
existing = get_existing_vrfs(module)
if existing:
for h in existing:
if h['name'] in ('default', 'management'):
pass
else:
obj_in_want = search_obj_in_list(h['name'], want)
if not obj_in_want:
commands.append('no vrf context {0}'.format(h['name']))
return commands
def validate_vrf(name, module):
if name == 'default':
module.fail_json(msg='cannot use default as name of a VRF')
elif len(name) > 32:
module.fail_json(msg='VRF name exceeded max length of 32', name=name)
else:
return name
def map_params_to_obj(module):
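    """Build the list of desired VRF objects from either the 'aggregate' parameter
    or the top-level module parameters.
    """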
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
d['name'] = validate_vrf(d['name'], module)
obj.append(d)
else:
obj.append({
'name': validate_vrf(module.params['name'], module),
'description': module.params['description'],
'vni': module.params['vni'],
'rd': module.params['rd'],
'admin_state': module.params['admin_state'],
'state': module.params['state'],
'interfaces': module.params['interfaces'],
'associated_interfaces': module.params['associated_interfaces']
})
return obj
def get_value(arg, config, module):
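    # Pull the value that follows 'arg' in the running-config text; '' when absent.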
extra_arg_regex = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(arg), re.M)
value = ''
if arg in config:
value = extra_arg_regex.search(config).group('value')
return value
def map_config_to_obj(want, element_spec, module):
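    """Query the device for each wanted VRF and build the corresponding current-state
    objects (name, admin_state, rd/vni/description and member interfaces).
    """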
objs = list()
for w in want:
obj = deepcopy(element_spec)
del obj['delay']
del obj['state']
command = 'show vrf {0}'.format(w['name'])
try:
body = execute_show_command(command, module)[0]
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (TypeError, IndexError):
return list()
name = vrf_table['vrf_name']
obj['name'] = name
obj['admin_state'] = vrf_table['vrf_state'].lower()
command = 'show run all | section vrf.context.{0}'.format(name)
body = execute_show_command(command, module)[0]
extra_params = ['vni', 'rd', 'description']
for param in extra_params:
obj[param] = get_value(param, body, module)
obj['interfaces'] = []
command = 'show vrf {0} interface'.format(name)
try:
body = execute_show_command(command, module)[0]
vrf_int = body['TABLE_if']['ROW_if']
except (TypeError, IndexError):
vrf_int = None
if vrf_int:
if isinstance(vrf_int, list):
for i in vrf_int:
intf = i['if_name']
obj['interfaces'].append(intf)
elif isinstance(vrf_int, dict):
intf = vrf_int['if_name']
obj['interfaces'].append(intf)
objs.append(obj)
return objs
def check_declarative_intent_params(want, module, element_spec, result):
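    """After an optional settling delay, re-read the device state and fail if any
    interface listed in 'associated_interfaces' is not a member of its VRF.
    """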
have = None
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(want, element_spec, module)
for i in w['associated_interfaces']:
obj_in_have = search_obj_in_list(w['name'], have)
if obj_in_have:
interfaces = obj_in_have.get('interfaces')
if interfaces is not None and i not in interfaces:
module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name']))
def vrf_error_check(module, commands, responses):
"""Checks for VRF config errors and executes a retry in some circumstances.
"""
pattern = 'ERROR: Deletion of VRF .* in progress'
if re.search(pattern, str(responses)):
# Allow delay/retry for VRF changes
time.sleep(15)
responses = load_config(module, commands, opts={'catch_clierror': True})
if re.search(pattern, str(responses)):
module.fail_json(msg='VRF config (and retry) failure: %s ' % responses)
module.warn('VRF config delayed by VRF deletion - passed on retry')
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(type='str', aliases=['vrf']),
description=dict(type='str'),
vni=dict(type='str'),
rd=dict(type='str'),
admin_state=dict(type='str', default='up', choices=['up', 'down']),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(type='int', default=10),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(type='bool', default=False),
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(want, element_spec, module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands and not module.check_mode:
responses = load_config(module, commands, opts={'catch_clierror': True})
vrf_error_check(module, commands, responses)
result['changed'] = True
check_declarative_intent_params(want, module, element_spec, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
sonaht/ansible | lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 57 | 11596 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
          - The number of periods over which the metric is evaluated before the alarm state is determined
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
          - A list of the names of action(s) to take when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
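
# Illustrative sketch only (not part of the original module docs): deleting an
# alarm needs just its name, the region and state=absent.
  - name: delete alarm
    ec2_metric_alarm:
      state: absent
      region: ap-southeast-2
      name: "cpu-low"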
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
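    """Create the alarm if it does not exist yet; otherwise update any attributes,
    dimensions or actions that differ from the module parameters, then exit
    reporting the resulting alarm state.
    """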
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
        for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold',
                     'period', 'evaluation_periods', 'unit', 'description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
                changed = True
setattr(alarm, 'dimensions', dim1)
        for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
if set(getattr(alarm, attr)) != set(action):
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError as e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
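    """Delete the named alarm if it exists and exit reporting whether anything changed."""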
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
murraymeehan/marsyas | scripts/Python/batch.py | 7 | 1475 | import os
from glob import glob
inputDirectory = "../../../../Databases/taslp/"
outputDirectory = "../../../output3 "
testCommand = " "
#testCommand = " -q 1 "
beginCommand = "../../bin/release/peakClustering "
# the Windows executable path below overrides the Unix path set above
beginCommand = "..\\..\\bin\\release\\peakClustering.exe "
endCommand = " -P -f -S 0 -r -k 2 -c 3 -N music -i 250_2500 -o "+outputDirectory
execStyle=[
#hwps
"-T 1 -s 20 -t hoabfb ",
"-T 10 -s 20 -t hoabfb ",
"-T 1 -s 20 -t hoabfb -u ",
"-T 10 -s 20 -t hoabfb -u ",
#virtanen
"-T 1 -s 20 -t voabfb ",
"-T 10 -s 20 -t voabfb ",
"-T 1 -s 20 -t voabfb -u ",
"-T 10 -s 20 -t voabfb -u ",
#srinivasan criterion
"-T 1 -s 20 -t soabfb ",
"-T 10 -s 20 -t soabfb ",
"-T 1 -s 20 -t soabfb -u ",
"-T 10 -s 20 -t soabfb -u ",
# amplitude only
"-T 1 -s 20 -t abfb ",
"-T 1 -s 20 -t abfb -u ",
# harmonicity only
"-T 1 -s 20 -t ho ",
"-T 1 -s 20 -t ho -u ",
"-T 1 -s 20 -t vo ",
"-T 1 -s 20 -t vo -u ",
"-T 1 -s 20 -t so ",
"-T 1 -s 20 -t so -u ",
# srinivasan algo
" -s 1024 -npp -u -T 1 -t soabfb ",
"-s 1024 -npp -u -T 10 -t soabfb "];
for style in execStyle:
for name in glob(inputDirectory+"*V*.wav"):
command = beginCommand+style+testCommand+endCommand+name
print command
os.system(command)
| gpl-2.0 |
sathiamour/foursquared | util/oget.py | 262 | 3416 | #!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
return (
re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0],
re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
auth_response).groups()[0]
)
def create_signed_oauth_request(username, password, consumer):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
parameters=dict(fs_username=username, fs_password=password))
oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
return oauth_request
def main():
url = urlparse.urlparse(sys.argv[1])
# Nevermind that the query can have repeated keys.
parameters = dict(urlparse.parse_qsl(url.query))
password_file = open(os.path.join(user.home, '.oget'))
lines = [line.strip() for line in password_file.readlines()]
if len(lines) == 4:
cons_key, cons_key_secret, username, password = lines
access_token = None
else:
cons_key, cons_key_secret, username, password, token, secret = lines
access_token = oauth.OAuthToken(token, secret)
consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
if not access_token:
oauth_request = create_signed_oauth_request(username, password, consumer)
connection = httplib.HTTPConnection(SERVER)
headers = {'Content-Type' :'application/x-www-form-urlencoded'}
connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
body=oauth_request.to_postdata(), headers=headers)
auth_response = connection.getresponse().read()
token = parse_auth_response(auth_response)
access_token = oauth.OAuthToken(*token)
open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
cons_key, cons_key_secret, username, password, token[0], token[1])))
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
access_token, http_method='POST', http_url=url.geturl(),
parameters=parameters)
oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
connection = httplib.HTTPConnection(SERVER)
connection.request(oauth_request.http_method, oauth_request.to_url(),
body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
print connection.getresponse().read()
#print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
main()
| apache-2.0 |
dmarteau/QGIS | python/plugins/db_manager/db_plugins/gpkg/sql_dictionary.py | 71 | 1200 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sql_dictionary.py
---------------------
Date : April 2012
Copyright : (C) 2012 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
def getSqlDictionary(spatial=True):
from ..spatialite.sql_dictionary import getSqlDictionary
return getSqlDictionary(spatial)
def getQueryBuilderDictionary():
from ..spatialite.sql_dictionary import getQueryBuilderDictionary
return getQueryBuilderDictionary()
| gpl-2.0 |
smourph/PGo-TrainerTools | pgoapi/protos/POGOProtos/Networking/Responses/GetIncensePokemonResponse_pb2.py | 12 | 6521 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/GetIncensePokemonResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import PokemonId_pb2 as POGOProtos_dot_Enums_dot_PokemonId__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/GetIncensePokemonResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n?POGOProtos/Networking/Responses/GetIncensePokemonResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Enums/PokemonId.proto\"\x85\x03\n\x19GetIncensePokemonResponse\x12Q\n\x06result\x18\x01 \x01(\x0e\x32\x41.POGOProtos.Networking.Responses.GetIncensePokemonResponse.Result\x12/\n\npokemon_id\x18\x02 \x01(\x0e\x32\x1b.POGOProtos.Enums.PokemonId\x12\x10\n\x08latitude\x18\x03 \x01(\x01\x12\x11\n\tlongitude\x18\x04 \x01(\x01\x12\x1a\n\x12\x65ncounter_location\x18\x05 \x01(\t\x12\x14\n\x0c\x65ncounter_id\x18\x06 \x01(\x06\x12\x1e\n\x16\x64isappear_timestamp_ms\x18\x07 \x01(\x03\"m\n\x06Result\x12\x1d\n\x19INCENSE_ENCOUNTER_UNKNOWN\x10\x00\x12\x1f\n\x1bINCENSE_ENCOUNTER_AVAILABLE\x10\x01\x12#\n\x1fINCENSE_ENCOUNTER_NOT_AVAILABLE\x10\x02\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_PokemonId__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETINCENSEPOKEMONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INCENSE_ENCOUNTER_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCENSE_ENCOUNTER_AVAILABLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCENSE_ENCOUNTER_NOT_AVAILABLE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=415,
serialized_end=524,
)
_sym_db.RegisterEnumDescriptor(_GETINCENSEPOKEMONRESPONSE_RESULT)
_GETINCENSEPOKEMONRESPONSE = _descriptor.Descriptor(
name='GetIncensePokemonResponse',
full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.pokemon_id', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='latitude', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.latitude', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='longitude', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.longitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encounter_location', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.encounter_location', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encounter_id', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.encounter_id', index=5,
number=6, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='disappear_timestamp_ms', full_name='POGOProtos.Networking.Responses.GetIncensePokemonResponse.disappear_timestamp_ms', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETINCENSEPOKEMONRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=524,
)
_GETINCENSEPOKEMONRESPONSE.fields_by_name['result'].enum_type = _GETINCENSEPOKEMONRESPONSE_RESULT
_GETINCENSEPOKEMONRESPONSE.fields_by_name['pokemon_id'].enum_type = POGOProtos_dot_Enums_dot_PokemonId__pb2._POKEMONID
_GETINCENSEPOKEMONRESPONSE_RESULT.containing_type = _GETINCENSEPOKEMONRESPONSE
DESCRIPTOR.message_types_by_name['GetIncensePokemonResponse'] = _GETINCENSEPOKEMONRESPONSE
GetIncensePokemonResponse = _reflection.GeneratedProtocolMessageType('GetIncensePokemonResponse', (_message.Message,), dict(
DESCRIPTOR = _GETINCENSEPOKEMONRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.GetIncensePokemonResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.GetIncensePokemonResponse)
))
_sym_db.RegisterMessage(GetIncensePokemonResponse)
# @@protoc_insertion_point(module_scope)
| gpl-3.0 |
renegelinas/mi-instrument | mi/idk/platform/test/test_metadata.py | 11 | 2605 | #!/usr/bin/env python
"""
@package mi.idk.platform.test.test_metadata
@file mi.idk/platform/test/test_metadata.py
@author Bill French
@brief test metadata object
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
from os.path import basename, dirname
from os import makedirs
from os.path import exists
import sys
from nose.plugins.attrib import attr
from mock import Mock
import unittest
from mi.core.unit_test import MiUnitTest
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.platform.metadata import Metadata
from mi.idk.exceptions import InvalidParameters
import os
BASE_DIR = "/tmp"
DRIVER_PATH = "test_driver/foo"
METADATA_DIR = "/tmp/mi/platform/driver/test_driver/foo"
METADATA_FILE = "metadata.yml"
@attr('UNIT', group='mi')
class TestMetadata(MiUnitTest):
"""
Test the metadata object
"""
def setUp(self):
"""
Setup the test case
"""
self.createMetadataFile()
def createMetadataFile(self):
"""
"""
self.addCleanup(self.removeMetadataFile)
if(not exists(METADATA_DIR)):
os.makedirs(METADATA_DIR)
md_file = open("%s/%s" % (METADATA_DIR, METADATA_FILE), 'w')
md_file.write("driver_metadata:\n")
md_file.write(" author: Bill French\n")
md_file.write(" driver_path: test_driver/foo\n")
md_file.write(" driver_name: test_driver_foo\n")
md_file.write(" email: [email protected]\n")
md_file.write(" release_notes: some note\n")
md_file.write(" version: 0.2.2\n")
md_file.close()
def removeMetadataFile(self):
filename = "%s/%s" % (METADATA_DIR, METADATA_FILE)
if(exists(filename)):
pass
#os.unlink(filename)
def test_constructor(self):
"""
Test object creation
"""
default_metadata = Metadata()
self.assertTrue(default_metadata)
specific_metadata = Metadata(DRIVER_PATH, BASE_DIR)
self.assertTrue(specific_metadata)
self.assertTrue(os.path.isfile(specific_metadata.metadata_path()), msg="file doesn't exist: %s" % specific_metadata.metadata_path())
self.assertEqual(specific_metadata.driver_path, "test_driver/foo")
self.assertEqual(specific_metadata.driver_name, "test_driver_foo")
self.assertEqual(specific_metadata.author, "Bill French")
self.assertEqual(specific_metadata.email, "[email protected]")
self.assertEqual(specific_metadata.notes, "some note")
self.assertEqual(specific_metadata.version, "0.2.2")
| bsd-2-clause |
pgoeser/gnuradio | gr-howto-write-a-block/apps/howto_square.py | 36 | 2164 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Howto Square
# Generated: Thu Nov 12 11:26:07 2009
##################################################
import howto
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from gnuradio.wxgui import scopesink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class howto_square(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Howto Square")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 10e3
##################################################
# Blocks
##################################################
self.sink = scopesink2.scope_sink_f(
self.GetWin(),
title="Input",
sample_rate=samp_rate,
v_scale=20,
v_offset=0,
t_scale=0.002,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
self.Add(self.sink.win)
self.sink2 = scopesink2.scope_sink_f(
self.GetWin(),
title="Output",
sample_rate=samp_rate,
v_scale=0,
v_offset=0,
t_scale=0.002,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
self.Add(self.sink2.win)
self.sqr = howto.square_ff()
self.src = gr.vector_source_f(([float(n)-50 for n in range(100)]), True, 1)
self.thr = gr.throttle(gr.sizeof_float*1, samp_rate)
##################################################
# Connections
##################################################
self.connect((self.thr, 0), (self.sqr, 0))
self.connect((self.src, 0), (self.thr, 0))
self.connect((self.thr, 0), (self.sink, 0))
self.connect((self.sqr, 0), (self.sink2, 0))
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.sink.set_sample_rate(self.samp_rate)
self.sink2.set_sample_rate(self.samp_rate)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = howto_square()
tb.Run(True)
| gpl-3.0 |
EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/src/twisted/test/test_paths.py | 13 | 74412 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases covering L{twisted.python.filepath}.
"""
from __future__ import division, absolute_import
import os, time, pickle, errno, stat
from pprint import pformat
from twisted.python.compat import _PY3, unicode
from twisted.python.win32 import WindowsError, ERROR_DIRECTORY
from twisted.python import filepath
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest, SynchronousTestCase as TestCase
from zope.interface.verify import verifyObject
if not platform._supportsSymlinks():
symlinkSkip = "Platform does not support symlinks"
else:
symlinkSkip = None
class BytesTestCase(TestCase):
"""
Override default method implementations to support byte paths.
"""
def mktemp(self):
"""
Return a temporary path, encoded as bytes.
"""
return TestCase.mktemp(self).encode("utf-8")
class AbstractFilePathTests(BytesTestCase):
"""
Tests for L{IFilePath} implementations.
"""
f1content = b"file 1"
f2content = b"file 2"
def _mkpath(self, *p):
x = os.path.abspath(os.path.join(self.cmn, *p))
self.all.append(x)
return x
def subdir(self, *dirname):
os.mkdir(self._mkpath(*dirname))
def subfile(self, *dirname):
return open(self._mkpath(*dirname), "wb")
def setUp(self):
self.now = time.time()
cmn = self.cmn = os.path.abspath(self.mktemp())
self.all = [cmn]
os.mkdir(cmn)
self.subdir(b"sub1")
with self.subfile(b"file1") as f:
f.write(self.f1content)
with self.subfile(b"sub1", b"file2") as f:
f.write(self.f2content)
self.subdir(b'sub3')
self.subfile(b"sub3", b"file3.ext1").close()
self.subfile(b"sub3", b"file3.ext2").close()
self.subfile(b"sub3", b"file3.ext3").close()
self.path = filepath.FilePath(cmn)
self.root = filepath.FilePath(b"/")
def test_verifyObject(self):
"""
Instances of the path type being tested provide L{IFilePath}.
"""
self.assertTrue(verifyObject(filepath.IFilePath, self.path))
def test_segmentsFromPositive(self):
"""
Verify that the segments between two paths are correctly identified.
"""
self.assertEqual(
self.path.child(b"a").child(b"b").child(b"c").segmentsFrom(self.path),
[b"a", b"b", b"c"])
def test_segmentsFromNegative(self):
"""
Verify that segmentsFrom notices when the ancestor isn't an ancestor.
"""
self.assertRaises(
ValueError,
self.path.child(b"a").child(b"b").child(b"c").segmentsFrom,
self.path.child(b"d").child(b"c").child(b"e"))
def test_walk(self):
"""
Verify that walking the path gives the same result as the known file
hierarchy.
"""
x = [foo.path for foo in self.path.walk()]
self.assertEqual(set(x), set(self.all))
def test_parents(self):
"""
L{FilePath.parents()} should return an iterator of every ancestor of
the L{FilePath} in question.
"""
L = []
pathobj = self.path.child(b"a").child(b"b").child(b"c")
fullpath = pathobj.path
lastpath = fullpath
thispath = os.path.dirname(fullpath)
while lastpath != self.root.path:
L.append(thispath)
lastpath = thispath
thispath = os.path.dirname(thispath)
self.assertEqual([x.path for x in pathobj.parents()], L)
def test_validSubdir(self):
"""
Verify that a valid subdirectory will show up as a directory, but not as a
file, not as a symlink, and be listable.
"""
sub1 = self.path.child(b'sub1')
self.assertTrue(sub1.exists(),
"This directory does exist.")
self.assertTrue(sub1.isdir(),
"It's a directory.")
self.assertFalse(sub1.isfile(),
"It's a directory.")
self.assertFalse(sub1.islink(),
"It's a directory.")
self.assertEqual(sub1.listdir(),
[b'file2'])
def test_invalidSubdir(self):
"""
Verify that a subdirectory that doesn't exist is reported as such.
"""
sub2 = self.path.child(b'sub2')
self.assertFalse(sub2.exists(),
"This directory does not exist.")
def test_validFiles(self):
"""
Make sure that we can read existent non-empty files.
"""
f1 = self.path.child(b'file1')
with f1.open() as f:
self.assertEqual(f.read(), self.f1content)
f2 = self.path.child(b'sub1').child(b'file2')
with f2.open() as f:
self.assertEqual(f.read(), self.f2content)
def test_multipleChildSegments(self):
"""
C{fp.descendant([a, b, c])} returns the same L{FilePath} as is returned
by C{fp.child(a).child(b).child(c)}.
"""
multiple = self.path.descendant([b'a', b'b', b'c'])
single = self.path.child(b'a').child(b'b').child(b'c')
self.assertEqual(multiple, single)
def test_dictionaryKeys(self):
"""
Verify that path instances are usable as dictionary keys.
"""
f1 = self.path.child(b'file1')
f1prime = self.path.child(b'file1')
f2 = self.path.child(b'file2')
dictoid = {}
dictoid[f1] = 3
dictoid[f1prime] = 4
self.assertEqual(dictoid[f1], 4)
self.assertEqual(list(dictoid.keys()), [f1])
self.assertIs(list(dictoid.keys())[0], f1)
self.assertIsNot(list(dictoid.keys())[0], f1prime) # sanity check
dictoid[f2] = 5
self.assertEqual(dictoid[f2], 5)
self.assertEqual(len(dictoid), 2)
def test_dictionaryKeyWithString(self):
"""
Verify that path instances are usable as dictionary keys which do not clash
with their string counterparts.
"""
f1 = self.path.child(b'file1')
dictoid = {f1: 'hello'}
dictoid[f1.path] = 'goodbye'
self.assertEqual(len(dictoid), 2)
def test_childrenNonexistentError(self):
"""
Verify that children raises the appropriate exception for non-existent
directories.
"""
self.assertRaises(filepath.UnlistableError,
self.path.child(b'not real').children)
def test_childrenNotDirectoryError(self):
"""
Verify that listdir raises the appropriate exception for attempting to list
a file rather than a directory.
"""
self.assertRaises(filepath.UnlistableError,
self.path.child(b'file1').children)
def test_newTimesAreFloats(self):
"""
Verify that all times returned from the various new time functions are ints
(and hopefully therefore 'high precision').
"""
for p in self.path, self.path.child(b'file1'):
self.assertEqual(type(p.getAccessTime()), float)
self.assertEqual(type(p.getModificationTime()), float)
self.assertEqual(type(p.getStatusChangeTime()), float)
def test_oldTimesAreInts(self):
"""
Verify that all times returned from the various time functions are
integers, for compatibility.
"""
for p in self.path, self.path.child(b'file1'):
self.assertEqual(type(p.getatime()), int)
self.assertEqual(type(p.getmtime()), int)
self.assertEqual(type(p.getctime()), int)
class FakeWindowsPath(filepath.FilePath):
"""
A test version of FilePath which overrides listdir to raise L{WindowsError}.
"""
def listdir(self):
"""
@raise WindowsError: always.
"""
if _PY3:
# For Python 3.3 and higher, WindowsError is an alias for OSError.
# The first argument to the OSError constructor is errno, and the fourth
# argument is winerror.
# For further details, refer to:
# https://docs.python.org/3/library/exceptions.html#OSError
#
# On Windows, if winerror is set in the constructor,
# the errno value in the constructor is ignored, and OSError internally
# maps the winerror value to an errno value.
raise WindowsError(
None,
"A directory's validness was called into question",
self.path,
ERROR_DIRECTORY)
else:
raise WindowsError(
ERROR_DIRECTORY,
"A directory's validness was called into question")
class ListingCompatibilityTests(BytesTestCase):
"""
These tests verify compatibility with legacy behavior of directory listing.
"""
def test_windowsErrorExcept(self):
"""
Verify that when a WindowsError is raised from listdir, catching
WindowsError works.
"""
fwp = FakeWindowsPath(self.mktemp())
self.assertRaises(filepath.UnlistableError, fwp.children)
self.assertRaises(WindowsError, fwp.children)
if not platform.isWindows():
test_windowsErrorExcept.skip = "Only relevant on on Windows."
def test_alwaysCatchOSError(self):
"""
Verify that in the normal case where a directory does not exist, we will
get an OSError.
"""
fp = filepath.FilePath(self.mktemp())
self.assertRaises(OSError, fp.children)
def test_keepOriginalAttributes(self):
"""
Verify that the Unlistable exception raised will preserve the attributes of
the previously-raised exception.
"""
fp = filepath.FilePath(self.mktemp())
ose = self.assertRaises(OSError, fp.children)
d1 = list(ose.__dict__.keys())
d1.remove('originalException')
d2 = list(ose.originalException.__dict__.keys())
d1.sort()
d2.sort()
self.assertEqual(d1, d2)
class ExplodingFile:
"""
A C{file}-alike which raises exceptions from its I/O methods and keeps track
of whether it has been closed.
@ivar closed: A C{bool} which is C{False} until C{close} is called, then it
is C{True}.
"""
closed = False
def read(self, n=0):
"""
@raise IOError: Always raised.
"""
raise IOError()
def write(self, what):
"""
@raise IOError: Always raised.
"""
raise IOError()
def close(self):
"""
Mark the file as having been closed.
"""
self.closed = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
class TrackingFilePath(filepath.FilePath):
"""
A subclass of L{filepath.FilePath} which maintains a list of all other paths
created by clonePath.
@ivar trackingList: A list of all paths created by this path via
C{clonePath} (which also includes paths created by methods like
C{parent}, C{sibling}, C{child}, etc (and all paths subsequently created
by those paths, etc).
@type trackingList: C{list} of L{TrackingFilePath}
@ivar openedFiles: A list of all file objects opened by this
L{TrackingFilePath} or any other L{TrackingFilePath} in C{trackingList}.
@type openedFiles: C{list} of C{file}
"""
def __init__(self, path, alwaysCreate=False, trackingList=None):
filepath.FilePath.__init__(self, path, alwaysCreate)
if trackingList is None:
trackingList = []
self.trackingList = trackingList
self.openedFiles = []
def open(self, *a, **k):
"""
Override 'open' to track all files opened by this path.
"""
f = filepath.FilePath.open(self, *a, **k)
self.openedFiles.append(f)
return f
def openedPaths(self):
"""
Return a list of all L{TrackingFilePath}s associated with this
L{TrackingFilePath} that have had their C{open()} method called.
"""
return [path for path in self.trackingList if path.openedFiles]
def clonePath(self, name):
"""
Override L{filepath.FilePath.clonePath} to give the new path a reference
to the same tracking list.
"""
clone = TrackingFilePath(name, trackingList=self.trackingList)
self.trackingList.append(clone)
return clone
class ExplodingFilePath(filepath.FilePath):
"""
A specialized L{FilePath} which always returns an instance of
L{ExplodingFile} from its C{open} method.
@ivar fp: The L{ExplodingFile} instance most recently returned from the
C{open} method.
"""
def __init__(self, pathName, originalExploder=None):
"""
Initialize an L{ExplodingFilePath} with a name and a reference to the
@param pathName: The path name as passed to L{filepath.FilePath}.
@type pathName: C{str}
@param originalExploder: The L{ExplodingFilePath} to associate opened
files with.
@type originalExploder: L{ExplodingFilePath}
"""
filepath.FilePath.__init__(self, pathName)
if originalExploder is None:
originalExploder = self
self._originalExploder = originalExploder
def open(self, mode=None):
"""
Create, save, and return a new C{ExplodingFile}.
@param mode: Present for signature compatibility. Ignored.
@return: A new C{ExplodingFile}.
"""
f = self._originalExploder.fp = ExplodingFile()
return f
def clonePath(self, name):
return ExplodingFilePath(name, self._originalExploder)
class PermissionsTests(BytesTestCase):
"""
Test Permissions and RWX classes
"""
def assertNotUnequal(self, first, second, msg=None):
"""
Tests that C{first} != C{second} is false. This method tests the
__ne__ method, as opposed to L{assertEqual} (C{first} == C{second}),
which tests the __eq__ method.
Note: this should really be part of trial
"""
if first != second:
if msg is None:
                msg = ''
if len(msg) > 0:
msg += '\n'
raise self.failureException(
'%snot not unequal (__ne__ not implemented correctly):'
'\na = %s\nb = %s\n'
% (msg, pformat(first), pformat(second)))
return first
def test_rwxFromBools(self):
"""
L{RWX}'s constructor takes a set of booleans
"""
for r in (True, False):
for w in (True, False):
for x in (True, False):
rwx = filepath.RWX(r, w, x)
self.assertEqual(rwx.read, r)
self.assertEqual(rwx.write, w)
self.assertEqual(rwx.execute, x)
rwx = filepath.RWX(True, True, True)
self.assertTrue(rwx.read and rwx.write and rwx.execute)
def test_rwxEqNe(self):
"""
L{RWX}'s created with the same booleans are equivalent. If booleans
are different, they are not equal.
"""
for r in (True, False):
for w in (True, False):
for x in (True, False):
self.assertEqual(filepath.RWX(r, w, x),
filepath.RWX(r, w, x))
self.assertNotUnequal(filepath.RWX(r, w, x),
filepath.RWX(r, w, x))
self.assertNotEqual(filepath.RWX(True, True, True),
filepath.RWX(True, True, False))
self.assertNotEqual(3, filepath.RWX(True, True, True))
def test_rwxShorthand(self):
"""
L{RWX}'s shorthand string should be 'rwx' if read, write, and execute
permission bits are true. If any of those permissions bits are false,
the character is replaced by a '-'.
"""
def getChar(val, letter):
if val:
return letter
return '-'
for r in (True, False):
for w in (True, False):
for x in (True, False):
rwx = filepath.RWX(r, w, x)
self.assertEqual(rwx.shorthand(),
getChar(r, 'r') +
getChar(w, 'w') +
getChar(x, 'x'))
self.assertEqual(filepath.RWX(True, False, True).shorthand(), "r-x")
def test_permissionsFromStat(self):
"""
        L{Permissions}'s constructor takes a valid permissions bitmask and
        parses it to produce the correct set of boolean permissions.
"""
def _rwxFromStat(statModeInt, who):
def getPermissionBit(what, who):
return (statModeInt &
getattr(stat, "S_I%s%s" % (what, who))) > 0
return filepath.RWX(*[getPermissionBit(what, who) for what in
('R', 'W', 'X')])
for u in range(0, 8):
for g in range(0, 8):
for o in range(0, 8):
chmodString = "%d%d%d" % (u, g, o)
chmodVal = int(chmodString, 8)
perm = filepath.Permissions(chmodVal)
self.assertEqual(perm.user,
_rwxFromStat(chmodVal, "USR"),
"%s: got user: %s" %
(chmodString, perm.user))
self.assertEqual(perm.group,
_rwxFromStat(chmodVal, "GRP"),
"%s: got group: %s" %
(chmodString, perm.group))
self.assertEqual(perm.other,
_rwxFromStat(chmodVal, "OTH"),
"%s: got other: %s" %
(chmodString, perm.other))
perm = filepath.Permissions(0o777)
for who in ("user", "group", "other"):
for what in ("read", "write", "execute"):
self.assertTrue(getattr(getattr(perm, who), what))
def test_permissionsEq(self):
"""
Two L{Permissions}'s that are created with the same bitmask
are equivalent
"""
self.assertEqual(filepath.Permissions(0o777),
filepath.Permissions(0o777))
self.assertNotUnequal(filepath.Permissions(0o777),
filepath.Permissions(0o777))
self.assertNotEqual(filepath.Permissions(0o777),
filepath.Permissions(0o700))
self.assertNotEqual(3, filepath.Permissions(0o777))
def test_permissionsShorthand(self):
"""
L{Permissions}'s shorthand string is the RWX shorthand string for its
user permission bits, group permission bits, and other permission bits
concatenated together, without a space.
"""
for u in range(0, 8):
for g in range(0, 8):
for o in range(0, 8):
perm = filepath.Permissions(int("0o%d%d%d" % (u, g, o), 8))
self.assertEqual(perm.shorthand(),
''.join(x.shorthand() for x in (
perm.user, perm.group, perm.other)))
self.assertEqual(filepath.Permissions(0o770).shorthand(), "rwxrwx---")
class FilePathTests(AbstractFilePathTests):
"""
Test various L{FilePath} path manipulations.
In particular, note that tests defined on this class instead of on the base
class are only run against L{twisted.python.filepath}.
"""
def test_chmod(self):
"""
L{FilePath.chmod} modifies the permissions of
the passed file as expected (using C{os.stat} to check). We use some
basic modes that should work everywhere (even on Windows).
"""
for mode in (0o555, 0o777):
self.path.child(b"sub1").chmod(mode)
self.assertEqual(
stat.S_IMODE(os.stat(self.path.child(b"sub1").path).st_mode),
mode)
def symlink(self, target, name):
"""
Create a symbolic link named C{name} pointing at C{target}.
@type target: C{str}
@type name: C{str}
@raise SkipTest: raised if symbolic links are not supported on the
host platform.
"""
if symlinkSkip:
raise SkipTest(symlinkSkip)
os.symlink(target, name)
def createLinks(self):
"""
Create several symbolic links to files and directories.
"""
subdir = self.path.child(b"sub1")
self.symlink(subdir.path, self._mkpath(b"sub1.link"))
self.symlink(subdir.child(b"file2").path, self._mkpath(b"file2.link"))
self.symlink(subdir.child(b"file2").path,
self._mkpath(b"sub1", b"sub1.file2.link"))
def test_realpathSymlink(self):
"""
L{FilePath.realpath} returns the path of the ultimate target of a
symlink.
"""
self.createLinks()
self.symlink(self.path.child(b"file2.link").path,
self.path.child(b"link.link").path)
self.assertEqual(self.path.child(b"link.link").realpath(),
self.path.child(b"sub1").child(b"file2"))
def test_realpathCyclicalSymlink(self):
"""
L{FilePath.realpath} raises L{filepath.LinkError} if the path is a
symbolic link which is part of a cycle.
"""
self.symlink(self.path.child(b"link1").path, self.path.child(b"link2").path)
self.symlink(self.path.child(b"link2").path, self.path.child(b"link1").path)
self.assertRaises(filepath.LinkError,
self.path.child(b"link2").realpath)
def test_realpathNoSymlink(self):
"""
L{FilePath.realpath} returns the path itself if the path is not a
symbolic link.
"""
self.assertEqual(self.path.child(b"sub1").realpath(),
self.path.child(b"sub1"))
def test_walkCyclicalSymlink(self):
"""
Verify that walking a path with a cyclical symlink raises an error
"""
self.createLinks()
self.symlink(self.path.child(b"sub1").path,
self.path.child(b"sub1").child(b"sub1.loopylink").path)
def iterateOverPath():
return [foo.path for foo in self.path.walk()]
self.assertRaises(filepath.LinkError, iterateOverPath)
def test_walkObeysDescendWithCyclicalSymlinks(self):
"""
Verify that, after making a path with cyclical symlinks, when the
supplied C{descend} predicate returns C{False}, the target is not
traversed, as if it was a simple symlink.
"""
self.createLinks()
# we create cyclical symlinks
self.symlink(self.path.child(b"sub1").path,
self.path.child(b"sub1").child(b"sub1.loopylink").path)
def noSymLinks(path):
return not path.islink()
def iterateOverPath():
return [foo.path for foo in self.path.walk(descend=noSymLinks)]
self.assertTrue(iterateOverPath())
def test_walkObeysDescend(self):
"""
Verify that when the supplied C{descend} predicate returns C{False},
the target is not traversed.
"""
self.createLinks()
def noSymLinks(path):
return not path.islink()
x = [foo.path for foo in self.path.walk(descend=noSymLinks)]
self.assertEqual(set(x), set(self.all))
def test_getAndSet(self):
content = b'newcontent'
self.path.child(b'new').setContent(content)
newcontent = self.path.child(b'new').getContent()
self.assertEqual(content, newcontent)
content = b'content'
self.path.child(b'new').setContent(content, b'.tmp')
newcontent = self.path.child(b'new').getContent()
self.assertEqual(content, newcontent)
def test_getContentFileClosing(self):
"""
If reading from the underlying file raises an exception,
L{FilePath.getContent} raises that exception after closing the file.
"""
fp = ExplodingFilePath(b"")
self.assertRaises(IOError, fp.getContent)
self.assertTrue(fp.fp.closed)
def test_symbolicLink(self):
"""
Verify the behavior of the C{isLink} method against links and
non-links. Also check that the symbolic link shares the directory
property with its target.
"""
s4 = self.path.child(b"sub4")
s3 = self.path.child(b"sub3")
self.symlink(s3.path, s4.path)
self.assertTrue(s4.islink())
self.assertFalse(s3.islink())
self.assertTrue(s4.isdir())
self.assertTrue(s3.isdir())
def test_linkTo(self):
"""
Verify that symlink creates a valid symlink that is both a link and a
file if its target is a file, or a directory if its target is a
directory.
"""
targetLinks = [
(self.path.child(b"sub2"), self.path.child(b"sub2.link")),
(self.path.child(b"sub2").child(b"file3.ext1"),
self.path.child(b"file3.ext1.link"))
]
for target, link in targetLinks:
target.linkTo(link)
self.assertTrue(link.islink(), "This is a link")
self.assertEqual(target.isdir(), link.isdir())
self.assertEqual(target.isfile(), link.isfile())
def test_linkToErrors(self):
"""
Verify C{linkTo} fails in the following case:
- the target is in a directory that doesn't exist
- the target already exists
"""
self.assertRaises(OSError, self.path.child(b"file1").linkTo,
self.path.child(b'nosub').child(b'file1'))
self.assertRaises(OSError, self.path.child(b"file1").linkTo,
self.path.child(b'sub1').child(b'file2'))
if symlinkSkip:
test_symbolicLink.skip = symlinkSkip
test_linkTo.skip = symlinkSkip
test_linkToErrors.skip = symlinkSkip
def testMultiExt(self):
f3 = self.path.child(b'sub3').child(b'file3')
exts = b'.foo', b'.bar', b'ext1', b'ext2', b'ext3'
self.assertFalse(f3.siblingExtensionSearch(*exts))
f3e = f3.siblingExtension(b".foo")
f3e.touch()
self.assertFalse(not f3.siblingExtensionSearch(*exts).exists())
self.assertFalse(not f3.siblingExtensionSearch(b'*').exists())
f3e.remove()
self.assertFalse(f3.siblingExtensionSearch(*exts))
def testPreauthChild(self):
fp = filepath.FilePath(b'.')
fp.preauthChild(b'foo/bar')
self.assertRaises(filepath.InsecurePath, fp.child, u'/mon\u20acy')
def testStatCache(self):
p = self.path.child(b'stattest')
p.touch()
self.assertEqual(p.getsize(), 0)
self.assertEqual(abs(p.getmtime() - time.time()) // 20, 0)
self.assertEqual(abs(p.getctime() - time.time()) // 20, 0)
self.assertEqual(abs(p.getatime() - time.time()) // 20, 0)
self.assertTrue(p.exists())
self.assertTrue(p.exists())
# OOB removal: FilePath.remove() will automatically restat
os.remove(p.path)
# test caching
self.assertTrue(p.exists())
p.restat(reraise=False)
self.assertFalse(p.exists())
self.assertFalse(p.islink())
self.assertFalse(p.isdir())
self.assertFalse(p.isfile())
def testPersist(self):
newpath = pickle.loads(pickle.dumps(self.path))
self.assertEqual(self.path.__class__, newpath.__class__)
self.assertEqual(self.path.path, newpath.path)
def testInsecureUNIX(self):
self.assertRaises(filepath.InsecurePath, self.path.child, b"..")
self.assertRaises(filepath.InsecurePath, self.path.child, b"/etc")
self.assertRaises(filepath.InsecurePath, self.path.child, b"../..")
def testInsecureWin32(self):
self.assertRaises(filepath.InsecurePath, self.path.child, b"..\\..")
self.assertRaises(filepath.InsecurePath, self.path.child, b"C:randomfile")
if platform.getType() != 'win32':
testInsecureWin32.skip = "Test will run only on Windows."
def testInsecureWin32Whacky(self):
"""
Windows has 'special' filenames like NUL and CON and COM1 and LPR
and PRN and ... god knows what else. They can be located anywhere in
the filesystem. For obvious reasons, we do not wish to normally permit
access to these.
"""
self.assertRaises(filepath.InsecurePath, self.path.child, b"CON")
self.assertRaises(filepath.InsecurePath, self.path.child, b"C:CON")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:\CON")
if platform.getType() != 'win32':
testInsecureWin32Whacky.skip = "Test will run only on Windows."
def testComparison(self):
self.assertEqual(filepath.FilePath(b'a'),
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'z') >
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'z') >=
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'a') >=
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'a') <=
filepath.FilePath(b'a'))
self.assertTrue(filepath.FilePath(b'a') <
filepath.FilePath(b'z'))
self.assertTrue(filepath.FilePath(b'a') <=
filepath.FilePath(b'z'))
self.assertTrue(filepath.FilePath(b'a') !=
filepath.FilePath(b'z'))
self.assertTrue(filepath.FilePath(b'z') !=
filepath.FilePath(b'a'))
self.assertFalse(filepath.FilePath(b'z') !=
filepath.FilePath(b'z'))
def test_descendantOnly(self):
"""
If C{".."} is in the sequence passed to L{FilePath.descendant},
L{InsecurePath} is raised.
"""
self.assertRaises(
filepath.InsecurePath,
self.path.descendant, [u'mon\u20acy', u'..'])
def testSibling(self):
p = self.path.child(b'sibling_start')
ts = p.sibling(b'sibling_test')
self.assertEqual(ts.dirname(), p.dirname())
self.assertEqual(ts.basename(), b'sibling_test')
ts.createDirectory()
self.assertIn(ts, self.path.children())
def testTemporarySibling(self):
ts = self.path.temporarySibling()
self.assertEqual(ts.dirname(), self.path.dirname())
self.assertNotIn(ts.basename(), self.path.listdir())
ts.createDirectory()
self.assertIn(ts, self.path.parent().children())
def test_temporarySiblingExtension(self):
"""
If L{FilePath.temporarySibling} is given an extension argument, it will
produce path objects with that extension appended to their names.
"""
testExtension = b".test-extension"
ts = self.path.temporarySibling(testExtension)
self.assertTrue(ts.basename().endswith(testExtension),
"%s does not end with %s" % (
ts.basename(), testExtension))
def test_removeDirectory(self):
"""
L{FilePath.remove} on a L{FilePath} that refers to a directory will
recursively delete its contents.
"""
self.path.remove()
self.assertFalse(self.path.exists())
def test_removeWithSymlink(self):
"""
For a path which is a symbolic link, L{FilePath.remove} just deletes
the link, not the target.
"""
link = self.path.child(b"sub1.link")
# setUp creates the sub1 child
self.symlink(self.path.child(b"sub1").path, link.path)
link.remove()
self.assertFalse(link.exists())
self.assertTrue(self.path.child(b"sub1").exists())
def test_copyToDirectory(self):
"""
L{FilePath.copyTo} makes a copy of all the contents of the directory
named by that L{FilePath} if it is able to do so.
"""
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.path.remove()
fp.copyTo(self.path)
newPaths = list(self.path.walk()) # Record double-copy state
newPaths.sort()
oldPaths.sort()
self.assertEqual(newPaths, oldPaths)
def test_copyToMissingDestFileClosing(self):
"""
If an exception is raised while L{FilePath.copyTo} is trying to open
        the source file to read from, the destination file is closed and the
exception is raised to the caller of L{FilePath.copyTo}.
"""
nosuch = self.path.child(b"nothere")
# Make it look like something to copy, even though it doesn't exist.
# This could happen if the file is deleted between the isfile check and
# the file actually being opened.
nosuch.isfile = lambda: True
# We won't get as far as writing to this file, but it's still useful for
# tracking whether we closed it.
destination = ExplodingFilePath(self.mktemp())
self.assertRaises(IOError, nosuch.copyTo, destination)
self.assertTrue(destination.fp.closed)
def test_copyToFileClosing(self):
"""
If an exception is raised while L{FilePath.copyTo} is copying bytes
between two regular files, the source and destination files are closed
and the exception propagates to the caller of L{FilePath.copyTo}.
"""
destination = ExplodingFilePath(self.mktemp())
source = ExplodingFilePath(__file__)
self.assertRaises(IOError, source.copyTo, destination)
self.assertTrue(source.fp.closed)
self.assertTrue(destination.fp.closed)
def test_copyToDirectoryItself(self):
"""
L{FilePath.copyTo} fails with an OSError or IOError (depending on
platform, as it propagates errors from open() and write()) when
attempting to copy a directory to a child of itself.
"""
self.assertRaises((OSError, IOError),
self.path.copyTo, self.path.child(b'file1'))
def test_copyToWithSymlink(self):
"""
Verify that copying with followLinks=True copies symlink targets
instead of symlinks
"""
self.symlink(self.path.child(b"sub1").path,
self.path.child(b"link1").path)
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.assertFalse(fp.child(b"link1").islink())
self.assertEqual([x.basename() for x in fp.child(b"sub1").children()],
[x.basename() for x in fp.child(b"link1").children()])
def test_copyToWithoutSymlink(self):
"""
Verify that copying with followLinks=False copies symlinks as symlinks
"""
self.symlink(b"sub1", self.path.child(b"link1").path)
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp, followLinks=False)
self.assertTrue(fp.child(b"link1").islink())
self.assertEqual(os.readlink(self.path.child(b"link1").path),
os.readlink(fp.child(b"link1").path))
def test_copyToMissingSource(self):
"""
If the source path is missing, L{FilePath.copyTo} raises L{OSError}.
"""
path = filepath.FilePath(self.mktemp())
exc = self.assertRaises(OSError, path.copyTo, b'some other path')
self.assertEqual(exc.errno, errno.ENOENT)
def test_moveTo(self):
"""
Verify that moving an entire directory results into another directory
with the same content.
"""
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.moveTo(fp)
fp.moveTo(self.path)
newPaths = list(self.path.walk()) # Record double-move state
newPaths.sort()
oldPaths.sort()
self.assertEqual(newPaths, oldPaths)
def test_moveToExistsCache(self):
"""
A L{FilePath} that has been moved aside with L{FilePath.moveTo} no
longer registers as existing. Its previously non-existent target
exists, though, as it was created by the call to C{moveTo}.
"""
fp = filepath.FilePath(self.mktemp())
fp2 = filepath.FilePath(self.mktemp())
fp.touch()
# Both a sanity check (make sure the file status looks right) and an
# enticement for stat-caching logic to kick in and remember that these
# exist / don't exist.
self.assertTrue(fp.exists())
self.assertFalse(fp2.exists())
fp.moveTo(fp2)
self.assertFalse(fp.exists())
self.assertTrue(fp2.exists())
def test_moveToExistsCacheCrossMount(self):
"""
The assertion of test_moveToExistsCache should hold in the case of a
cross-mount move.
"""
self.setUpFaultyRename()
self.test_moveToExistsCache()
def test_moveToSizeCache(self, hook=lambda : None):
"""
L{FilePath.moveTo} clears its destination's status cache, such that
calls to L{FilePath.getsize} after the call to C{moveTo} will report the
new size, not the old one.
This is a separate test from C{test_moveToExistsCache} because it is
intended to cover the fact that the destination's cache is dropped;
test_moveToExistsCache doesn't cover this case because (currently) a
file that doesn't exist yet does not cache the fact of its non-
existence.
"""
fp = filepath.FilePath(self.mktemp())
fp2 = filepath.FilePath(self.mktemp())
fp.setContent(b"1234")
fp2.setContent(b"1234567890")
hook()
# Sanity check / kick off caching.
self.assertEqual(fp.getsize(), 4)
self.assertEqual(fp2.getsize(), 10)
# Actually attempting to replace a file on Windows would fail with
# ERROR_ALREADY_EXISTS, but we don't need to test that, just the cached
# metadata, so, delete the file ...
os.remove(fp2.path)
# ... but don't clear the status cache, as fp2.remove() would.
self.assertEqual(fp2.getsize(), 10)
fp.moveTo(fp2)
self.assertEqual(fp2.getsize(), 4)
def test_moveToSizeCacheCrossMount(self):
"""
The assertion of test_moveToSizeCache should hold in the case of a
cross-mount move.
"""
self.test_moveToSizeCache(hook=self.setUpFaultyRename)
def test_moveToError(self):
"""
        Verify error behavior of moveTo: it should raise OSError or
        IOError when you try to move a path into one of its children. It is
        simply the error raised by the underlying rename system call.
"""
self.assertRaises((OSError, IOError), self.path.moveTo, self.path.child(b'file1'))
def setUpFaultyRename(self):
"""
Set up a C{os.rename} that will fail with L{errno.EXDEV} on first call.
This is used to simulate a cross-device rename failure.
@return: a list of pair (src, dest) of calls to C{os.rename}
@rtype: C{list} of C{tuple}
"""
invokedWith = []
def faultyRename(src, dest):
invokedWith.append((src, dest))
if len(invokedWith) == 1:
raise OSError(errno.EXDEV, 'Test-induced failure simulating '
'cross-device rename failure')
return originalRename(src, dest)
originalRename = os.rename
self.patch(os, "rename", faultyRename)
return invokedWith
def test_crossMountMoveTo(self):
"""
C{moveTo} should be able to handle C{EXDEV} error raised by
C{os.rename} when trying to move a file on a different mounted
filesystem.
"""
invokedWith = self.setUpFaultyRename()
# Bit of a whitebox test - force os.rename, which moveTo tries
# before falling back to a slower method, to fail, forcing moveTo to
# use the slower behavior.
self.test_moveTo()
# A bit of a sanity check for this whitebox test - if our rename
# was never invoked, the test has probably fallen into disrepair!
self.assertTrue(invokedWith)
def test_crossMountMoveToWithSymlink(self):
"""
By default, when moving a symlink, it should follow the link and
actually copy the content of the linked node.
"""
invokedWith = self.setUpFaultyRename()
f2 = self.path.child(b'file2')
f3 = self.path.child(b'file3')
self.symlink(self.path.child(b'file1').path, f2.path)
f2.moveTo(f3)
self.assertFalse(f3.islink())
self.assertEqual(f3.getContent(), b'file 1')
self.assertTrue(invokedWith)
def test_crossMountMoveToWithoutSymlink(self):
"""
        Verify that moveTo called with followLinks=False actually creates
        another symlink.
"""
invokedWith = self.setUpFaultyRename()
f2 = self.path.child(b'file2')
f3 = self.path.child(b'file3')
self.symlink(self.path.child(b'file1').path, f2.path)
f2.moveTo(f3, followLinks=False)
self.assertTrue(f3.islink())
self.assertEqual(f3.getContent(), b'file 1')
self.assertTrue(invokedWith)
def test_createBinaryMode(self):
"""
L{FilePath.create} should always open (and write to) files in binary
mode; line-feed octets should be unmodified.
(While this test should pass on all platforms, it is only really
interesting on platforms which have the concept of binary mode, i.e.
Windows platforms.)
"""
path = filepath.FilePath(self.mktemp())
with path.create() as f:
self.assertIn("b", f.mode)
f.write(b"\n")
with open(path.path, "rb") as fp:
read = fp.read()
self.assertEqual(read, b"\n")
def testOpen(self):
# Opening a file for reading when it does not already exist is an error
nonexistent = self.path.child(b'nonexistent')
e = self.assertRaises(IOError, nonexistent.open)
self.assertEqual(e.errno, errno.ENOENT)
# Opening a file for writing when it does not exist is okay
writer = self.path.child(b'writer')
with writer.open('w') as f:
f.write(b'abc\ndef')
# Make sure those bytes ended up there - and test opening a file for
# reading when it does exist at the same time
with writer.open() as f:
self.assertEqual(f.read(), b'abc\ndef')
# Re-opening that file in write mode should erase whatever was there.
writer.open('w').close()
with writer.open() as f:
self.assertEqual(f.read(), b'')
# Put some bytes in a file so we can test that appending does not
# destroy them.
appender = self.path.child(b'appender')
with appender.open('w') as f:
f.write(b'abc')
with appender.open('a') as f:
f.write(b'def')
with appender.open('r') as f:
self.assertEqual(f.read(), b'abcdef')
# read/write should let us do both without erasing those bytes
with appender.open('r+') as f:
self.assertEqual(f.read(), b'abcdef')
# ANSI C *requires* an fseek or an fgetpos between an fread and an
# fwrite or an fwrite and an fread. We can't reliably get Python to
# invoke fgetpos, so we seek to a 0 byte offset from the current
# position instead. Also, Python sucks for making this seek
# relative to 1 instead of a symbolic constant representing the
# current file position.
f.seek(0, 1)
# Put in some new bytes for us to test for later.
f.write(b'ghi')
# Make sure those new bytes really showed up
with appender.open('r') as f:
self.assertEqual(f.read(), b'abcdefghi')
# write/read should let us do both, but erase anything that's there
# already.
with appender.open('w+') as f:
self.assertEqual(f.read(), b'')
f.seek(0, 1) # Don't forget this!
f.write(b'123')
# super append mode should let us read and write and also position the
# cursor at the end of the file, without erasing everything.
with appender.open('a+') as f:
# The order of these lines may seem surprising, but it is
# necessary. The cursor is not at the end of the file until after
# the first write.
f.write(b'456')
f.seek(0, 1) # Asinine.
self.assertEqual(f.read(), b'')
f.seek(0, 0)
self.assertEqual(f.read(), b'123456')
# Opening a file exclusively must fail if that file exists already.
nonexistent.requireCreate(True)
nonexistent.open('w').close()
existent = nonexistent
del nonexistent
self.assertRaises((OSError, IOError), existent.open)
def test_openWithExplicitBinaryMode(self):
"""
        Due to a bug in Python 2.7 on Windows, including multiple 'b'
        characters in the mode passed to the built-in open() will cause an
error. FilePath.open() ensures that only a single 'b' character is
included in the mode passed to the built-in open().
See http://bugs.python.org/issue7686 for details about the bug.
"""
writer = self.path.child(b'explicit-binary')
with writer.open('wb') as file:
file.write(b'abc\ndef')
        self.assertTrue(writer.exists())
def test_openWithRedundantExplicitBinaryModes(self):
"""
        Due to a bug in Python 2.7 on Windows, including multiple 'b'
        characters in the mode passed to the built-in open() will cause an
error. No matter how many 'b' modes are specified, FilePath.open()
ensures that only a single 'b' character is included in the mode
passed to the built-in open().
See http://bugs.python.org/issue7686 for details about the bug.
"""
writer = self.path.child(b'multiple-binary')
with writer.open('wbb') as file:
file.write(b'abc\ndef')
        self.assertTrue(writer.exists())
def test_existsCache(self):
"""
        Check that C{filepath.FilePath.exists} correctly restats the object if
        an operation has occurred in the meantime.
"""
fp = filepath.FilePath(self.mktemp())
self.assertFalse(fp.exists())
fp.makedirs()
self.assertTrue(fp.exists())
def test_makedirsMakesDirectoriesRecursively(self):
"""
        C{FilePath.makedirs} creates a directory at C{path}, including
recursively creating all parent directories leading up to the path.
"""
fp = filepath.FilePath(os.path.join(
self.mktemp(), b"foo", b"bar", b"baz"))
self.assertFalse(fp.exists())
fp.makedirs()
self.assertTrue(fp.exists())
self.assertTrue(fp.isdir())
def test_makedirsMakesDirectoriesWithIgnoreExistingDirectory(self):
"""
Calling C{FilePath.makedirs} with C{ignoreExistingDirectory} set to
        C{True} has no effect if the directory does not exist.
"""
fp = filepath.FilePath(self.mktemp())
self.assertFalse(fp.exists())
fp.makedirs(ignoreExistingDirectory=True)
self.assertTrue(fp.exists())
self.assertTrue(fp.isdir())
def test_makedirsThrowsWithExistentDirectory(self):
"""
C{FilePath.makedirs} throws an C{OSError} exception
when called on a directory that already exists.
"""
fp = filepath.FilePath(os.path.join(self.mktemp()))
fp.makedirs()
exception = self.assertRaises(OSError, fp.makedirs)
self.assertEqual(exception.errno, errno.EEXIST)
def test_makedirsAcceptsIgnoreExistingDirectory(self):
"""
C{FilePath.makedirs} succeeds when called on a directory that already
        exists and the C{ignoreExistingDirectory} argument is set to C{True}.
"""
fp = filepath.FilePath(self.mktemp())
fp.makedirs()
self.assertTrue(fp.exists())
fp.makedirs(ignoreExistingDirectory=True)
self.assertTrue(fp.exists())
def test_makedirsIgnoreExistingDirectoryExistAlreadyAFile(self):
"""
When C{FilePath.makedirs} is called with C{ignoreExistingDirectory} set
        to C{True}, it throws an C{OSError} exception if the path is a file.
"""
fp = filepath.FilePath(self.mktemp())
fp.create()
self.assertTrue(fp.isfile())
exception = self.assertRaises(
OSError, fp.makedirs, ignoreExistingDirectory=True)
self.assertEqual(exception.errno, errno.EEXIST)
def test_makedirsRaisesNonEexistErrorsIgnoreExistingDirectory(self):
"""
When C{FilePath.makedirs} is called with C{ignoreExistingDirectory} set
        to C{True}, it raises an C{OSError} exception if the exception's errno
        is not EEXIST.
"""
def faultyMakedirs(path):
raise OSError(errno.EACCES, 'Permission Denied')
self.patch(os, 'makedirs', faultyMakedirs)
fp = filepath.FilePath(self.mktemp())
exception = self.assertRaises(
OSError, fp.makedirs, ignoreExistingDirectory=True)
self.assertEqual(exception.errno, errno.EACCES)
def test_changed(self):
"""
L{FilePath.changed} indicates that the L{FilePath} has changed, but does
not re-read the status information from the filesystem until it is
queried again via another method, such as C{getsize}.
"""
fp = filepath.FilePath(self.mktemp())
fp.setContent(b"12345")
self.assertEqual(fp.getsize(), 5)
# Someone else comes along and changes the file.
with open(fp.path, 'wb') as fObj:
fObj.write(b"12345678")
# Sanity check for caching: size should still be 5.
self.assertEqual(fp.getsize(), 5)
fp.changed()
# This path should look like we don't know what status it's in, not that
# we know that it didn't exist when last we checked.
self.assertIsNone(fp.statinfo)
self.assertEqual(fp.getsize(), 8)
def test_getPermissions_POSIX(self):
"""
Getting permissions for a file returns a L{Permissions} object for
        POSIX platforms (which support separate user, group, and other
        permissions bits).
"""
for mode in (0o777, 0o700):
self.path.child(b"sub1").chmod(mode)
self.assertEqual(self.path.child(b"sub1").getPermissions(),
filepath.Permissions(mode))
self.path.child(b"sub1").chmod(0o764) #sanity check
self.assertEqual(
self.path.child(b"sub1").getPermissions().shorthand(),
"rwxrw-r--")
def test_deprecateStatinfoGetter(self):
"""
Getting L{twisted.python.filepath.FilePath.statinfo} is deprecated.
"""
fp = filepath.FilePath(self.mktemp())
fp.statinfo
warningInfo = self.flushWarnings([self.test_deprecateStatinfoGetter])
self.assertEqual(len(warningInfo), 1)
self.assertEqual(warningInfo[0]['category'], DeprecationWarning)
self.assertEqual(
warningInfo[0]['message'],
"twisted.python.filepath.FilePath.statinfo was deprecated in "
"Twisted 15.0.0; please use other FilePath methods such as "
"getsize(), isdir(), getModificationTime(), etc. instead")
def test_deprecateStatinfoSetter(self):
"""
Setting L{twisted.python.filepath.FilePath.statinfo} is deprecated.
"""
fp = filepath.FilePath(self.mktemp())
fp.statinfo = None
warningInfo = self.flushWarnings([self.test_deprecateStatinfoSetter])
self.assertEqual(len(warningInfo), 1)
self.assertEqual(warningInfo[0]['category'], DeprecationWarning)
self.assertEqual(
warningInfo[0]['message'],
"twisted.python.filepath.FilePath.statinfo was deprecated in "
"Twisted 15.0.0; please use other FilePath methods such as "
"getsize(), isdir(), getModificationTime(), etc. instead")
def test_deprecateStatinfoSetterSets(self):
"""
Setting L{twisted.python.filepath.FilePath.statinfo} changes the value
of _statinfo such that getting statinfo again returns the new value.
"""
fp = filepath.FilePath(self.mktemp())
fp.statinfo = None
self.assertIsNone(fp.statinfo)
def test_filePathNotDeprecated(self):
"""
While accessing L{twisted.python.filepath.FilePath.statinfo} is
deprecated, the filepath itself is not.
"""
filepath.FilePath(self.mktemp())
warningInfo = self.flushWarnings([self.test_filePathNotDeprecated])
self.assertEqual(warningInfo, [])
def test_getPermissions_Windows(self):
"""
Getting permissions for a file returns a L{Permissions} object in
Windows. Windows requires a different test, because user permissions
= group permissions = other permissions. Also, chmod may not be able
to set the execute bit, so we are skipping tests that set the execute
bit.
"""
# Change permission after test so file can be deleted
self.addCleanup(self.path.child(b"sub1").chmod, 0o777)
for mode in (0o777, 0o555):
self.path.child(b"sub1").chmod(mode)
self.assertEqual(self.path.child(b"sub1").getPermissions(),
filepath.Permissions(mode))
self.path.child(b"sub1").chmod(0o511) #sanity check to make sure that
# user=group=other permissions
self.assertEqual(self.path.child(b"sub1").getPermissions().shorthand(),
"r-xr-xr-x")
def test_whetherBlockOrSocket(self):
"""
Ensure that a file is not a block or socket
"""
self.assertFalse(self.path.isBlockDevice())
self.assertFalse(self.path.isSocket())
def test_statinfoBitsNotImplementedInWindows(self):
"""
Verify that certain file stats are not available on Windows
"""
self.assertRaises(NotImplementedError, self.path.getInodeNumber)
self.assertRaises(NotImplementedError, self.path.getDevice)
self.assertRaises(NotImplementedError, self.path.getNumberOfHardLinks)
self.assertRaises(NotImplementedError, self.path.getUserID)
self.assertRaises(NotImplementedError, self.path.getGroupID)
def test_statinfoBitsAreNumbers(self):
"""
Verify that file inode/device/nlinks/uid/gid stats are numbers in
a POSIX environment
"""
if _PY3:
numbers = int
else:
numbers = (int, long)
c = self.path.child(b'file1')
for p in self.path, c:
self.assertIsInstance(p.getInodeNumber(), numbers)
self.assertIsInstance(p.getDevice(), numbers)
self.assertIsInstance(p.getNumberOfHardLinks(), numbers)
self.assertIsInstance(p.getUserID(), numbers)
self.assertIsInstance(p.getGroupID(), numbers)
self.assertEqual(self.path.getUserID(), c.getUserID())
self.assertEqual(self.path.getGroupID(), c.getGroupID())
def test_statinfoNumbersAreValid(self):
"""
Verify that the right numbers come back from the right accessor methods
for file inode/device/nlinks/uid/gid (in a POSIX environment)
"""
# specify fake statinfo information
class FakeStat:
st_ino = 200
st_dev = 300
st_nlink = 400
st_uid = 500
st_gid = 600
# monkey patch in a fake restat method for self.path
fake = FakeStat()
def fakeRestat(*args, **kwargs):
self.path._statinfo = fake
self.path.restat = fakeRestat
# ensure that restat will need to be called to get values
self.path._statinfo = None
self.assertEqual(self.path.getInodeNumber(), fake.st_ino)
self.assertEqual(self.path.getDevice(), fake.st_dev)
self.assertEqual(self.path.getNumberOfHardLinks(), fake.st_nlink)
self.assertEqual(self.path.getUserID(), fake.st_uid)
self.assertEqual(self.path.getGroupID(), fake.st_gid)
if platform.isWindows():
test_statinfoBitsAreNumbers.skip = True
test_statinfoNumbersAreValid.skip = True
test_getPermissions_POSIX.skip = True
else:
test_statinfoBitsNotImplementedInWindows.skip = "Test will run only on Windows."
test_getPermissions_Windows.skip = "Test will run only on Windows."
class SetContentTests(BytesTestCase):
"""
Tests for L{FilePath.setContent}.
"""
def test_write(self):
"""
Contents of the file referred to by a L{FilePath} can be written using
L{FilePath.setContent}.
"""
pathString = self.mktemp()
path = filepath.FilePath(pathString)
path.setContent(b"hello, world")
with open(pathString, "rb") as fObj:
contents = fObj.read()
self.assertEqual(b"hello, world", contents)
def test_fileClosing(self):
"""
If writing to the underlying file raises an exception,
L{FilePath.setContent} raises that exception after closing the file.
"""
fp = ExplodingFilePath(b"")
self.assertRaises(IOError, fp.setContent, b"blah")
self.assertTrue(fp.fp.closed)
def test_nameCollision(self):
"""
L{FilePath.setContent} will use a different temporary filename on each
invocation, so that multiple processes, threads, or reentrant
invocations will not collide with each other.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent(b"alpha")
fp.setContent(b"beta")
# Sanity check: setContent should only open one derivative path each
# time to store the temporary file.
openedSiblings = fp.openedPaths()
self.assertEqual(len(openedSiblings), 2)
self.assertNotEqual(openedSiblings[0], openedSiblings[1])
def _assertOneOpened(self, fp, extension):
"""
Assert that the L{TrackingFilePath} C{fp} was used to open one sibling
with the given extension.
@param fp: A L{TrackingFilePath} which should have been used to open
file at a sibling path.
@type fp: L{TrackingFilePath}
@param extension: The extension the sibling path is expected to have
had.
@type extension: L{bytes}
@raise: C{self.failureException} is raised if the extension of the
opened file is incorrect or if not exactly one file was opened
using C{fp}.
"""
opened = fp.openedPaths()
self.assertEqual(len(opened), 1, "expected exactly one opened file")
self.assertTrue(
opened[0].basename().endswith(extension),
"%s does not end with %r extension" % (
opened[0].basename(), extension))
def test_defaultExtension(self):
"""
L{FilePath.setContent} creates temporary files with the extension
I{.new} if no alternate extension value is given.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent(b"hello")
self._assertOneOpened(fp, b".new")
def test_customExtension(self):
"""
L{FilePath.setContent} creates temporary files with a user-supplied
extension so that if it is somehow interrupted while writing them the
file that it leaves behind will be identifiable.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent(b"goodbye", b"-something-else")
self._assertOneOpened(fp, b"-something-else")
class UnicodeFilePathTests(TestCase):
"""
L{FilePath} instances should have the same internal representation as they
were instantiated with.
"""
def test_UnicodeInstantiation(self):
"""
L{FilePath} instantiated with a text path will return a text-mode
FilePath.
"""
fp = filepath.FilePath(u'./mon\u20acy')
self.assertEqual(type(fp.path), unicode)
def test_UnicodeInstantiationBytesChild(self):
"""
Calling L{FilePath.child} on a text-mode L{FilePath} with a L{bytes}
subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.child(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_UnicodeInstantiationUnicodeChild(self):
"""
Calling L{FilePath.child} on a text-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.child(u'mon\u20acy')
self.assertEqual(type(child.path), unicode)
def test_UnicodeInstantiationUnicodePreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a text-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.preauthChild(u'mon\u20acy')
self.assertEqual(type(child.path), unicode)
def test_UnicodeInstantiationBytesPreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a text-mode L{FilePath} with a bytes
subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy')
child = fp.preauthChild(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_BytesInstantiation(self):
"""
L{FilePath} instantiated with a L{bytes} path will return a bytes-mode
FilePath.
"""
fp = filepath.FilePath(b"./")
self.assertEqual(type(fp.path), bytes)
def test_BytesInstantiationBytesChild(self):
"""
Calling L{FilePath.child} on a bytes-mode L{FilePath} with a bytes
subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(b"./")
child = fp.child(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_BytesInstantiationUnicodeChild(self):
"""
Calling L{FilePath.child} on a bytes-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'parent-mon\u20acy'.encode('utf-8'))
child = fp.child(u"mon\u20acy")
self.assertEqual(type(child.path), unicode)
def test_BytesInstantiationBytesPreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a bytes-mode L{FilePath} with a
bytes subpath will return a bytes-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy'.encode('utf-8'))
child = fp.preauthChild(u'child-mon\u20acy'.encode('utf-8'))
self.assertEqual(type(child.path), bytes)
def test_BytesInstantiationUnicodePreauthChild(self):
"""
Calling L{FilePath.preauthChild} on a bytes-mode L{FilePath} with a text
subpath will return a text-mode FilePath.
"""
fp = filepath.FilePath(u'./parent-mon\u20acy'.encode('utf-8'))
child = fp.preauthChild(u"mon\u20acy")
self.assertEqual(type(child.path), unicode)
def test_unicoderepr(self):
"""
The repr of a L{unicode} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(u"/mon\u20acy")
reprOutput = repr(fp)
if _PY3:
self.assertEqual("FilePath('/mon\u20acy')", reprOutput)
else:
self.assertEqual("FilePath(u'/mon\\u20acy')", reprOutput)
def test_bytesrepr(self):
"""
The repr of a L{bytes} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(u'/parent-mon\u20acy'.encode('utf-8'))
reprOutput = repr(fp)
if _PY3:
self.assertEqual(
"FilePath(b'/parent-mon\\xe2\\x82\\xacy')", reprOutput)
else:
self.assertEqual(
"FilePath('/parent-mon\\xe2\\x82\\xacy')", reprOutput)
def test_unicodereprWindows(self):
"""
The repr of a L{unicode} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(u"C:\\")
reprOutput = repr(fp)
if _PY3:
self.assertEqual("FilePath('C:\\\\')", reprOutput)
else:
self.assertEqual("FilePath(u'C:\\\\')", reprOutput)
def test_bytesreprWindows(self):
"""
The repr of a L{bytes} L{FilePath} shouldn't burst into flames.
"""
fp = filepath.FilePath(b"C:\\")
reprOutput = repr(fp)
if _PY3:
self.assertEqual("FilePath(b'C:\\\\')", reprOutput)
else:
self.assertEqual("FilePath('C:\\\\')", reprOutput)
if platform.isWindows():
test_unicoderepr.skip = "Test will not work on Windows"
test_bytesrepr.skip = "Test will not work on Windows"
else:
test_unicodereprWindows.skip = "Test only works on Windows"
test_bytesreprWindows.skip = "Test only works on Windows"
def test_mixedTypeGlobChildren(self):
"""
C{globChildren} will return the same type as the pattern argument.
"""
fp = filepath.FilePath(u"/")
children = fp.globChildren(b"*")
self.assertIsInstance(children[0].path, bytes)
def test_unicodeGlobChildren(self):
"""
C{globChildren} works with L{unicode}.
"""
fp = filepath.FilePath(u"/")
children = fp.globChildren(u"*")
self.assertIsInstance(children[0].path, unicode)
def test_unicodeBasename(self):
"""
        Calling C{basename} on a text-mode L{FilePath} returns L{unicode}.
"""
fp = filepath.FilePath(u"./")
self.assertIsInstance(fp.basename(), unicode)
def test_unicodeDirname(self):
"""
Calling C{dirname} on a text-mode L{FilePath} returns L{unicode}.
"""
fp = filepath.FilePath(u"./")
self.assertIsInstance(fp.dirname(), unicode)
def test_unicodeParent(self):
"""
Calling C{parent} on a text-mode L{FilePath} will return a text-mode
L{FilePath}.
"""
fp = filepath.FilePath(u"./")
parent = fp.parent()
self.assertIsInstance(parent.path, unicode)
def test_mixedTypeTemporarySibling(self):
"""
A L{bytes} extension to C{temporarySibling} will mean a L{bytes} mode
L{FilePath} is returned.
"""
fp = filepath.FilePath(u"./mon\u20acy")
tempSibling = fp.temporarySibling(b".txt")
self.assertIsInstance(tempSibling.path, bytes)
def test_unicodeTemporarySibling(self):
"""
A L{unicode} extension to C{temporarySibling} will mean a L{unicode}
mode L{FilePath} is returned.
"""
fp = filepath.FilePath(u"/tmp/mon\u20acy")
tempSibling = fp.temporarySibling(u".txt")
self.assertIsInstance(tempSibling.path, unicode)
def test_mixedTypeSiblingExtensionSearch(self):
"""
C{siblingExtensionSearch} called with L{bytes} on a L{unicode}-mode
L{FilePath} will return a L{list} of L{bytes}-mode L{FilePath}s.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtensionSearch(b".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, bytes)
def test_unicodeSiblingExtensionSearch(self):
"""
C{siblingExtensionSearch} called with L{unicode} on a L{unicode}-mode
L{FilePath} will return a L{list} of L{unicode}-mode L{FilePath}s.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtensionSearch(u".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, unicode)
def test_mixedTypeSiblingExtension(self):
"""
C{siblingExtension} called with L{bytes} on a L{unicode}-mode
L{FilePath} will return a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtension(b".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, bytes)
def test_unicodeSiblingExtension(self):
"""
C{siblingExtension} called with L{unicode} on a L{unicode}-mode
L{FilePath} will return a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
sibling = filepath.FilePath(fp._asTextPath() + u".txt")
sibling.touch()
newPath = fp.siblingExtension(u".txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, unicode)
def test_mixedTypeChildSearchPreauth(self):
"""
C{childSearchPreauth} called with L{bytes} on a L{unicode}-mode
L{FilePath} will return a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
fp.createDirectory()
self.addCleanup(lambda: fp.remove())
child = fp.child("text.txt")
child.touch()
newPath = fp.childSearchPreauth(b"text.txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, bytes)
def test_unicodeChildSearchPreauth(self):
"""
C{childSearchPreauth} called with L{unicode} on a L{unicode}-mode
L{FilePath} will return a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./mon\u20acy")
fp.createDirectory()
self.addCleanup(lambda: fp.remove())
child = fp.child("text.txt")
child.touch()
newPath = fp.childSearchPreauth(u"text.txt")
self.assertIsInstance(newPath, filepath.FilePath)
self.assertIsInstance(newPath.path, unicode)
def test_asBytesModeFromUnicode(self):
"""
C{asBytesMode} on a L{unicode}-mode L{FilePath} returns a new
L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./tmp")
newfp = fp.asBytesMode()
self.assertIsNot(fp, newfp)
self.assertIsInstance(newfp.path, bytes)
def test_asTextModeFromBytes(self):
"""
        C{asTextMode} on a L{bytes}-mode L{FilePath} returns a new
        L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(b"./tmp")
newfp = fp.asTextMode()
self.assertIsNot(fp, newfp)
self.assertIsInstance(newfp.path, unicode)
def test_asBytesModeFromBytes(self):
"""
C{asBytesMode} on a L{bytes}-mode L{FilePath} returns the same
L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(b"./tmp")
newfp = fp.asBytesMode()
self.assertIs(fp, newfp)
self.assertIsInstance(newfp.path, bytes)
def test_asTextModeFromUnicode(self):
"""
C{asTextMode} on a L{unicode}-mode L{FilePath} returns the same
L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"./tmp")
newfp = fp.asTextMode()
self.assertIs(fp, newfp)
self.assertIsInstance(newfp.path, unicode)
def test_asBytesModeFromUnicodeWithEncoding(self):
"""
C{asBytesMode} with an C{encoding} argument uses that encoding when
coercing the L{unicode}-mode L{FilePath} to a L{bytes}-mode L{FilePath}.
"""
fp = filepath.FilePath(u"\u2603")
newfp = fp.asBytesMode(encoding="utf-8")
self.assertIn(b"\xe2\x98\x83", newfp.path)
def test_asTextModeFromBytesWithEncoding(self):
"""
C{asTextMode} with an C{encoding} argument uses that encoding when
coercing the L{bytes}-mode L{FilePath} to a L{unicode}-mode L{FilePath}.
"""
fp = filepath.FilePath(b'\xe2\x98\x83')
newfp = fp.asTextMode(encoding="utf-8")
self.assertIn(u"\u2603", newfp.path)
def test_asBytesModeFromUnicodeWithUnusableEncoding(self):
"""
C{asBytesMode} with an C{encoding} argument that can't be used to encode
the unicode path raises a L{UnicodeError}.
"""
fp = filepath.FilePath(u"\u2603")
with self.assertRaises(UnicodeError):
fp.asBytesMode(encoding="ascii")
def test_asTextModeFromBytesWithUnusableEncoding(self):
"""
        C{asTextMode} with an C{encoding} argument that can't be used to decode
        the bytes path raises a L{UnicodeError}.
"""
fp = filepath.FilePath(b"\u2603")
with self.assertRaises(UnicodeError):
fp.asTextMode(encoding="utf-32")
| mit |
blockstack/packaging | imported/future/src/libpasteurize/fixes/fix_add_all_future_builtins.py | 60 | 1270 | """
For the ``future`` package.
Adds this import line::
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, list, map, next, object, oct, open, pow,
range, round, str, super, zip)
to a module, irrespective of whether each definition is used.
Adds these imports after any other imports (in an initial block of them).
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from libfuturize.fixer_util import touch_import_top
class FixAddAllFutureBuiltins(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "file_input"
run_order = 1
def transform(self, node, results):
# import_str = """(ascii, bytes, chr, dict, filter, hex, input,
# int, list, map, next, object, oct, open, pow,
# range, round, str, super, zip)"""
touch_import_top(u'builtins', '*', node)
# builtins = """ascii bytes chr dict filter hex input
# int list map next object oct open pow
# range round str super zip"""
# for builtin in sorted(builtins.split(), reverse=True):
# touch_import_top(u'builtins', builtin, node)
| gpl-3.0 |
manpen/thrill | frontends/swig_python/python_test.py | 4 | 4291 | #!/usr/bin/env python
##########################################################################
# frontends/swig_python/python_test.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import unittest
import threading
import sys
import thrill
class TryThread(threading.Thread):
def __init__(self, **kwargs):
threading.Thread.__init__(self, **kwargs)
self.exception = None
def run(self):
try:
threading.Thread.run(self)
except Exception:
self.exception = sys.exc_info()
raise
def run_thrill_threads(num_threads, thread_func):
# construct a local context mock network
ctxs = thrill.PyContext.ConstructLoopback(num_threads, 1)
# but then start python threads for each context
threads = []
for thrid in range(0, num_threads):
t = TryThread(target=thread_func, args=(ctxs[thrid],))
t.start()
threads.append(t)
# wait for computation to finish
for thr in threads:
thr.join()
# check for exceptions
for thr in threads:
if thr.exception:
raise Exception(thr.exception)
def run_tests(thread_func):
for num_threads in [1, 2, 5]:
run_thrill_threads(num_threads, thread_func)
class TestOperations(unittest.TestCase):
def test_generate_allgather(self):
def test(ctx):
test_size = 1024
dia1 = ctx.Generate(
lambda x: [int(x), "hello %d" % (x)], test_size)
self.assertEqual(dia1.Size(), test_size)
check = [[int(x), "hello %d" % (x)] for x in range(0, test_size)]
self.assertEqual(dia1.AllGather(), check)
run_tests(test)
def test_generate_map_allgather(self):
def test(ctx):
test_size = 1024
dia1 = ctx.Generate(lambda x: int(x), test_size)
self.assertEqual(dia1.Size(), test_size)
dia2 = dia1.Map(lambda x: [int(x), "hello %d" % (x)])
check = [[int(x), "hello %d" % (x)] for x in range(0, test_size)]
self.assertEqual(dia2.Size(), test_size)
self.assertEqual(dia2.AllGather(), check)
dia3 = dia1.Map(lambda x: [int(x), "two %d" % (x)])
check = [[int(x), "two %d" % (x)] for x in range(0, test_size)]
self.assertEqual(dia3.Size(), test_size)
self.assertEqual(dia3.AllGather(), check)
run_tests(test)
def test_distribute_map_filter_allgather(self):
def test(ctx):
test_size = 1024
dia1 = ctx.Distribute([x * x for x in range(0, test_size)])
self.assertEqual(dia1.Size(), test_size)
dia2 = dia1.Map(lambda x: [int(x), "hello %d" % (x)])
dia3 = dia2.Filter(lambda x: x[0] >= 16 and x[0] < 10000)
check = [[int(x * x), "hello %d" % (x * x)]
for x in range(4, 100)]
self.assertEqual(dia3.AllGather(), check)
run_tests(test)
def my_generator(self, index):
#print("generator at index", index)
return (index, "hello at %d" % (index))
def my_thread(self, ctx):
print("thread in python, rank", ctx.my_rank())
dia1 = ctx.Generate(lambda x: [int(x), x], 50)
dia2 = dia1.Map(lambda x: (x[0], x[1] + " mapped"))
s = dia2.Size()
print("Size:", s)
self.assertEqual(s, 50)
print("AllGather:", dia2.AllGather())
dia3 = dia2.ReduceBy(lambda x: x[0] % 10,
lambda x, y: (x + y))
print("dia3.Size:", dia3.Size())
print("dia3.AllGather:", dia3.AllGather())
dia4 = dia3.Filter(lambda x: x[0] == 2)
print("dia4.AllGather:", dia4.AllGather())
#####
dia5 = ctx.Distribute([2, 3, 5, 7, 11, 13, 17, 19])
print("dia5.AllGather:", dia5.AllGather())
def notest_operations(self):
run_thrill_threads(4, self.my_thread)
if __name__ == '__main__':
unittest.main()
##########################################################################
| bsd-2-clause |
lkeijser/func | test/unittest/test_func_arg.py | 8 | 4973 | ##
## Copyright 2007, Red Hat, Inc
## see AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
#tester module for ArgCompatibility
from func.minion.func_arg import ArgCompatibility
class TestArgCompatibility:
def setUp(self):
#create the simple object
self.ac = ArgCompatibility(self.dummy_arg_getter())
def test_arg_compatibility(self):
"""
        Testing the method argument compatibility
"""
result = self.ac.validate_all()
assert result == True
self.ac = ArgCompatibility(self.dummy_no_getter())
result = self.ac.validate_all()
assert result == True
self.ac = ArgCompatibility(self.dummy_empty_args())
result = self.ac.validate_all()
assert result == True
def test_is_all_arguments_registered(self):
#create the dummy class
tc = FooClass()
arguments = tc.register_method()
assert self.ac.is_all_arguments_registered(tc,'foomethod',arguments['foomethod']['args'])==True
print arguments
assert self.ac.validate_all()==True
def dummy_no_getter(self):
return {}
def dummy_empty_args(self):
return{
'myfunc':{
'args':{},
'description':'Cool methods here'
}
}
def dummy_arg_getter(self):
"""
A simple method to test the stuff we have written for
        arg compatibility. I just return a dict with proper stuff.
        More tests should be added here to check that nothing was missed.
"""
return {
'hifunc':{
'args':{
'app':{
'type':'int',
'range':[0,100],
'optional':False,
'default' : 12
},
'platform':{
'type':'string',
'options':["fedora","redhat","ubuntu"],
'description':"Hey im a fedora fan",
'default':'fedora8',
},
'platform2':{
'type':'string',
'min_length':4,
'max_length':33,
'description':"Hey im a fedora fan",
'default':'fedora8',
},
'is_independent':{
'type':'boolean',
'default' :False,
'description':'Are you independent ?',
'optional':False
},
'some_string':{
'type':'string',
'validator': "^[a-zA-Z]$",
'description':'String to be validated',
'default':'makkalot',
                        'optional':False}, # validator is a regex string for those who need better validation, so when we have options there is no need to use a validator, and the reverse is also true
                    # also define a float; we don't actually need it, but it may be useful for the UI stuff.
'some_float':{
'type':'float',
'description':'The float point value',
'default':33.44,
'optional':False
},
'some_iterable':{
'type':'list',
'description':'The value and description for *arg',
'optional':True, #that is where it makes sense
'validator':'^[0-9]+$',#maybe useful to say it is to be a number for example
},
'some_hash':{
'type':'hash',
'description':"Dummy desc here",
'optional':True, #of course it is,
'validator':"^[a-z]*$",#only for values not keys
}
},
'description':"The dummy method description",
}
}
class FooClass(object):
"""
Sample class for testing the is_all_arguments_registered
method functionality ...
"""
def foomethod(self,arg1,arg5,arg4,*arg,**kw):
pass
def register_method(self):
return{
'foomethod':{
'args':{
'arg1':{},
'arg4':{},
'arg5':{},
'arg':{},
'kw':{},
}
}
}
| gpl-2.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/command/bdist_dumb.py | 53 | 4901 | """distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_dumb.py 61000 2008-02-23 17:40:11Z christian.heimes $"
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb (Command):
description = "create a \"dumb\" built distribution"
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.relative = 0
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'))
# finalize_options()
def run (self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
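        # For a --relative build the archive is rooted at the install base inside
        # the pseudo-install tree; otherwise the whole tree is archived.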
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
# class bdist_dumb
| apache-2.0 |
shujaatak/UAV_MissionPlanner | Lib/site-packages/numpy/lib/tests/test__datasource.py | 54 | 10225 | import os
import sys
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
from urlparse import urlparse
from urllib2 import URLError
import urllib2
from numpy.testing import *
from numpy.compat import asbytes
import numpy.lib._datasource as datasource
def urlopen_stub(url, data=None):
'''Stub to replace urlopen for testing.'''
if url == valid_httpurl():
tmpfile = NamedTemporaryFile(prefix='urltmp_')
return tmpfile
else:
raise URLError('Name or service not known')
old_urlopen = None
def setup():
global old_urlopen
old_urlopen = urllib2.urlopen
urllib2.urlopen = urlopen_stub
def teardown():
urllib2.urlopen = old_urlopen
# A valid website for more robust testing
http_path = 'http://www.google.com/'
http_file = 'index.html'
http_fakepath = 'http://fake.abc.web/site/'
http_fakefile = 'fake.txt'
malicious_files = ['/etc/shadow', '../../shadow',
'..\\system.dat', 'c:\\windows\\system.dat']
magic_line = asbytes('three is the magic number')
# Utility functions used by many TestCases
def valid_textfile(filedir):
# Generate and return a valid temporary file.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
os.close(fd)
return path
def invalid_textfile(filedir):
# Generate and return an invalid filename.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
os.close(fd)
os.remove(path)
return path
def valid_httpurl():
return http_path+http_file
def invalid_httpurl():
return http_fakepath+http_fakefile
def valid_baseurl():
return http_path
def invalid_baseurl():
return http_fakepath
def valid_httpfile():
return http_file
def invalid_httpfile():
return http_fakefile
class TestDataSourceOpen(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
assert self.ds.open(valid_httpurl())
def test_InvalidHTTP(self):
url = invalid_httpurl()
self.assertRaises(IOError, self.ds.open, url)
try:
self.ds.open(url)
except IOError, e:
# Regression test for bug fixed in r4342.
assert e.errno is None
def test_InvalidHTTPCacheURLError(self):
self.assertRaises(URLError, self.ds._cache, invalid_httpurl())
def test_ValidFile(self):
local_file = valid_textfile(self.tmpdir)
assert self.ds.open(local_file)
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
self.assertRaises(IOError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
import nose
raise nose.SkipTest
# Test datasource's internal file_opener for Gzip files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
self.assertEqual(magic_line, result)
def test_ValidBz2File(self):
try:
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
import nose
raise nose.SkipTest
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
self.assertEqual(magic_line, result)
class TestDataSourceExists(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
assert self.ds.exists(valid_httpurl())
def test_InvalidHTTP(self):
self.assertEqual(self.ds.exists(invalid_httpurl()), False)
def test_ValidFile(self):
# Test valid file in destpath
tmpfile = valid_textfile(self.tmpdir)
assert self.ds.exists(tmpfile)
# Test valid local file not in destpath
localdir = mkdtemp()
tmpfile = valid_textfile(localdir)
assert self.ds.exists(tmpfile)
rmtree(localdir)
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
self.assertEqual(self.ds.exists(tmpfile), False)
class TestDataSourceAbspath(TestCase):
def setUp(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
self.assertEqual(local_path, self.ds.abspath(valid_httpurl()))
def test_ValidFile(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
self.assertEqual(tmpfile, self.ds.abspath(os.path.split(tmpfile)[-1]))
# Test filename with complete path
self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
def test_InvalidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
invalidhttp = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl()))
def test_InvalidFile(self):
invalidfile = valid_textfile(self.tmpdir)
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile))
def test_sandboxing(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
assert tmp_path(valid_httpurl()).startswith(self.tmpdir)
assert tmp_path(invalid_httpurl()).startswith(self.tmpdir)
assert tmp_path(tmpfile).startswith(self.tmpdir)
assert tmp_path(tmpfilename).startswith(self.tmpdir)
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
os.sep = '\\'
self.test_ValidHTTP()
self.test_ValidFile()
self.test_InvalidHTTP()
self.test_InvalidFile()
self.test_sandboxing()
finally:
os.sep = orig_os_sep
class TestRepositoryAbspath(TestCase):
def setUp(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.repos
def test_ValidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.repos._destpath, netloc, \
upath.strip(os.sep).strip('/'))
filepath = self.repos.abspath(valid_httpfile())
self.assertEqual(local_path, filepath)
def test_sandboxing(self):
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
assert tmp_path(valid_httpfile()).startswith(self.tmpdir)
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
os.sep = '\\'
self.test_ValidHTTP()
self.test_sandboxing()
finally:
os.sep = orig_os_sep
class TestRepositoryExists(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.repos
def test_ValidFile(self):
# Create local temp file
tmpfile = valid_textfile(self.tmpdir)
assert self.repos.exists(tmpfile)
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
self.assertEqual(self.repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self):
assert self.repos.exists(valid_httpurl())
def test_CachedHTTPFile(self):
localfile = valid_httpurl()
# Create a locally cached temp file with an URL based
# directory structure. This is similar to what Repository.open
# would do.
scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
local_path = os.path.join(self.repos._destpath, netloc)
os.mkdir(local_path, 0700)
tmpfile = valid_textfile(local_path)
assert self.repos.exists(tmpfile)
class TestOpenFunc(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
rmtree(self.tmpdir)
def test_DataSourceOpen(self):
local_file = valid_textfile(self.tmpdir)
# Test case where destpath is passed in
assert datasource.open(local_file, destpath=self.tmpdir)
# Test case where default destpath is used
assert datasource.open(local_file)
if hasattr(sys, 'gettotalrefcount'):
# skip these, when Python was compiled using the --with-pydebug option
del TestDataSourceOpen
del TestDataSourceExists
del TestDataSourceAbspath
del TestRepositoryExists
del TestOpenFunc
if __name__ == "__main__":
run_module_suite()
| gpl-2.0 |
omarocegueda/dipy | dipy/segment/tissue.py | 6 | 6207 | import numpy as np
from dipy.sims.voxel import add_noise
from dipy.segment.mrf import (ConstantObservationModel,
IteratedConditionalModes)
class TissueClassifierHMRF(object):
r"""
This class contains the methods for tissue classification using the Markov
Random Fields modeling approach
"""
def __init__(self, save_history=False, verbose=True):
self.save_history = save_history
self.segmentations = []
self.pves = []
self.energies = []
self.energies_sum = []
self.verbose = verbose
def classify(self, image, nclasses, beta, tolerance=None, max_iter=None):
r"""
This method uses the Maximum a posteriori - Markov Random Field
approach for segmentation by using the Iterative Conditional Modes and
Expectation Maximization to estimate the parameters.
Parameters
----------
image : ndarray,
3D structural image.
nclasses : int,
number of desired classes.
beta : float,
smoothing parameter, the higher this number the smoother the
output will be.
tolerance: float,
value that defines the percentage of change tolerated to
prevent the ICM loop to stop. Default is 1e-05.
max_iter : float,
fixed number of desired iterations. Default is 100.
If the user only specifies this parameter, the tolerance
            value will not be considered. If neither of these two
            parameters is specified, the method runs for at most 100
            iterations, stopping earlier once the tolerance criterion is met.
Returns
-------
initial_segmentation : ndarray,
3D segmented image with all tissue types
specified in nclasses.
final_segmentation : ndarray,
3D final refined segmentation containing all
tissue types.
PVE : ndarray,
3D probability map of each tissue type.
"""
nclasses = nclasses + 1 # One extra class for the background
energy_sum = [1e-05]
com = ConstantObservationModel()
icm = IteratedConditionalModes()
if image.max() > 1:
image = np.interp(image, [0, image.max()], [0.0, 1.0])
mu, sigma = com.initialize_param_uniform(image, nclasses)
p = np.argsort(mu)
mu = mu[p]
sigma = sigma[p]
sigmasq = sigma ** 2
neglogl = com.negloglikelihood(image, mu, sigmasq, nclasses)
seg_init = icm.initialize_maximum_likelihood(neglogl)
mu, sigma = com.seg_stats(image, seg_init, nclasses)
sigmasq = sigma ** 2
zero = np.zeros_like(image) + 0.001
zero_noise = add_noise(zero, 10000, 1, noise_type='gaussian')
image_gauss = np.where(image == 0, zero_noise, image)
final_segmentation = np.empty_like(image)
initial_segmentation = seg_init.copy()
if max_iter is not None and tolerance is None:
for i in range(max_iter):
if self.verbose:
print('>> Iteration: ' + str(i))
PLN = icm.prob_neighborhood(seg_init, beta, nclasses)
PVE = com.prob_image(image_gauss, nclasses, mu, sigmasq, PLN)
mu_upd, sigmasq_upd = com.update_param(image_gauss,
PVE, mu, nclasses)
ind = np.argsort(mu_upd)
mu_upd = mu_upd[ind]
sigmasq_upd = sigmasq_upd[ind]
negll = com.negloglikelihood(image_gauss,
mu_upd, sigmasq_upd, nclasses)
final_segmentation, energy = icm.icm_ising(negll,
beta, seg_init)
if self.save_history:
self.segmentations.append(final_segmentation)
self.pves.append(PVE)
self.energies.append(energy)
self.energies_sum.append(energy[energy > -np.inf].sum())
seg_init = final_segmentation.copy()
mu = mu_upd.copy()
sigmasq = sigmasq_upd.copy()
else:
max_iter = 100
for i in range(max_iter):
if self.verbose:
print('>> Iteration: ' + str(i))
PLN = icm.prob_neighborhood(seg_init, beta, nclasses)
PVE = com.prob_image(image_gauss, nclasses, mu, sigmasq, PLN)
mu_upd, sigmasq_upd = com.update_param(image_gauss,
PVE, mu, nclasses)
ind = np.argsort(mu_upd)
mu_upd = mu_upd[ind]
sigmasq_upd = sigmasq_upd[ind]
negll = com.negloglikelihood(image_gauss,
mu_upd, sigmasq_upd, nclasses)
final_segmentation, energy = icm.icm_ising(negll,
beta, seg_init)
energy_sum.append(energy[energy > -np.inf].sum())
if self.save_history:
self.segmentations.append(final_segmentation)
self.pves.append(PVE)
self.energies.append(energy)
self.energies_sum.append(energy[energy > -np.inf].sum())
if tolerance is None:
tolerance = 1e-05
if i % 10 == 0 and i != 0:
tol = tolerance * (np.amax(energy_sum) -
np.amin(energy_sum))
test_dist = np.absolute(np.amax(
energy_sum[np.size(energy_sum) - 5: i]) -
np.amin(energy_sum[np.size(energy_sum) - 5: i])
)
if test_dist < tol:
break
seg_init = final_segmentation.copy()
mu = mu_upd.copy()
sigmasq = sigmasq_upd.copy()
PVE = PVE[..., 1:]
return initial_segmentation, final_segmentation, PVE
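def _example_classify():
    """A minimal usage sketch of the classifier above (illustrative only):
    real use would pass a 3D T1-weighted volume rather than random data."""
    image = np.random.rand(20, 20, 20)
    hmrf = TissueClassifierHMRF(verbose=False)
    initial, final, pve = hmrf.classify(image, nclasses=3, beta=0.1, max_iter=5)
    # Expected shapes: final -> (20, 20, 20), pve -> (20, 20, 20, 3)
    return initial, final, pve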
| bsd-3-clause |
davidharrigan/django | tests/csrf_tests/tests.py | 78 | 23643 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token,
)
from django.template import RequestContext, Template
from django.template.context_processors import csrf
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import (
csrf_exempt, ensure_csrf_cookie, requires_csrf_token,
)
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content="""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(SimpleTestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = b"<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
If the token is longer than expected, it is ignored and a new token is
created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['httponly'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = b""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
@override_settings(DEBUG=True)
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req.META['SERVER_PORT'] = '443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - https://www.evil.org/somepage does not '
'match any trusted origins.',
status_code=403,
)
@override_settings(DEBUG=True)
def test_https_malformed_referer(self):
"""
A POST HTTPS request with a bad referer is rejected.
"""
malformed_referer_msg = 'Referer checking failed - Referer is malformed.'
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://http://www.example.com/'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
# Empty
req.META['HTTP_REFERER'] = ''
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Non-ASCII
req.META['HTTP_REFERER'] = b'\xd8B\xf6I\xdf'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing scheme
# >>> urlparse('//example.com/')
# ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='')
req.META['HTTP_REFERER'] = '//example.com/'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing netloc
# >>> urlparse('https://')
# ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='')
req.META['HTTP_REFERER'] = 'https://'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
"""
A POST HTTPS request with a good referer is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
"""
A POST HTTPS request with a good referer is accepted where the referer
contains no trailing slash.
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['dashboard.example.com'])
def test_https_csrf_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['.example.com'])
def test_https_csrf_wildcard_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS
wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'https://foo.example.com/'
req.META['SERVER_PORT'] = '443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://foo.example.com:4443/'
req.META['SERVER_PORT'] = '4443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(CSRF_COOKIE_DOMAIN='.example.com', DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://example.com/'
req.META['SERVER_PORT'] = '443'
response = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
def test_ensures_csrf_cookie_no_middleware(self):
"""
The ensure_csrf_cookie() decorator works without middleware.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
resp = view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp.get('Vary', ''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, view, (), {})
resp = view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_ensures_csrf_cookie_no_logging(self):
"""
ensure_csrf_cookie() doesn't log warnings (#19436).
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
class TestHandler(logging.Handler):
def emit(self, record):
raise Exception("This shouldn't have happened!")
logger = logging.getLogger('django.request')
test_handler = TestHandler()
old_log_level = logger.level
try:
logger.addHandler(test_handler)
logger.setLevel(logging.WARNING)
req = self._get_GET_no_csrf_cookie_request()
view(req)
finally:
logger.removeHandler(test_handler)
logger.setLevel(old_log_level)
def test_csrf_cookie_age(self):
"""
CSRF cookie age can be set using settings.CSRF_COOKIE_AGE.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = 123
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
max_age = resp2.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
CSRF cookie age does not have max age set and therefore uses
session-based cookies.
"""
req = self._get_GET_no_csrf_cookie_request()
MAX_AGE = None
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
max_age = resp2.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, '')
def test_post_data_read_failure(self):
"""
#20128 -- IOErrors during POST data reading should be caught and
treated as if the POST data wasn't there.
"""
class CsrfPostRequest(HttpRequest):
"""
HttpRequest that can raise an IOError when accessing POST data
"""
def __init__(self, token, raise_error):
super(CsrfPostRequest, self).__init__()
self.method = 'POST'
self.raise_error = False
self.COOKIES[settings.CSRF_COOKIE_NAME] = token
self.POST['csrfmiddlewaretoken'] = token
self.raise_error = raise_error
def _load_post_and_files(self):
raise IOError('error reading input data')
def _get_post(self):
if self.raise_error:
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
token = 'ABC'
req = CsrfPostRequest(token, raise_error=False)
resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = CsrfPostRequest(token, raise_error=True)
resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(resp.status_code, 403)
| bsd-3-clause |
wizyoung/workflows.kyoyue | PIL/ImageWin.py | 9 | 7167 | #
# The Python Imaging Library.
# $Id$
#
# a Windows DIB display interface
#
# History:
# 1996-05-20 fl Created
# 1996-09-20 fl Fixed subregion exposure
# 1997-09-21 fl Added draw primitive (for tzPrint)
# 2003-05-21 fl Added experimental Window/ImageWindow classes
# 2003-09-05 fl Added fromstring/tostring methods
#
# Copyright (c) Secret Labs AB 1997-2003.
# Copyright (c) Fredrik Lundh 1996-2003.
#
# See the README file for information on usage and redistribution.
#
from . import Image
class HDC(object):
"""
Wraps an HDC integer. The resulting object can be passed to the
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
methods.
"""
def __init__(self, dc):
self.dc = dc
def __int__(self):
return self.dc
class HWND(object):
"""
Wraps an HWND integer. The resulting object can be passed to the
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
methods, instead of a DC.
"""
def __init__(self, wnd):
self.wnd = wnd
def __int__(self):
return self.wnd
class Dib(object):
"""
A Windows bitmap with the given mode and size. The mode can be one of "1",
"L", "P", or "RGB".
If the display requires a palette, this constructor creates a suitable
palette and associates it with the image. For an "L" image, 128 greylevels
are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
with 20 greylevels.
To make sure that palettes work properly under Windows, you must call the
**palette** method upon certain events from Windows.
:param image: Either a PIL image, or a mode string. If a mode string is
used, a size must also be given. The mode can be one of "1",
"L", "P", or "RGB".
:param size: If the first argument is a mode string, this
defines the size of the image.
"""
def __init__(self, image, size=None):
if hasattr(image, "mode") and hasattr(image, "size"):
mode = image.mode
size = image.size
else:
mode = image
image = None
if mode not in ["1", "L", "P", "RGB"]:
mode = Image.getmodebase(mode)
self.image = Image.core.display(mode, size)
self.mode = mode
self.size = size
if image:
self.paste(image)
def expose(self, handle):
"""
Copy the bitmap contents to a device context.
:param handle: Device context (HDC), cast to a Python integer, or an
HDC or HWND instance. In PythonWin, you can use the
:py:meth:`CDC.GetHandleAttrib` to get a suitable handle.
"""
if isinstance(handle, HWND):
dc = self.image.getdc(handle)
try:
result = self.image.expose(dc)
finally:
self.image.releasedc(handle, dc)
else:
result = self.image.expose(handle)
return result
def draw(self, handle, dst, src=None):
"""
Same as expose, but allows you to specify where to draw the image, and
what part of it to draw.
The destination and source areas are given as 4-tuple rectangles. If
the source is omitted, the entire image is copied. If the source and
the destination have different sizes, the image is resized as
necessary.
"""
if not src:
src = (0, 0) + self.size
if isinstance(handle, HWND):
dc = self.image.getdc(handle)
try:
result = self.image.draw(dc, dst, src)
finally:
self.image.releasedc(handle, dc)
else:
result = self.image.draw(handle, dst, src)
return result
def query_palette(self, handle):
"""
Installs the palette associated with the image in the given device
context.
This method should be called upon **QUERYNEWPALETTE** and
**PALETTECHANGED** events from Windows. If this method returns a
non-zero value, one or more display palette entries were changed, and
the image should be redrawn.
:param handle: Device context (HDC), cast to a Python integer, or an
HDC or HWND instance.
:return: A true value if one or more entries were changed (this
indicates that the image should be redrawn).
"""
if isinstance(handle, HWND):
handle = self.image.getdc(handle)
try:
result = self.image.query_palette(handle)
finally:
self.image.releasedc(handle, handle)
else:
result = self.image.query_palette(handle)
return result
def paste(self, im, box=None):
"""
Paste a PIL image into the bitmap image.
:param im: A PIL image. The size must match the target region.
If the mode does not match, the image is converted to the
mode of the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and
lower pixel coordinate. If None is given instead of a
tuple, all of the image is assumed.
"""
im.load()
if self.mode != im.mode:
im = im.convert(self.mode)
if box:
self.image.paste(im.im, box)
else:
self.image.paste(im.im)
def frombytes(self, buffer):
"""
Load display memory contents from byte data.
:param buffer: A buffer containing display data (usually
data returned from <b>tobytes</b>)
"""
return self.image.frombytes(buffer)
def tobytes(self):
"""
Copy display memory contents to bytes object.
:return: A bytes object containing display data.
"""
return self.image.tobytes()
class Window(object):
"""Create a Window with the given title size."""
def __init__(self, title="PIL", width=None, height=None):
self.hwnd = Image.core.createwindow(
title, self.__dispatcher, width or 0, height or 0
)
def __dispatcher(self, action, *args):
return getattr(self, "ui_handle_" + action)(*args)
def ui_handle_clear(self, dc, x0, y0, x1, y1):
pass
def ui_handle_damage(self, x0, y0, x1, y1):
pass
def ui_handle_destroy(self):
pass
def ui_handle_repair(self, dc, x0, y0, x1, y1):
pass
def ui_handle_resize(self, width, height):
pass
def mainloop(self):
Image.core.eventloop()
class ImageWindow(Window):
"""Create an image window which displays the given image."""
def __init__(self, image, title="PIL"):
if not isinstance(image, Dib):
image = Dib(image)
self.image = image
width, height = image.size
Window.__init__(self, title, width=width, height=height)
def ui_handle_repair(self, dc, x0, y0, x1, y1):
self.image.draw(dc, (x0, y0, x1, y1))
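def _example_show(pil_image):
    """A minimal sketch (Windows only, illustrative): display a PIL image in
    a simple window using the Dib/ImageWindow classes defined above."""
    window = ImageWindow(pil_image, title="preview")
    window.mainloop()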
| mit |
detiber/openshift-ansible-contrib | reference-architecture/aws-ansible/add-cns-storage.py | 1 | 13272 | #!/usr/bin/env python
# vim: sw=2 ts=2
import click
import os
import sys
@click.command()
### Cluster options
@click.option('--console-port', default='443', type=click.IntRange(1,65535), help='OpenShift web console port',
show_default=True)
@click.option('--deployment-type', default='openshift-enterprise', help='OpenShift deployment type',
show_default=True)
@click.option('--openshift-sdn', default='openshift-ovs-subnet', type=click.Choice(['openshift-ovs-subnet', 'openshift-ovs-multitenant']), help='OpenShift SDN',
show_default=True)
### AWS/EC2 options
@click.option('--gluster-stack', help='Specify a gluster stack name. Making the name unique will allow for multiple deployments',
show_default=True)
@click.option('--region', default='us-east-1', help='ec2 region',
show_default=True)
@click.option('--ami', default='ami-10251c7a', help='ec2 ami',
show_default=True)
@click.option('--node-instance-type', default='m4.2xlarge', help='ec2 instance type',
show_default=True)
@click.option('--use-cloudformation-facts', is_flag=True, help='Use cloudformation to populate facts. Requires Deployment >= OCP 3.5',
show_default=True)
@click.option('--keypair', help='ec2 keypair name',
show_default=True)
@click.option('--private-subnet-id1', help='Specify a Private subnet within the existing VPC',
show_default=True)
@click.option('--private-subnet-id2', help='Specify a Private subnet within the existing VPC',
show_default=True)
@click.option('--private-subnet-id3', help='Specify a Private subnet within the existing VPC',
show_default=True)
@click.option('--gluster-volume-size', default='500', help='Gluster volume size in GB',
show_default=True)
@click.option('--gluster-volume-type', default='st1', help='Gluster volume type',
show_default=True)
@click.option('--iops', help='Specify the IOPS for a volume (used only with IO1)',
show_default=True)
### DNS options
@click.option('--public-hosted-zone', help='hosted zone for accessing the environment')
### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-password', help='Red Hat Subscription Management Password',
hide_input=True,)
@click.option('--rhsm-pool', help='Red Hat Subscription Management Pool Name')
### Miscellaneous options
@click.option('--containerized', default='False', help='Containerized installation of OpenShift',
show_default=True)
@click.option('--iam-role', help='Specify the name of the existing IAM Instance profile',
show_default=True)
@click.option('--node-sg', help='Specify the already existing node security group id',
show_default=True)
@click.option('--existing-stack', help='Specify the name of the existing CloudFormation stack')
@click.option('--no-confirm', is_flag=True,
help='Skip confirmation prompt')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)
def launch_refarch_env(region=None,
ami=None,
no_confirm=False,
node_instance_type=None,
gluster_stack=None,
keypair=None,
public_hosted_zone=None,
deployment_type=None,
console_port=443,
rhsm_user=None,
rhsm_password=None,
rhsm_pool=None,
containerized=None,
node_type=None,
private_subnet_id1=None,
private_subnet_id2=None,
private_subnet_id3=None,
gluster_volume_type=None,
gluster_volume_size=None,
openshift_sdn=None,
iops=None,
node_sg=None,
iam_role=None,
existing_stack=None,
use_cloudformation_facts=False,
verbose=0):
# Need to prompt for the R53 zone:
if public_hosted_zone is None:
public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')
if existing_stack is None:
existing_stack = click.prompt('Specify the name of the existing CloudFormation stack')
if gluster_stack is None:
gluster_stack = click.prompt('Specify a unique name for the CNS CloudFormation stack')
# If no keypair is specified fail:
if keypair is None:
keypair = click.prompt('A SSH keypair must be specified or created')
# If the user already provided values, don't bother asking again
if deployment_type in ['openshift-enterprise'] and rhsm_user is None:
rhsm_user = click.prompt("RHSM username?")
if deployment_type in ['openshift-enterprise'] and rhsm_password is None:
rhsm_password = click.prompt("RHSM password?", hide_input=True)
if deployment_type in ['openshift-enterprise'] and rhsm_pool is None:
rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name for OpenShift?")
# Prompt for vars if they are not defined
if use_cloudformation_facts and iam_role is None:
iam_role = "Computed by Cloudformations"
elif iam_role is None:
iam_role = click.prompt("Specify the IAM Role of the node?")
if use_cloudformation_facts and node_sg is None:
node_sg = "Computed by Cloudformations"
elif node_sg is None:
node_sg = click.prompt("Specify the Security Group for the nodes?")
if use_cloudformation_facts and private_subnet_id1 is None:
private_subnet_id1 = "Computed by Cloudformations"
elif private_subnet_id1 is None:
private_subnet_id1 = click.prompt("Specify the first private subnet for the nodes?")
if use_cloudformation_facts and private_subnet_id2 is None:
private_subnet_id2 = "Computed by Cloudformations"
elif private_subnet_id2 is None:
private_subnet_id2 = click.prompt("Specify the second private subnet for the nodes?")
if use_cloudformation_facts and private_subnet_id3 is None:
private_subnet_id3 = "Computed by Cloudformations"
elif private_subnet_id3 is None:
private_subnet_id3 = click.prompt("Specify the third private subnet for the nodes?")
if gluster_volume_type in ['io1']:
iops = click.prompt('Specify a numeric value for iops')
if iops is None:
iops = "NA"
# Hidden facts for infrastructure.yaml
create_key = "no"
create_vpc = "no"
add_node = "yes"
node_type = "gluster"
# Display information to the user about their choices
if use_cloudformation_facts:
click.echo('Configured values:')
click.echo('\tami: %s' % ami)
click.echo('\tregion: %s' % region)
click.echo('\tgluster_stack: %s' % gluster_stack)
click.echo('\tnode_instance_type: %s' % node_instance_type)
click.echo('\tgluster_volume_type: %s' % gluster_volume_type)
click.echo('\tgluster_volume_size: %s' % gluster_volume_size)
click.echo('\tiops: %s' % iops)
click.echo('\topenshift_sdn: %s' % openshift_sdn)
click.echo('\tkeypair: %s' % keypair)
click.echo('\tdeployment_type: %s' % deployment_type)
click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
click.echo('\tconsole port: %s' % console_port)
click.echo('\trhsm_user: %s' % rhsm_user)
click.echo('\trhsm_password: *******')
click.echo('\trhsm_pool: %s' % rhsm_pool)
click.echo('\tcontainerized: %s' % containerized)
click.echo('\texisting_stack: %s' % existing_stack)
click.echo('\tSubnets, Security Groups, and IAM Roles will be gathered from the CloudFormation stack')
click.echo("")
else:
click.echo('Configured values:')
click.echo('\tami: %s' % ami)
click.echo('\tregion: %s' % region)
click.echo('\tgluster_stack: %s' % gluster_stack)
click.echo('\tnode_instance_type: %s' % node_instance_type)
click.echo('\tprivate_subnet_id1: %s' % private_subnet_id1)
click.echo('\tprivate_subnet_id2: %s' % private_subnet_id2)
click.echo('\tprivate_subnet_id3: %s' % private_subnet_id3)
click.echo('\tgluster_volume_type: %s' % gluster_volume_type)
click.echo('\tgluster_volume_size: %s' % gluster_volume_size)
click.echo('\tiops: %s' % iops)
click.echo('\topenshift_sdn: %s' % openshift_sdn)
click.echo('\tkeypair: %s' % keypair)
click.echo('\tnode_sg: %s' % node_sg)
click.echo('\tdeployment_type: %s' % deployment_type)
click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
click.echo('\tconsole port: %s' % console_port)
click.echo('\trhsm_user: %s' % rhsm_user)
click.echo('\trhsm_password: *******')
click.echo('\trhsm_pool: %s' % rhsm_pool)
click.echo('\tcontainerized: %s' % containerized)
click.echo('\tiam_role: %s' % iam_role)
click.echo('\texisting_stack: %s' % existing_stack)
click.echo("")
if not no_confirm:
click.confirm('Continue using these values?', abort=True)
playbooks = ['playbooks/infrastructure.yaml', 'playbooks/add-node.yaml']
for playbook in playbooks:
# hide cache output unless in verbose mode
devnull='> /dev/null'
if verbose > 0:
devnull=''
# refresh the inventory cache to prevent stale hosts from
# interfering with re-running
command='inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
os.system(command)
# remove any cached facts to prevent stale data during a re-run
command='rm -rf .ansible/cached_facts'
os.system(command)
if use_cloudformation_facts:
command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
ami=%s \
keypair=%s \
gluster_stack=%s \
add_node=yes \
node_instance_type=%s \
public_hosted_zone=%s \
deployment_type=%s \
console_port=%s \
rhsm_user=%s \
rhsm_password=%s \
rhsm_pool="%s" \
containerized=%s \
node_type=gluster \
key_path=/dev/null \
create_key=%s \
create_vpc=%s \
gluster_volume_type=%s \
gluster_volume_size=%s \
iops=%s \
openshift_sdn=%s \
stack_name=%s \' %s' % (region,
ami,
keypair,
gluster_stack,
node_instance_type,
public_hosted_zone,
deployment_type,
console_port,
rhsm_user,
rhsm_password,
rhsm_pool,
containerized,
create_key,
create_vpc,
gluster_volume_type,
gluster_volume_size,
iops,
openshift_sdn,
existing_stack,
playbook)
else:
command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
ami=%s \
keypair=%s \
gluster_stack=%s \
add_node=yes \
node_sg=%s \
node_instance_type=%s \
private_subnet_id1=%s \
private_subnet_id2=%s \
private_subnet_id3=%s \
public_hosted_zone=%s \
deployment_type=%s \
console_port=%s \
rhsm_user=%s \
rhsm_password=%s \
rhsm_pool="%s" \
containerized=%s \
node_type=gluster \
iam_role=%s \
key_path=/dev/null \
create_key=%s \
create_vpc=%s \
gluster_volume_type=%s \
gluster_volume_size=%s \
iops=%s \
openshift_sdn=%s \
stack_name=%s \' %s' % (region,
ami,
keypair,
gluster_stack,
node_sg,
node_instance_type,
private_subnet_id1,
private_subnet_id2,
private_subnet_id3,
public_hosted_zone,
deployment_type,
console_port,
rhsm_user,
rhsm_password,
rhsm_pool,
containerized,
iam_role,
create_key,
create_vpc,
gluster_volume_type,
gluster_volume_size,
iops,
openshift_sdn,
existing_stack,
playbook)
if verbose > 0:
command += " -" + "".join(['v']*verbose)
click.echo('We are running: %s' % command)
status = os.system(command)
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
return os.WEXITSTATUS(status)
if __name__ == '__main__':
# check for AWS access info
if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print 'AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.'
sys.exit(1)
launch_refarch_env(auto_envvar_prefix='OSE_REFArch')
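# Illustrative invocation (all values are placeholders; AWS credentials must
# be exported as environment variables, as checked in __main__ above):
#   export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=...
#   ./add-cns-storage.py --existing-stack openshift-infra \
#       --gluster-stack cns1 --keypair ose-key \
#       --public-hosted-zone example.com \
#       --rhsm-user user --rhsm-password pass --rhsm-pool 'OpenShift Pool'
# Because of auto_envvar_prefix above, options may also be supplied through
# correspondingly named OSE_REFArch-prefixed environment variables.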
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-eventhub/azure/mgmt/eventhub/models/__init__.py | 2 | 2621 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
from .resource import Resource
from .sku import Sku
from .eh_namespace import EHNamespace
from .authorization_rule import AuthorizationRule
from .access_keys import AccessKeys
from .regenerate_access_key_parameters import RegenerateAccessKeyParameters
from .destination import Destination
from .capture_description import CaptureDescription
from .eventhub import Eventhub
from .consumer_group import ConsumerGroup
from .check_name_availability_parameter import CheckNameAvailabilityParameter
from .check_name_availability_result import CheckNameAvailabilityResult
from .operation_display import OperationDisplay
from .operation import Operation
from .error_response import ErrorResponse, ErrorResponseException
from .arm_disaster_recovery import ArmDisasterRecovery
from .operation_paged import OperationPaged
from .eh_namespace_paged import EHNamespacePaged
from .authorization_rule_paged import AuthorizationRulePaged
from .arm_disaster_recovery_paged import ArmDisasterRecoveryPaged
from .eventhub_paged import EventhubPaged
from .consumer_group_paged import ConsumerGroupPaged
from .event_hub_management_client_enums import (
SkuName,
SkuTier,
AccessRights,
KeyType,
EntityStatus,
EncodingCaptureDescription,
UnavailableReason,
ProvisioningStateDR,
RoleDisasterRecovery,
)
__all__ = [
'TrackedResource',
'Resource',
'Sku',
'EHNamespace',
'AuthorizationRule',
'AccessKeys',
'RegenerateAccessKeyParameters',
'Destination',
'CaptureDescription',
'Eventhub',
'ConsumerGroup',
'CheckNameAvailabilityParameter',
'CheckNameAvailabilityResult',
'OperationDisplay',
'Operation',
'ErrorResponse', 'ErrorResponseException',
'ArmDisasterRecovery',
'OperationPaged',
'EHNamespacePaged',
'AuthorizationRulePaged',
'ArmDisasterRecoveryPaged',
'EventhubPaged',
'ConsumerGroupPaged',
'SkuName',
'SkuTier',
'AccessRights',
'KeyType',
'EntityStatus',
'EncodingCaptureDescription',
'UnavailableReason',
'ProvisioningStateDR',
'RoleDisasterRecovery',
]
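# Illustrative use of the re-exported models (the constructor keyword
# arguments shown are an assumption based on the model names above, not a
# verified signature):
#   from azure.mgmt.eventhub.models import EHNamespace, Sku, SkuName
#   namespace = EHNamespace(location='westus', sku=Sku(name=SkuName.standard))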
| mit |
mileswwatkins/pupa | pupa/scrape/schemas/bill.py | 2 | 4736 | """
Schema for bill objects.
"""
from .common import sources, extras, fuzzy_date_blank, fuzzy_date
from opencivicdata import common
versions_or_documents = {
"items": {
"properties": {
"note": {"type": "string"},
"date": fuzzy_date_blank,
"links": {
"items": {
"properties": {
"media_type": {"type": "string", "blank": True },
"url": {"type": "string", "format": "uri"}
},
"type": "object"
},
"type": "array",
},
},
"type": "object"
},
"type": "array",
}
schema = {
"type": "object",
"properties": {
"legislative_session": {"type": "string"},
"identifier": {"type": "string"},
"title": {"type": "string"},
"from_organization": { "type": ["string", "null"] },
"classification": {"items": {"type": "string", "enum": common.BILL_CLASSIFICATIONS},
"type": "array"},
"subject": { "items": {"type": "string"}, "type": "array"},
"abstracts": {
"items": {
"properties": {
"abstract": {"type": "string"},
"note": {"type": "string", "blank": True},
"date": {"type": "string", "blank": True},
},
"type": "object"},
"type": "array",
},
"other_titles": {
"items": {
"properties": {
"title": {"type": "string"},
"note": {"type": "string", "blank": True},
},
"type": "object"
},
"type": "array",
},
"other_identifiers": {
"items": {
"properties": {
"identifier": {"type": "string"},
"note": {"type": "string", "blank": True},
"scheme": {"type": "string", "blank": True},
},
"type": "object"
},
"type": "array",
},
"actions": {
"items": {
"properties": {
"organization": { "type": ["string", "null"] },
"date": fuzzy_date,
"description": { "type": "string" },
"classification": {"items": {"type": "string",
"enum": common.BILL_ACTION_CLASSIFICATIONS },
"type": "array",
},
"related_entities": {
"items": {
"properties": {
"name": {"type": "string"},
"entity_type": {
"enum": ["organization", "person", ""],
"type": "string", "blank": True,
},
"person_id": {"type": ["string", "null"]},
"organization_id": {"type": ["string", "null"]},
},
"type": "object"
},
"type": "array",
},
},
"type": "object"
},
"type": "array",
},
"sponsorships": {
"items": {
"properties": {
"primary": { "type": "boolean" },
"classification": { "type": "string", },
"name": {"type": "string" },
"entity_type": {
"enum": ["organization", "person", ""],
"type": "string", "blank": True,
},
"person_id": {"type": ["string", "null"] },
"organization_id": {"type": ["string", "null"] },
},
"type": "object"
},
"type": "array",
},
"related_bills": {
"items": {
"properties": {
"identifier": {"type": "string"},
"legislative_session": {"type": "string"},
"relation_type": {"enum": common.BILL_RELATION_TYPES, "type": "string"},
},
"type": "object"
},
"type": "array",
},
"versions": versions_or_documents,
"documents": versions_or_documents,
"sources": sources,
"extras": extras,
}
}
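# A minimal document of the shape described by the schema above (illustrative
# values only; it is not validated here):
_example_bill = {
    "legislative_session": "2017",
    "identifier": "HB 1",
    "title": "An example act",
    "from_organization": None,
    "classification": ["bill"],
    "subject": ["education"],
    "abstracts": [{"abstract": "An example abstract.", "note": "", "date": ""}],
    "other_titles": [],
    "other_identifiers": [],
    "actions": [{
        "organization": None,
        "date": "2017-01-05",
        "description": "Introduced",
        "classification": ["introduction"],
        "related_entities": [],
    }],
    "sponsorships": [{
        "primary": True,
        "classification": "primary",
        "name": "Jane Example",
        "entity_type": "person",
        "person_id": None,
        "organization_id": None,
    }],
    "related_bills": [],
    "versions": [],
    "documents": [],
    "sources": [{"url": "http://example.com", "note": ""}],
    "extras": {},
}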
| bsd-3-clause |
EmmanuelJohnson/ssquiz | flask/lib/python2.7/site-packages/whoosh/query/__init__.py | 96 | 1843 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.query.qcore import *
from whoosh.query.terms import *
from whoosh.query.compound import *
from whoosh.query.positional import *
from whoosh.query.ranges import *
from whoosh.query.wrappers import *
from whoosh.query.nested import *
from whoosh.query.qcolumns import *
from whoosh.query.spans import *
| bsd-3-clause |
robhudson/django | tests/template_tests/filter_tests/test_escapejs.py | 324 | 2055 | from __future__ import unicode_literals
from django.template.defaultfilters import escapejs_filter
from django.test import SimpleTestCase
from ..utils import setup
class EscapejsTests(SimpleTestCase):
@setup({'escapejs01': '{{ a|escapejs }}'})
def test_escapejs01(self):
output = self.engine.render_to_string('escapejs01', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
'\\u0027string\\u0022 \\u003Cb\\u003E'
'escaping\\u003C/b\\u003E')
@setup({'escapejs02': '{% autoescape off %}{{ a|escapejs }}{% endautoescape %}'})
def test_escapejs02(self):
output = self.engine.render_to_string('escapejs02', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
'\\u0027string\\u0022 \\u003Cb\\u003E'
'escaping\\u003C/b\\u003E')
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
escapejs_filter('"double quotes" and \'single quotes\''),
'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027',
)
def test_backslashes(self):
self.assertEqual(escapejs_filter(r'\ : backslashes, too'), '\\u005C : backslashes, too')
def test_whitespace(self):
self.assertEqual(
escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),
'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008',
)
def test_script(self):
self.assertEqual(
escapejs_filter(r'<script>and this</script>'),
'\\u003Cscript\\u003Eand this\\u003C/script\\u003E',
)
def test_paragraph_separator(self):
self.assertEqual(
escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),
'paragraph separator:\\u2029and line separator:\\u2028',
)
| bsd-3-clause |
0111001101111010/open-health-inspection-api | venv/lib/python2.7/site-packages/pip/commands/install.py | 342 | 12694 | import os
import sys
import tempfile
import shutil
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.log import logger
from pip.locations import (src_prefix, virtualenv_no_global, distutils_scheme,
build_prefix)
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import InstallationError, CommandError, PreviousBuildDirError
from pip import cmdoptions
class InstallCommand(Command):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
bundle = False
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help='Install a project in editable mode (i.e. setuptools "develop mode") from a local project path or a VCS url.')
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>.')
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help="Download packages into <dir> instead of installing them, regardless of what's already installed.")
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".')
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all packages to the newest available version. '
'This process is recursive regardless of whether a dependency is already satisfied.')
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="DEPRECATED. Download and unpack all packages, but don't actually install them.")
cmd_opts.add_option(
'--no-download',
dest='no_download',
action="store_true",
help="DEPRECATED. Don't download any packages, just install the ones already downloaded "
"(completes an install run with --no-install).")
cmd_opts.add_option(cmdoptions.install_options.make())
cmd_opts.add_option(cmdoptions.global_options.make())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help='Install using the user scheme.')
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally does. This option is not about installing *from* eggs. (WARNING: Because this option overrides pip's normal install logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
process_dependency_links=
options.process_dependency_links,
session=session,
)
def run(self, options, args):
if (
options.no_install or
options.no_download or
(options.build_dir != build_prefix) or
options.no_clean
):
logger.deprecated('1.7', 'DEPRECATION: --no-install, --no-download, --build, '
'and --no-clean are deprecated. See https://github.com/pypa/pip/issues/906.')
if options.download_dir:
options.no_install = True
options.ignore_installed = True
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError("Can not perform a '--user' install. User site-packages are not visible in this virtualenv.")
install_options.append('--user')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir):
raise CommandError("Target path exists but is not a directory, will not continue.")
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
if options.use_mirrors:
logger.deprecated("1.7",
"--use-mirrors has been deprecated and will be removed"
" in the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
if options.mirrors:
logger.deprecated("1.7",
"--mirrors has been deprecated and will be removed in "
" the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
index_urls += options.mirrors
session = self._build_session(options)
finder = self._build_package_finder(options, index_urls, session)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
download_cache=options.download_cache,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(name, default_vcs=options.default_vcs))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
opts = {'name': self.name}
if options.find_links:
msg = ('You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.warn(msg)
return
try:
if not options.no_download:
requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
else:
requirement_set.locate_files()
if not options.no_install and not self.bundle:
requirement_set.install(install_options, global_options, root=options.root_path)
installed = ' '.join([req.name for req in
requirement_set.successfully_installed])
if installed:
logger.notify('Successfully installed %s' % installed)
elif not self.bundle:
downloaded = ' '.join([req.name for req in
requirement_set.successfully_downloaded])
if downloaded:
logger.notify('Successfully downloaded %s' % downloaded)
elif self.bundle:
requirement_set.create_bundle(self.bundle_filename)
logger.notify('Created bundle in %s' % self.bundle_filename)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if (not options.no_clean) and ((not options.no_install) or options.download_dir):
requirement_set.cleanup_files(bundle=self.bundle)
if options.target_dir:
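# A --target install was staged into a temporary --home directory above;
# move the staged purelib contents into the requested target directory.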
if not os.path.exists(options.target_dir):
os.makedirs(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
shutil.move(
os.path.join(lib_dir, item),
os.path.join(options.target_dir, item)
)
shutil.rmtree(temp_target_dir)
return requirement_set
| gpl-2.0 |
glwu/python-for-android | python3-alpha/python3-src/Lib/test/test_posixpath.py | 49 | 21964 | import unittest
from test import support, test_genericpath
import posixpath
import os
import sys
from posixpath import realpath, abspath, dirname, basename
try:
import posix
except ImportError:
posix = None
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
"""
On Windows, posixpath.abspath still returns paths with backslashes
instead of posix forward slashes. If this is the case, several tests
fail, so skip them.
"""
found_backslash = '\\' in ABSTFN
msg = "ABSTFN is not a posix path - tests fail"
return [test, unittest.skip(msg)(test)][found_backslash]
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class PosixPathTest(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
support.unlink(support.TESTFN + suffix)
safe_rmdir(support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"),
"/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"),
"/foo/bar/baz/")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"/bar", b"baz"),
b"/bar/baz")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"baz"),
b"/foo/bar/baz")
self.assertEqual(posixpath.join(b"/foo/", b"bar/", b"baz/"),
b"/foo/bar/baz/")
self.assertRaises(TypeError, posixpath.join, b"bytes", "str")
self.assertRaises(TypeError, posixpath.join, "str", b"bytes")
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
self.assertEqual(posixpath.split(b"/foo/bar"), (b"/foo", b"bar"))
self.assertEqual(posixpath.split(b"/"), (b"/", b""))
self.assertEqual(posixpath.split(b"foo"), (b"", b"foo"))
self.assertEqual(posixpath.split(b"////foo"), (b"////", b"foo"))
self.assertEqual(posixpath.split(b"//foo//bar"), (b"//foo", b"bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path),
("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path),
("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path),
("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"),
(filename + ext + "/", ""))
path = bytes(path, "ASCII")
filename = bytes(filename, "ASCII")
ext = bytes(ext, "ASCII")
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext(b"/" + path),
(b"/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc/" + path),
(b"abc/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc.def/" + path),
(b"abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
(b"/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + b"/"),
(filename + ext + b"/", b""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
self.assertIs(posixpath.isabs(b""), False)
self.assertIs(posixpath.isabs(b"/"), True)
self.assertIs(posixpath.isabs(b"/foo"), True)
self.assertIs(posixpath.isabs(b"/foo/bar"), True)
self.assertIs(posixpath.isabs(b"foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
self.assertEqual(posixpath.basename(b"/foo/bar"), b"bar")
self.assertEqual(posixpath.basename(b"/"), b"")
self.assertEqual(posixpath.basename(b"foo"), b"foo")
self.assertEqual(posixpath.basename(b"////foo"), b"foo")
self.assertEqual(posixpath.basename(b"//foo//bar"), b"bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
self.assertEqual(posixpath.dirname(b"/foo/bar"), b"/foo")
self.assertEqual(posixpath.dirname(b"/"), b"/")
self.assertEqual(posixpath.dirname(b"foo"), b"")
self.assertEqual(posixpath.dirname(b"////foo"), b"////")
self.assertEqual(posixpath.dirname(b"//foo//bar"), b"//foo")
def test_islink(self):
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), False)
f = open(support.TESTFN + "1", "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
if support.can_symlink():
os.symlink(support.TESTFN + "1", support.TESTFN + "2")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
os.remove(support.TESTFN + "1")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), True)
finally:
if not f.close():
f.close()
@staticmethod
def _create_file(filename):
with open(filename, 'wb') as f:
f.write(b'foo')
def test_samefile(self):
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
self.assertTrue(posixpath.samefile(test_fn, test_fn))
self.assertRaises(TypeError, posixpath.samefile)
@unittest.skipIf(
sys.platform.startswith('win'),
"posixpath.samefile does not work on links in Windows")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
def test_samefile_on_links(self):
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
os.symlink(test_fn1, test_fn2)
self.assertTrue(posixpath.samefile(test_fn1, test_fn2))
os.remove(test_fn2)
self._create_file(test_fn2)
self.assertFalse(posixpath.samefile(test_fn1, test_fn2))
def test_samestat(self):
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
test_fns = [test_fn]*2
stats = map(os.stat, test_fns)
self.assertTrue(posixpath.samestat(*stats))
@unittest.skipIf(
sys.platform.startswith('win'),
"posixpath.samestat does not work on links in Windows")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
def test_samestat_on_links(self):
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
test_fns = (test_fn1, test_fn2)
os.symlink(*test_fns)
stats = map(os.stat, test_fns)
self.assertTrue(posixpath.samestat(*stats))
os.remove(test_fn2)
self._create_file(test_fn2)
stats = map(os.stat, test_fns)
self.assertFalse(posixpath.samestat(*stats))
self.assertRaises(TypeError, posixpath.samestat)
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
self.assertIs(posixpath.ismount(b"/"), True)
def test_ismount_non_existent(self):
# Non-existent mountpoint.
self.assertIs(posixpath.ismount(ABSTFN), False)
try:
os.mkdir(ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
safe_rmdir(ABSTFN)
@unittest.skipUnless(support.can_symlink(),
"Test requires symlink support")
def test_ismount_symlinks(self):
# Symlinks are never mountpoints.
try:
os.symlink("/", ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
os.unlink(ABSTFN)
@unittest.skipIf(posix is None, "Test requires posix module")
def test_ismount_different_device(self):
# Simulate the path being on a different device from its parent by
# mocking out st_dev.
save_lstat = os.lstat
def fake_lstat(path):
st_ino = 0
st_dev = 0
if path == ABSTFN:
st_dev = 1
st_ino = 1
return posix.stat_result((0, st_ino, st_dev, 0, 0, 0, 0, 0, 0, 0))
try:
os.lstat = fake_lstat
self.assertIs(posixpath.ismount(ABSTFN), True)
finally:
os.lstat = save_lstat
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
self.assertEqual(posixpath.expanduser(b"foo"), b"foo")
try:
import pwd
except ImportError:
pass
else:
self.assertIsInstance(posixpath.expanduser("~/"), str)
self.assertIsInstance(posixpath.expanduser(b"~/"), bytes)
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assertEqual(
posixpath.expanduser(b"~") + b"/",
posixpath.expanduser(b"~/")
)
self.assertIsInstance(posixpath.expanduser("~root/"), str)
self.assertIsInstance(posixpath.expanduser("~foo/"), str)
self.assertIsInstance(posixpath.expanduser(b"~root/"), bytes)
self.assertIsInstance(posixpath.expanduser(b"~foo/"), bytes)
with support.EnvironmentVarGuard() as env:
env['HOME'] = '/'
self.assertEqual(posixpath.expanduser("~"), "/")
# expanduser should fall back to using the password database
del env['HOME']
home = pwd.getpwuid(os.getuid()).pw_dir
self.assertEqual(posixpath.expanduser("~"), home)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"),
"/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
self.assertEqual(posixpath.normpath(b""), b".")
self.assertEqual(posixpath.normpath(b"/"), b"/")
self.assertEqual(posixpath.normpath(b"//"), b"//")
self.assertEqual(posixpath.normpath(b"///"), b"/")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//"), b"/foo/bar")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//.//..//.//baz"),
b"/foo/baz")
self.assertEqual(posixpath.normpath(b"///..//./foo/.//bar"),
b"/foo/bar")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
support.unlink(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_relative(self):
try:
os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
support.unlink(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
old_path = abspath('.')
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
os.chdir(old_path)
support.unlink(ABSTFN)
support.unlink(ABSTFN+"1")
support.unlink(ABSTFN+"2")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
# realpath("a"). This should return /usr/share/doc/a/.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/y")
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
os.chdir(ABSTFN + "/k")
self.assertEqual(realpath("a"), ABSTFN + "/y/a")
finally:
os.chdir(old_path)
support.unlink(ABSTFN + "/k")
safe_rmdir(ABSTFN + "/y")
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_before_normalizing(self):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
# a/k/y
#
# and a symbolic link 'link-y' pointing to 'y' in directory 'a',
# then realpath("link-y/..") should return 'k', not 'a'.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.mkdir(ABSTFN + "/k/y")
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
# Relative path.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
ABSTFN + "/k")
finally:
os.chdir(old_path)
support.unlink(ABSTFN + "/link-y")
safe_rmdir(ABSTFN + "/k/y")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_first(self):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.symlink(ABSTFN, ABSTFN + "link")
os.chdir(dirname(ABSTFN))
base = basename(ABSTFN)
self.assertEqual(realpath(base + "link"), ABSTFN)
self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
finally:
os.chdir(old_path)
support.unlink(ABSTFN + "link")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"),
"../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
def test_relpath_bytes(self):
(real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
try:
curdir = os.path.split(os.getcwdb())[-1]
self.assertRaises(ValueError, posixpath.relpath, b"")
self.assertEqual(posixpath.relpath(b"a"), b"a")
self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
self.assertEqual(posixpath.relpath(b"a", b"../b"),
b"../"+curdir+b"/a")
self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
b"../"+curdir+b"/a/b")
self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')
self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
finally:
os.getcwdb = real_getcwdb
def test_sameopenfile(self):
fname = support.TESTFN + "1"
with open(fname, "wb") as a, open(fname, "wb") as b:
self.assertTrue(posixpath.sameopenfile(a.fileno(), b.fileno()))
class PosixCommonTest(test_genericpath.CommonTest):
pathmodule = posixpath
attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
def test_main():
support.run_unittest(PosixPathTest, PosixCommonTest)
if __name__=="__main__":
test_main()
| apache-2.0 |
tysonclugg/django | django/contrib/gis/gdal/srs.py | 72 | 11540 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
destructor = capi.release_srs
def __init__(self, srs_input='', srs_type='user'):
"""
Create a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, str):
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, int):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __getitem__(self, target):
"""
Return the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"Use 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, str) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Return the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Return the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Return a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morph this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morph this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Check to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Return the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Return the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Return the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Return the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Return the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Return the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Return a 2-tuple of the units value and the units name. Automatically
determine whether to return the linear or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Return a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Return the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Return the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Return the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
# #### Boolean Properties ####
@property
def geographic(self):
"""
Return True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Return True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Return True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
# #### Import Routines #####
def import_epsg(self, epsg):
"Import the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Import the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Import the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Import the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(force_bytes(wkt))))
def import_xml(self, xml):
"Import the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
# #### Export Properties ####
@property
def wkt(self):
"Return the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Return the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Return the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Return the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), force_bytes(dialect))
class CoordTransform(GDALBase):
"The coordinate system transformation object."
destructor = capi.destroy_ct
def __init__(self, source, target):
"Initialize on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
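# Illustrative sketch (not part of this module's API): using CoordTransform to
# reproject a geometry. The EPSG codes and the point coordinates are assumed
# example values; OGRGeometry comes from django.contrib.gis.gdal.
def _coord_transform_example():
    from django.contrib.gis.gdal import OGRGeometry
    ct = CoordTransform(SpatialReference(4326), SpatialReference(3857))
    point = OGRGeometry('POINT(-104.609 38.255)', SpatialReference(4326))
    point.transform(ct)
    return point.wkt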
| bsd-3-clause |
arjclark/cylc | lib/jinja2/__init__.py | 71 | 2614 | # -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
    Here is a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.10'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
make_logging_undefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError, TemplateRuntimeError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined, select_autoescape
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'TemplateRuntimeError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
'select_autoescape',
]
def _patch_async():
from jinja2.utils import have_async_gen
if have_async_gen:
from jinja2.asyncsupport import patch_all
patch_all()
_patch_async()
del _patch_async
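# Illustrative usage sketch (not part of Jinja2's public API): rendering a
# template similar to the one in the module docstring above. The template body
# and the ``users`` data are assumed example values.
def _nutshell_example():
    env = Environment(loader=DictLoader({
        'memberlist.html': (
            '<ul>\n'
            '{% for user in users %}\n'
            '  <li><a href="{{ user.url }}">{{ user.username }}</a></li>\n'
            '{% endfor %}\n'
            '</ul>\n'
        )
    }))
    template = env.get_template('memberlist.html')
    return template.render(users=[
        {'url': '/users/alice', 'username': 'alice'},
        {'url': '/users/bob', 'username': 'bob'},
    ])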
| gpl-3.0 |
JudoWill/ResearchNotebooks | DownloadMicroData.py | 1 | 1738 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os, os.path
import concurrent.futures
import csv
import urllib.request
import shutil
import gzip
os.chdir('/home/will/AutoMicroAnal/')
# <codecell>
with open('MicroarraySamples.tsv') as handle:
microdata = list(csv.DictReader(handle, delimiter = '\t'))
# <codecell>
def get_fileurl(supurl):
#print(supurl)
resp = urllib.request.urlopen(supurl)
for line in resp:
fname = str(line.split()[-1])
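        # str() of the bytes token yields a repr like "b'name.CEL.gz'", hence
        # the quote in the suffix check and the [2:-1] slice below.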
if fname.lower().endswith(".cel.gz'"):
#print('returning')
return supurl + fname[2:-1]
return None
def process_row(row):
supurl = row['URL'] + 'suppl/'
tmpfile = '/tmp/' + row['Sample Accession'] + '.CEL.gz'
finalfile = '/home/will/AutoMicroAnal/microadata/' + row['Sample Accession'] + '.CEL'
if os.path.exists(finalfile):
return None
fileurl = get_fileurl(supurl)
#print(fileurl)
if fileurl is None:
return fileurl
try:
resp = urllib.request.urlopen(fileurl)
with open(tmpfile, 'wb') as handle:
handle.write(resp.read())
except urllib.request.URLError:
return fileurl
with gzip.open(tmpfile) as zhandle:
with open(finalfile, 'wb') as handle:
handle.write(zhandle.read())
os.remove(tmpfile)
return None
# <codecell>
gp133As = [row for row in microdata if row['Platform'] == 'GPL96']
# <codecell>
for num, row in enumerate(gp133As):
try:
res = process_row(row)
except:
print('skipping')
continue
    if (num == 0) or (num == 5) or (num == 20) or (num % 500 == 0):
print(num)
if res:
print(res)
# <codecell>
# <codecell>
| mit |
collex100/odoo | addons/account_check_writing/account.py | 379 | 2032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_journal(osv.osv):
_inherit = "account.journal"
_columns = {
'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'),
        'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check if you use a preformatted sheet for checks'),
}
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'check_layout': fields.selection([
('top', 'Check on Top'),
('middle', 'Check in middle'),
('bottom', 'Check on bottom'),
],"Check Layout",
help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ),
}
_defaults = {
'check_layout' : lambda *a: 'top',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iemejia/incubator-beam | sdks/python/apache_beam/runners/portability/flink_uber_jar_job_server.py | 1 | 8979 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A job server submitting portable pipelines as uber jars to Flink."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import time
import urllib
import requests
from google.protobuf import json_format
from apache_beam.options import pipeline_options
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import job_server
_LOGGER = logging.getLogger(__name__)
class FlinkUberJarJobServer(abstract_job_service.AbstractJobServiceServicer):
"""A Job server which submits a self-contained Jar to a Flink cluster.
The jar contains the Beam pipeline definition, dependencies, and
the pipeline artifacts.
"""
def __init__(self, master_url, options):
super(FlinkUberJarJobServer, self).__init__()
self._master_url = master_url
self._executable_jar = (
options.view_as(
pipeline_options.FlinkRunnerOptions).flink_job_server_jar)
self._artifact_port = (
options.view_as(pipeline_options.JobServerOptions).artifact_port)
self._temp_dir = tempfile.mkdtemp(prefix='apache-beam-flink')
def start(self):
return self
def stop(self):
pass
def executable_jar(self):
if self._executable_jar:
if not os.path.exists(self._executable_jar):
parsed = urllib.parse.urlparse(self._executable_jar)
if not parsed.scheme:
raise ValueError(
'Unable to parse jar URL "%s". If using a full URL, make sure '
'the scheme is specified. If using a local file path, make sure '
'the file exists; you may have to first build the job server '
'using `./gradlew runners:flink:%s:job-server:shadowJar`.' %
              (self._executable_jar, self.flink_version()))
url = self._executable_jar
else:
url = job_server.JavaJarJobServer.path_to_beam_jar(
'runners:flink:%s:job-server:shadowJar' % self.flink_version())
return job_server.JavaJarJobServer.local_jar(url)
def flink_version(self):
full_version = requests.get('%s/v1/config' %
self._master_url).json()['flink-version']
# Only return up to minor version.
return '.'.join(full_version.split('.')[:2])
def create_beam_job(self, job_id, job_name, pipeline, options):
return FlinkBeamJob(
self._master_url,
self.executable_jar(),
job_id,
job_name,
pipeline,
options,
artifact_port=self._artifact_port)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
metrics_text = self._jobs[request.job_id].get_metrics()
response = beam_job_api_pb2.GetJobMetricsResponse()
json_format.Parse(metrics_text, response)
return response
class FlinkBeamJob(abstract_job_service.UberJarBeamJob):
"""Runs a single Beam job on Flink by staging all contents into a Jar
and uploading it via the Flink Rest API."""
def __init__(
self,
master_url,
executable_jar,
job_id,
job_name,
pipeline,
options,
artifact_port=0):
super(FlinkBeamJob, self).__init__(
executable_jar,
job_id,
job_name,
pipeline,
options,
artifact_port=artifact_port)
self._master_url = master_url
def request(self, method, path, expected_status=200, **kwargs):
url = '%s/%s' % (self._master_url, path)
response = method(url, **kwargs)
if response.status_code != expected_status:
raise RuntimeError(
"Request to %s failed with status %d: %s" %
(url, response.status_code, response.text))
if response.text:
return response.json()
def get(self, path, **kwargs):
return self.request(requests.get, path, **kwargs)
def post(self, path, **kwargs):
return self.request(requests.post, path, **kwargs)
def delete(self, path, **kwargs):
return self.request(requests.delete, path, **kwargs)
def run(self):
self._stop_artifact_service()
# Upload the jar and start the job.
with open(self._jar, 'rb') as jar_file:
self._flink_jar_id = self.post(
'v1/jars/upload',
files={'jarfile': ('beam.jar', jar_file)})['filename'].split('/')[-1]
self._jar_uploaded = True
self._flink_job_id = self.post(
'v1/jars/%s/run' % self._flink_jar_id,
json={
'entryClass': 'org.apache.beam.runners.flink.FlinkPipelineRunner'
})['jobid']
os.unlink(self._jar)
_LOGGER.info('Started Flink job as %s' % self._flink_job_id)
def cancel(self):
self.post('v1/%s/stop' % self._flink_job_id, expected_status=202)
self.delete_jar()
def delete_jar(self):
if self._jar_uploaded:
self._jar_uploaded = False
try:
self.delete('v1/jars/%s' % self._flink_jar_id)
except Exception:
_LOGGER.info(
'Error deleting jar %s' % self._flink_jar_id, exc_info=True)
def _get_state(self):
"""Query flink to get the current state.
:return: tuple of int and Timestamp or None
timestamp will be None if the state has not changed since the last query.
"""
# For just getting the status, execution-result seems cheaper.
flink_status = self.get('v1/jobs/%s/execution-result' %
self._flink_job_id)['status']['id']
if flink_status == 'COMPLETED':
flink_status = self.get('v1/jobs/%s' % self._flink_job_id)['state']
beam_state = {
'CREATED': beam_job_api_pb2.JobState.STARTING,
'RUNNING': beam_job_api_pb2.JobState.RUNNING,
'FAILING': beam_job_api_pb2.JobState.RUNNING,
'FAILED': beam_job_api_pb2.JobState.FAILED,
'CANCELLING': beam_job_api_pb2.JobState.CANCELLING,
'CANCELED': beam_job_api_pb2.JobState.CANCELLED,
'FINISHED': beam_job_api_pb2.JobState.DONE,
'RESTARTING': beam_job_api_pb2.JobState.RUNNING,
'SUSPENDED': beam_job_api_pb2.JobState.RUNNING,
'RECONCILING': beam_job_api_pb2.JobState.RUNNING,
'IN_PROGRESS': beam_job_api_pb2.JobState.RUNNING,
'COMPLETED': beam_job_api_pb2.JobState.DONE,
}.get(flink_status, beam_job_api_pb2.JobState.UNSPECIFIED)
if self.is_terminal_state(beam_state):
self.delete_jar()
# update the state history if it has changed
return beam_state, self.set_state(beam_state)
def get_state(self):
state, timestamp = self._get_state()
if timestamp is None:
# state has not changed since it was last checked: use previous timestamp
return super(FlinkBeamJob, self).get_state()
else:
return state, timestamp
def get_state_stream(self):
def _state_iter():
sleep_secs = 1.0
while True:
yield self.get_state()
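        # Poll with a gentle exponential backoff, capped at 60s between polls.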
sleep_secs = min(60, sleep_secs * 1.2)
time.sleep(sleep_secs)
for state, timestamp in self.with_state_history(_state_iter()):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
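    # Relay state changes; once a terminal state is reached, surface Flink's
    # recorded exceptions as error messages before stopping.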
for state, timestamp in self.get_state_stream():
if self.is_terminal_state(state):
response = self.get('v1/jobs/%s/exceptions' % self._flink_job_id)
for ix, exc in enumerate(response['all-exceptions']):
yield beam_job_api_pb2.JobMessage(
message_id='message%d' % ix,
time=str(exc['timestamp']),
importance=beam_job_api_pb2.JobMessage.MessageImportance.
JOB_MESSAGE_ERROR,
message_text=exc['exception'])
yield state, timestamp
break
else:
yield state, timestamp
def get_metrics(self):
accumulators = self.get('v1/jobs/%s/accumulators' %
self._flink_job_id)['user-task-accumulators']
for accumulator in accumulators:
if accumulator['name'] == '__metricscontainers':
return accumulator['value']
raise LookupError(
"Found no metrics container for job {}".format(self._flink_job_id))
| apache-2.0 |
roxyboy/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
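    # log_normalize should undo the log and renormalize, so the factor of 2
    # cancels and the original distribution is recovered.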
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
coderfi/ansible-modules-extras | packaging/macports.py | 61 | 6679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jimmy Tang <[email protected]>
# Based on okpg (Patrick Pelletier <[email protected]>), pacman
# (Afterburn) and pkgin (Shaun Zinck) modules
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: macports
author: Jimmy Tang
short_description: Package manager for MacPorts
description:
- Manages MacPorts packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent', 'active', 'inactive' ]
required: false
default: present
update_cache:
description:
- update the package db first
required: false
default: "no"
choices: [ "yes", "no" ]
notes: []
'''
EXAMPLES = '''
- macports: name=foo state=present
- macports: name=foo state=present update_cache=yes
- macports: name=foo state=absent
- macports: name=foo state=active
- macports: name=foo state=inactive
'''
import pipes
def update_package_db(module, port_path):
""" Updates packages list. """
rc, out, err = module.run_command("%s sync" % port_path)
if rc != 0:
module.fail_json(msg="could not update package db")
def query_package(module, port_path, name, state="present"):
""" Returns whether a package is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
elif state == "active":
rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
def remove_packages(module, port_path, packages):
""" Uninstalls one or more packages if installed. """
remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, port_path, package):
continue
rc, out, err = module.run_command("%s uninstall %s" % (port_path, package))
if query_package(module, port_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, port_path, packages):
""" Installs one or more packages if not already installed. """
install_c = 0
for package in packages:
if query_package(module, port_path, package):
continue
rc, out, err = module.run_command("%s install %s" % (port_path, package))
if not query_package(module, port_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def activate_packages(module, port_path, packages):
""" Activate a package if it's inactive. """
activate_c = 0
for package in packages:
if not query_package(module, port_path, package):
module.fail_json(msg="failed to activate %s, package(s) not present" % (package))
if query_package(module, port_path, package, state="active"):
continue
rc, out, err = module.run_command("%s activate %s" % (port_path, package))
if not query_package(module, port_path, package, state="active"):
module.fail_json(msg="failed to activate %s: %s" % (package, out))
activate_c += 1
if activate_c > 0:
module.exit_json(changed=True, msg="activated %s package(s)" % (activate_c))
module.exit_json(changed=False, msg="package(s) already active")
def deactivate_packages(module, port_path, packages):
""" Deactivate a package if it's active. """
deactivated_c = 0
for package in packages:
if not query_package(module, port_path, package):
module.fail_json(msg="failed to activate %s, package(s) not present" % (package))
if not query_package(module, port_path, package, state="active"):
continue
rc, out, err = module.run_command("%s deactivate %s" % (port_path, package))
if query_package(module, port_path, package, state="active"):
module.fail_json(msg="failed to deactivated %s: %s" % (package, out))
deactivated_c += 1
if deactivated_c > 0:
module.exit_json(changed=True, msg="deactivated %s package(s)" % (deactivated_c))
module.exit_json(changed=False, msg="package(s) already inactive")
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=["pkg"], required=True),
state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
update_cache = dict(default="no", aliases=["update-cache"], type='bool')
)
)
port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
p = module.params
if p["update_cache"]:
update_package_db(module, port_path)
pkgs = p["name"].split(",")
if p["state"] in ["present", "installed"]:
install_packages(module, port_path, pkgs)
elif p["state"] in ["absent", "removed"]:
remove_packages(module, port_path, pkgs)
elif p["state"] == "active":
activate_packages(module, port_path, pkgs)
elif p["state"] == "inactive":
deactivate_packages(module, port_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
ShawnPengxy/Flask-madeBlog | site-packages/httpie/output.py | 5 | 16098 | """Output streaming, processing and formatting.
"""
import json
import xml.dom.minidom
from functools import partial
from itertools import chain
import pygments
from pygments import token, lexer
from pygments.styles import get_style_by_name, STYLE_MAP
from pygments.lexers import get_lexer_for_mimetype, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.util import ClassNotFound
from .compat import is_windows
from .solarized import Solarized256Style
from .models import HTTPRequest, HTTPResponse, Environment
from .input import (OUT_REQ_BODY, OUT_REQ_HEAD,
OUT_RESP_HEAD, OUT_RESP_BODY)
# The default number of spaces to indent when pretty printing
DEFAULT_INDENT = 4
# Colors on Windows via colorama don't look that
# great and fruity seems to give the best result there.
AVAILABLE_STYLES = set(STYLE_MAP.keys())
AVAILABLE_STYLES.add('solarized')
DEFAULT_STYLE = 'solarized' if not is_windows else 'fruity'
BINARY_SUPPRESSED_NOTICE = (
b'\n'
b'+-----------------------------------------+\n'
b'| NOTE: binary data not shown in terminal |\n'
b'+-----------------------------------------+'
)
class BinarySuppressedError(Exception):
"""An error indicating that the body is binary and won't be written,
    e.g., for terminal output."""
message = BINARY_SUPPRESSED_NOTICE
###############################################################################
# Output Streams
###############################################################################
def write(stream, outfile, flush):
"""Write the output stream."""
try:
# Writing bytes so we use the buffer interface (Python 3).
buf = outfile.buffer
except AttributeError:
buf = outfile
for chunk in stream:
buf.write(chunk)
if flush:
outfile.flush()
def write_with_colors_win_py3(stream, outfile, flush):
"""Like `write`, but colorized chunks are written as text
directly to `outfile` to ensure it gets processed by colorama.
Applies only to Windows with Python 3 and colorized terminal output.
"""
color = b'\x1b['
encoding = outfile.encoding
for chunk in stream:
if color in chunk:
outfile.write(chunk.decode(encoding))
else:
outfile.buffer.write(chunk)
if flush:
outfile.flush()
def build_output_stream(args, env, request, response):
"""Build and return a chain of iterators over the `request`-`response`
exchange each of which yields `bytes` chunks.
"""
req_h = OUT_REQ_HEAD in args.output_options
req_b = OUT_REQ_BODY in args.output_options
resp_h = OUT_RESP_HEAD in args.output_options
resp_b = OUT_RESP_BODY in args.output_options
req = req_h or req_b
resp = resp_h or resp_b
output = []
Stream = get_stream_type(env, args)
if req:
output.append(Stream(
msg=HTTPRequest(request),
with_headers=req_h,
with_body=req_b))
if req_b and resp:
# Request/Response separator.
output.append([b'\n\n'])
if resp:
output.append(Stream(
msg=HTTPResponse(response),
with_headers=resp_h,
with_body=resp_b))
if env.stdout_isatty and resp_b:
# Ensure a blank line after the response body.
# For terminal output only.
output.append([b'\n\n'])
return chain(*output)
def get_stream_type(env, args):
"""Pick the right stream type based on `env` and `args`.
Wrap it in a partial with the type-specific args so that
we don't need to think what stream we are dealing with.
"""
if not env.stdout_isatty and not args.prettify:
Stream = partial(
RawStream,
chunk_size=RawStream.CHUNK_SIZE_BY_LINE
if args.stream
else RawStream.CHUNK_SIZE
)
elif args.prettify:
Stream = partial(
PrettyStream if args.stream else BufferedPrettyStream,
env=env,
processor=OutputProcessor(
env=env, groups=args.prettify, pygments_style=args.style),
)
else:
Stream = partial(EncodedStream, env=env)
return Stream
class BaseStream(object):
"""Base HTTP message output stream class."""
def __init__(self, msg, with_headers=True, with_body=True,
on_body_chunk_downloaded=None):
"""
:param msg: a :class:`models.HTTPMessage` subclass
:param with_headers: if `True`, headers will be included
:param with_body: if `True`, body will be included
"""
assert with_headers or with_body
self.msg = msg
self.with_headers = with_headers
self.with_body = with_body
self.on_body_chunk_downloaded = on_body_chunk_downloaded
def _get_headers(self):
"""Return the headers' bytes."""
return self.msg.headers.encode('ascii')
def _iter_body(self):
"""Return an iterator over the message body."""
raise NotImplementedError()
def __iter__(self):
"""Return an iterator over `self.msg`."""
if self.with_headers:
yield self._get_headers()
yield b'\r\n\r\n'
if self.with_body:
try:
for chunk in self._iter_body():
yield chunk
if self.on_body_chunk_downloaded:
self.on_body_chunk_downloaded(chunk)
except BinarySuppressedError as e:
if self.with_headers:
yield b'\n'
yield e.message
class RawStream(BaseStream):
"""The message is streamed in chunks with no processing."""
CHUNK_SIZE = 1024 * 100
CHUNK_SIZE_BY_LINE = 1
def __init__(self, chunk_size=CHUNK_SIZE, **kwargs):
super(RawStream, self).__init__(**kwargs)
self.chunk_size = chunk_size
def _iter_body(self):
return self.msg.iter_body(self.chunk_size)
class EncodedStream(BaseStream):
"""Encoded HTTP message stream.
The message bytes are converted to an encoding suitable for
`self.env.stdout`. Unicode errors are replaced and binary data
is suppressed. The body is always streamed by line.
"""
CHUNK_SIZE = 1
def __init__(self, env=Environment(), **kwargs):
super(EncodedStream, self).__init__(**kwargs)
if env.stdout_isatty:
# Use the encoding supported by the terminal.
output_encoding = getattr(env.stdout, 'encoding', None)
else:
# Preserve the message encoding.
output_encoding = self.msg.encoding
# Default to utf8 when unsure.
self.output_encoding = output_encoding or 'utf8'
def _iter_body(self):
for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
if b'\0' in line:
raise BinarySuppressedError()
yield line.decode(self.msg.encoding)\
.encode(self.output_encoding, 'replace') + lf
class PrettyStream(EncodedStream):
"""In addition to :class:`EncodedStream` behaviour, this stream applies
content processing.
Useful for long-lived HTTP responses that stream by lines
such as the Twitter streaming API.
"""
CHUNK_SIZE = 1
def __init__(self, processor, **kwargs):
super(PrettyStream, self).__init__(**kwargs)
self.processor = processor
def _get_headers(self):
return self.processor.process_headers(
self.msg.headers).encode(self.output_encoding)
def _iter_body(self):
for line, lf in self.msg.iter_lines(self.CHUNK_SIZE):
if b'\0' in line:
raise BinarySuppressedError()
yield self._process_body(line) + lf
def _process_body(self, chunk):
return (self.processor
.process_body(
content=chunk.decode(self.msg.encoding, 'replace'),
content_type=self.msg.content_type,
encoding=self.msg.encoding)
.encode(self.output_encoding, 'replace'))
class BufferedPrettyStream(PrettyStream):
"""The same as :class:`PrettyStream` except that the body is fully
fetched before it's processed.
Suitable for regular HTTP responses.
"""
CHUNK_SIZE = 1024 * 10
def _iter_body(self):
# Read the whole body before prettifying it,
# but bail out immediately if the body is binary.
body = bytearray()
for chunk in self.msg.iter_body(self.CHUNK_SIZE):
if b'\0' in chunk:
raise BinarySuppressedError()
body.extend(chunk)
yield self._process_body(body)
###############################################################################
# Processing
###############################################################################
class HTTPLexer(lexer.RegexLexer):
"""Simplified HTTP lexer for Pygments.
It only operates on headers and provides a stronger contrast between
their names and values than the original one bundled with Pygments
(:class:`pygments.lexers.text.HttpLexer`), especially when the
Solarized color scheme is used.
"""
name = 'HTTP'
aliases = ['http']
filenames = ['*.http']
tokens = {
'root': [
# Request-Line
(r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)',
lexer.bygroups(
token.Name.Function,
token.Text,
token.Name.Namespace,
token.Text,
token.Keyword.Reserved,
token.Operator,
token.Number
)),
# Response Status-Line
(r'(HTTP)(/)(\d+\.\d+)( +)(\d{3})( +)(.+)',
lexer.bygroups(
token.Keyword.Reserved, # 'HTTP'
token.Operator, # '/'
token.Number, # Version
token.Text,
token.Number, # Status code
token.Text,
token.Name.Exception, # Reason
)),
# Header
(r'(.*?)( *)(:)( *)(.+)', lexer.bygroups(
token.Name.Attribute, # Name
token.Text,
token.Operator, # Colon
token.Text,
token.String # Value
))
]
}
class BaseProcessor(object):
"""Base, noop output processor class."""
enabled = True
def __init__(self, env=Environment(), **kwargs):
"""
:param env: an :class:`Environment` instance
:param kwargs: additional keyword arguments that some
processors might require.
"""
self.env = env
self.kwargs = kwargs
def process_headers(self, headers):
"""Return processed `headers`
:param headers: The headers as text.
"""
return headers
def process_body(self, content, content_type, subtype, encoding):
"""Return processed `content`.
:param content: The body content as text
:param content_type: Full content type, e.g., 'application/atom+xml'.
:param subtype: E.g. 'xml'.
:param encoding: The original content encoding.
"""
return content
class JSONProcessor(BaseProcessor):
"""JSON body processor."""
def process_body(self, content, content_type, subtype, encoding):
if subtype == 'json':
try:
# Indent the JSON data, sort keys by name, and
# avoid unicode escapes to improve readability.
content = json.dumps(json.loads(content),
sort_keys=True,
ensure_ascii=False,
indent=DEFAULT_INDENT)
except ValueError:
# Invalid JSON but we don't care.
pass
return content
class XMLProcessor(BaseProcessor):
"""XML body processor."""
# TODO: tests
def process_body(self, content, content_type, subtype, encoding):
if subtype == 'xml':
try:
# Pretty print the XML
doc = xml.dom.minidom.parseString(content.encode(encoding))
content = doc.toprettyxml(indent=' ' * DEFAULT_INDENT)
except xml.parsers.expat.ExpatError:
# Ignore invalid XML errors (skips attempting to pretty print)
pass
return content
class PygmentsProcessor(BaseProcessor):
"""A processor that applies syntax-highlighting using Pygments
to the headers, and to the body as well if its content type is recognized.
"""
def __init__(self, *args, **kwargs):
super(PygmentsProcessor, self).__init__(*args, **kwargs)
# Cache that speeds up when we process streamed body by line.
self.lexers_by_type = {}
if not self.env.colors:
self.enabled = False
return
try:
style = get_style_by_name(
self.kwargs.get('pygments_style', DEFAULT_STYLE))
except ClassNotFound:
style = Solarized256Style
if self.env.is_windows or self.env.colors == 256:
fmt_class = Terminal256Formatter
else:
fmt_class = TerminalFormatter
self.formatter = fmt_class(style=style)
def process_headers(self, headers):
return pygments.highlight(
headers, HTTPLexer(), self.formatter).strip()
def process_body(self, content, content_type, subtype, encoding):
try:
lexer = self.lexers_by_type.get(content_type)
if not lexer:
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
lexer = get_lexer_by_name(subtype)
self.lexers_by_type[content_type] = lexer
except ClassNotFound:
pass
else:
content = pygments.highlight(content, lexer, self.formatter)
return content.strip()
class HeadersProcessor(BaseProcessor):
"""Sorts headers by name retaining relative order of multiple headers
with the same name.
"""
def process_headers(self, headers):
lines = headers.splitlines()
headers = sorted(lines[1:], key=lambda h: h.split(':')[0])
return '\r\n'.join(lines[:1] + headers)
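# Illustrative example (added for clarity; not part of the original source)
# of the stable sort performed above, using made-up header values:
#
#     input:  'HTTP/1.1 200 OK\r\nSet-Cookie: b=2\r\nAge: 0\r\nSet-Cookie: a=1'
#     output: 'HTTP/1.1 200 OK\r\nAge: 0\r\nSet-Cookie: b=2\r\nSet-Cookie: a=1'
#
# The status line stays first, and the two Set-Cookie headers keep their
# relative order because sorted() is stable.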
class OutputProcessor(object):
"""A delegate class that invokes the actual processors."""
installed_processors = {
'format': [
HeadersProcessor,
JSONProcessor,
XMLProcessor
],
'colors': [
PygmentsProcessor
]
}
def __init__(self, groups, env=Environment(), **kwargs):
"""
:param env: a :class:`models.Environment` instance
:param groups: the groups of processors to be applied
:param kwargs: additional keyword arguments for processors
"""
self.processors = []
for group in groups:
for cls in self.installed_processors[group]:
processor = cls(env, **kwargs)
if processor.enabled:
self.processors.append(processor)
def process_headers(self, headers):
for processor in self.processors:
headers = processor.process_headers(headers)
return headers
def process_body(self, content, content_type, encoding):
# e.g., 'application/atom+xml'
content_type = content_type.split(';')[0]
# e.g., 'xml'
subtype = content_type.split('/')[-1].split('+')[-1]
for processor in self.processors:
content = processor.process_body(
content,
content_type,
subtype,
encoding
)
return content
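# Illustrative sketch (added for clarity; not part of the original module):
# how OutputProcessor chains the installed processors. The style name below
# is an assumption; an unknown style falls back to Solarized256Style.
#
#     processor = OutputProcessor(groups=['format', 'colors'],
#                                 env=Environment(),
#                                 pygments_style='solarized')
#     pretty = processor.process_body(content='{"b": 1, "a": 2}',
#                                     content_type='application/json',
#                                     encoding='utf8')
#
# With the 'format' group enabled, JSONProcessor re-indents and sorts the
# keys before PygmentsProcessor applies colouring (when colours are enabled).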
| mit |
EttusResearch/gnuradio | gr-channels/python/channels/qa_channel_model.py | 47 | 1900 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks, channels
import math
class test_channel_model(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000(self):
N = 1000 # number of samples to use
fs = 1000 # baseband sampling rate
freq = 100
signal = analog.sig_source_c(fs, analog.GR_SIN_WAVE, freq, 1)
head = blocks.head(gr.sizeof_gr_complex, N)
op = channels.channel_model(0.0, 0.0, 1.0, [1,], 0)
snk = blocks.vector_sink_c()
snk1 = blocks.vector_sink_c()
op.set_noise_voltage(0.0)
op.set_frequency_offset(0.0)
op.set_taps([1,])
op.set_timing_offset(1.0)
self.tb.connect(signal, head, op, snk)
self.tb.connect(op, snk1)
self.tb.run()
dst_data = snk.data()
exp_data = snk1.data()
self.assertComplexTuplesAlmostEqual(exp_data, dst_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_channel_model, "test_channel_model.xml")
| gpl-3.0 |
wwright2/dcim3-angstrom1 | sources/openembedded-core/meta/lib/oeqa/utils/commands.py | 2 | 4475 | # Copyright (c) 2013-2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
import os
import sys
import signal
import subprocess
import threading
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, basestring):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
self.output = None
self.error = None
self.thread = None
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def commThread():
self.output, self.error = self.process.communicate(self.data)
self.thread = threading.Thread(target=commThread)
self.thread.start()
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
self.thread.join(self.timeout)
self.stop()
def stop(self):
if self.thread.isAlive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
self.thread.join(5)
if self.thread.isAlive():
self.process.kill()
self.thread.join()
self.output = self.output.rstrip()
self.status = self.process.poll()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
# bitbake -e output is really big
# and makes the log file useless
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
result = Result()
cmd = Command(command, timeout=timeout, **options)
cmd.run()
result.command = command
result.status = cmd.status
result.output = cmd.output
result.pid = cmd.process.pid
if result.status and not ignore_status:
if assert_error:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
else:
raise CommandError(result.status, command, result.output)
return result
def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **options):
if postconfig:
postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
ftools.write_file(postconfig_file, postconfig)
extra_args = "-R %s" % postconfig_file
else:
extra_args = ""
if isinstance(command, basestring):
cmd = "bitbake " + extra_args + " " + command
else:
cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
try:
return runCmd(cmd, ignore_status, timeout, **options)
finally:
if postconfig:
os.remove(postconfig_file)
def get_bb_env(target=None, postconfig=None):
if target:
return bitbake("-e %s" % target, postconfig=postconfig).output
else:
return bitbake("-e", postconfig=postconfig).output
def get_bb_var(var, target=None, postconfig=None):
val = None
bbenv = get_bb_env(target, postconfig=postconfig)
for line in bbenv.splitlines():
if line.startswith(var + "="):
val = line.split('=')[1]
val = val.replace('\"','')
break
return val
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
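# Illustrative usage sketch (added for clarity; not part of the original
# module); the command, recipe and variable names below are assumptions:
#
#     result = runCmd('bitbake-layers show-layers', ignore_status=True)
#     if result.status == 0:
#         print(result.output)
#
#     bitbake('core-image-minimal', postconfig='IMAGE_FSTYPES = "tar.gz"')
#     distro = get_bb_var('DISTRO')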
| mit |
technic-tec/onedrive-d-old | onedrive_d/od_onedrive_api.py | 2 | 23966 | #!/usr/bin/python3
"""
OneDrive REST API for onedrive_d.
Refer to http://msdn.microsoft.com/en-us/library/dn659752.aspx
Notes:
* The API object can be called by any arbitrary thread in the program.
* Calling get_instance() will return a singleton API object.
* When there is a network issue during an API call, the calling thread is put to sleep
and the thread manager will wake it up when the network seems fine. When the caller
is woken up, it will retry the function call that failed before.
* When refresh_token is set, API will try to get new access_token automatically
and retry the function call later.
Bullets 3 and 4 are like interrupt handling.
"""
import os
import json
import urllib
import functools
import fcntl
# import imghdr
import requests
# for debugging
from time import sleep
from . import od_glob
from . import od_thread_manager
api_instance = None
def get_instance():
global api_instance
if api_instance is None:
api_instance = OneDriveAPI(od_glob.APP_CLIENT_ID, od_glob.APP_CLIENT_SECRET)
return api_instance
class OneDriveAPIException(Exception):
def __init__(self, args=None):
super().__init__()
if args is None:
pass
elif 'error_description' in args:
self.errno = args['error']
self.message = args['error_description']
elif 'error' in args and 'code' in args['error']:
args = args['error']
self.errno = args['code']
self.message = args['message']
else:
self.errno = 0
self.message = ''
def __str__(self):
return '%s (%s)' % (self.message, self.errno)
class OneDriveAuthError(OneDriveAPIException):
"""
Raised when authentication fails.
"""
pass
class OneDriveServerInternalError(OneDriveAPIException):
pass
class OneDriveValueError(OneDriveAPIException):
"""
Raised when input to OneDriveAPI is invalid.
"""
pass
class OneDriveAPI:
CLIENT_SCOPE = ['wl.skydrive', 'wl.skydrive_update', 'wl.offline_access']
REDIRECT_URI = 'https://login.live.com/oauth20_desktop.srf'
OAUTH_AUTHORIZE_URI = 'https://login.live.com/oauth20_authorize.srf?'
OAUTH_TOKEN_URI = 'https://login.live.com/oauth20_token.srf'
OAUTH_SIGNOUT_URI = 'https://login.live.com/oauth20_logout.srf'
API_URI = 'https://apis.live.net/v5.0/'
FOLDER_TYPES = ['folder', 'album']
UNSUPPORTED_TYPES = ['notebook']
ROOT_ENTRY_ID = 'me/skydrive'
logger = od_glob.get_logger()
threadman = od_thread_manager.get_instance()
def __init__(self, client_id, client_secret, client_scope=CLIENT_SCOPE, redirect_uri=REDIRECT_URI):
self.client_access_token = None
self.client_refresh_token = None
self.client_id = client_id
self.client_secret = client_secret
self.client_scope = client_scope
self.client_redirect_uri = redirect_uri
self.http_client = requests.Session()
def parse_response(self, request, error, ok_status=requests.codes.ok):
ret = request.json()
if request.status_code != ok_status:
if 'code' in ret['error']:
if ret['error']['code'] == 'request_token_expired':
raise OneDriveAuthError(ret)
elif ret['error']['code'] == 'server_internal_error':
raise OneDriveServerInternalError(ret)
raise error(ret)
return ret
def auto_recover_auth_error(self):
"""
Note that this function still throws exceptions.
"""
if self.client_refresh_token is None:
raise OneDriveAuthError()
refreshed_token_set = self.refresh_token(self.client_refresh_token)
od_glob.get_config_instance().set_access_token(refreshed_token_set)
self.logger.info('auto refreshed API token in face of auth error.')
def get_auth_uri(self, display='touch', locale='en', state=''):
"""
Use the code returned in the final redirect URL to exchange for
an access token
http://msdn.microsoft.com/en-us/library/dn659750.aspx
"""
params = {
'client_id': self.client_id,
'scope': ' '.join(self.client_scope),
'response_type': 'code',
'redirect_uri': self.client_redirect_uri,
'display': display,
'locale': locale
}
if state != '':
params['state'] = state
return OneDriveAPI.OAUTH_AUTHORIZE_URI + urllib.parse.urlencode(params)
def is_signed_in(self):
return self.access_token is not None
def set_user_id(self, id):
self.user_id = id
def set_access_token(self, token):
self.client_access_token = token
self.http_client.headers.update({'Authorization': 'Bearer ' + token})
def set_refresh_token(self, token):
self.client_refresh_token = token
def get_access_token(self, code=None, uri=None):
"""
http://msdn.microsoft.com/en-us/library/dn659750.aspx
return a dict with keys token_type, expires_in, scope,
access_token, refresh_token, authentication_token
"""
if uri is not None and '?' in uri:
qs_dict = urllib.parse.parse_qs(uri.split('?')[1])
if 'code' in qs_dict:
code = qs_dict['code']
if code is None:
raise OneDriveValueError(
{'error': 'access_code_not_found', 'error_description': 'The access code is not specified.'})
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"redirect_uri": self.client_redirect_uri,
"code": code,
"grant_type": "authorization_code"
}
try:
request = requests.post(
OneDriveAPI.OAUTH_TOKEN_URI, data=params, verify=False)
response = self.parse_response(request, OneDriveAPIException)
self.set_access_token(response['access_token'])
self.set_refresh_token(response['refresh_token'])
self.set_user_id(response['user_id'])
return response
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
return self.get_access_token(code, uri)
def refresh_token(self, token):
params = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"redirect_uri": self.client_redirect_uri,
"refresh_token": token,
"grant_type": 'refresh_token'
}
while True:
try:
request = requests.post(OneDriveAPI.OAUTH_TOKEN_URI, data=params)
response = self.parse_response(request, OneDriveAPIException)
self.set_access_token(response['access_token'])
self.set_refresh_token(response['refresh_token'])
self.set_user_id(response['user_id'])
return response
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def sign_out(self):
while True:
try:
r = self.http_client.get(OneDriveAPI.OAUTH_SIGNOUT_URI + '?client_id=' + self.client_id + '&redirect_uri=' + self.client_redirect_uri)
return self.parse_response(r, OneDriveAuthError)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_recent_docs(self):
raise NotImplementedError('get_recent_docs is not implemented.')
def get_quota(self, user_id='me'):
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + user_id + '/skydrive/quota')
return self.parse_response(r, OneDriveAPIException)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_root_entry_name(self):
return self.ROOT_ENTRY_ID
def get_property(self, entry_id='me/skydrive'):
try:
r = self.http_client.get(OneDriveAPI.API_URI + entry_id)
return self.parse_response(r, OneDriveAPIException)
except OneDriveAuthError:
self.auto_recover_auth_error()
return self.get_property(entry_id)
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
return self.get_property(entry_id)
def set_property(self, entry_id, **kwargs):
"""
Different types of files have different RW fields.
Refer to http://msdn.microsoft.com/en-us/library/dn631831.aspx.
Example:
self.set_property(your_id, name = 'new name', description = 'new desc')
"""
headers = {
'Content-Type': 'application/json',
}
while True:
try:
r = self.http_client.put(
OneDriveAPI.API_URI + entry_id, data=json.dumps(kwargs), headers=headers)
return self.parse_response(r, OneDriveAPIException)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_link(self, entry_id, type='r'):
"""
Return a link to share the entry.
@param type: one of 'r' (default), 'rw', 'e' (short for 'embed').
"""
if type == 'r':
type = 'shared_read_link'
elif type == 'rw':
type = 'shared_edit_link'
else:
type = 'embed'
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + entry_id + '/' + type)
return self.parse_response(r, OneDriveAPIException)['source']
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def list_entries(self, folder_id='me/skydrive', type='files'):
"""
@param type: 'files' (default) for all files. 'shared' for shared files (used internally).
"""
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + folder_id + '/' + type)
return self.parse_response(r, OneDriveAPIException)['data']
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def list_shared_entries(self, user_id='me'):
return self.list_entries(user_id + '/skydrive', 'shared')
def mkdir(self, folder_name, parent_id='me/skydrive'):
if parent_id == '/':
parent_id = 'me/skydrive' # fix parent_id alias
data = {'name': folder_name}
headers = {'Content-Type': 'application/json'}
uri = OneDriveAPI.API_URI + parent_id
while True:
try:
r = self.http_client.post(uri, data=json.dumps(data), headers=headers)
return self.parse_response(r, OneDriveAPIException, requests.codes.created)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def cp(self, target_id, dest_folder_id, overwrite=True, type='COPY'):
"""
Return an entry dict if the operation succeeds.
@param overwrite: whether or not to overwrite an existing entry. True, False, None (ChooseNewName).
"""
if overwrite is None:
overwrite = 'ChooseNewName'
data = {'destination': dest_folder_id}
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.client_access_token
}
uri = OneDriveAPI.API_URI + target_id + '?overwrite=' + str(overwrite)
req = requests.Request(
type, uri, data=json.dumps(data), headers=headers).prepare()
while True:
try:
r = self.http_client.send(req)
return self.parse_response(r, OneDriveAPIException, requests.codes.created)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def mv(self, target_id, dest_folder_id, overwrite=True):
return self.cp(target_id, dest_folder_id, overwrite, 'MOVE')
def bits_put(self, name, folder_id, local_path=None, block_size=1048576):
"""
Upload a large file with Microsoft BITS API.
A detailed document: https://gist.github.com/rgregg/37ba8929768a62131e85
Official document: https://msdn.microsoft.com/en-us/library/aa362821%28v=vs.85%29.aspx
@param name: remote file name
@param folder_id: the folder_id returned by Live API
@param local_path: the local path of the file to upload
@param remote_path (X): the remote path to put the file.
@return None if an unrecoverable error occurs; or a file property dict.
"""
# get file size
try:
source_size = os.path.getsize(local_path)
except:
self.logger.error("cannot get file size of \"" + local_path + "\"")
return None
# produce request url
if '!' in folder_id:
# subfolder
bits_folder_id = folder_id.split('.')[-1]
url = "https://cid-" + self.user_id + \
".users.storage.live.com/items/" + bits_folder_id + "/" + name
elif folder_id != '':
# root folder
user_id = folder_id.split('.')[-1]
url = "https://cid-" + user_id + \
".users.storage.live.com/users/0x" + user_id + "/LiveFolders/" + name
# elif remote_path is not None:
# url = "https://cid-" + user_id + ".users.storage.live.com/users/0x" + user_id + "/LiveFolders/" + remote_path
else:
self.logger.error("cannot request BITS. folder_id is invalid.")
return None
# force refresh access token to get largest expiration time
try:
self.auto_recover_auth_error()
except Exception as e:
self.logger.error(e)
return None
# BITS: Create-Session
headers = {
'X-Http-Method-Override': 'BITS_POST',
'Content-Length': 0,
'BITS-Packet-Type': 'Create-Session',
'BITS-Supported-Protocols': '{7df0354d-249b-430f-820d-3d2a9bef4931}'
}
self.logger.debug('getting session token for BITS upload...')
while True:
try:
response = self.http_client.request('post', url, headers=headers)
if response.status_code != 201:
if 'www-authenticate' in response.headers and 'invalid_token' in response.headers['www-authenticate']:
response.close()
raise OneDriveAuthError()
else:
# unknown error should be further analyzed
self.logger.debug("failed BITS Create-Session request to upload \"%s\". HTTP %d.", local_path, response.status_code)
self.logger.debug(response.headers)
response.close()
return None
session_id = response.headers['bits-session-id']
response.close()
break
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
del headers
# BITS: upload file by blocks
# The authentication of this part relies on session_id, not access_token.
self.logger.debug('uploading file "%s".', local_path)
source_file = open(local_path, 'rb')
fcntl.lockf(source_file, fcntl.LOCK_SH)
source_cursor = 0
while source_cursor < source_size:
try:
target_cursor = min(source_cursor + block_size, source_size) - 1
source_file.seek(source_cursor)
data = source_file.read(target_cursor - source_cursor + 1)
self.logger.debug("uploading block %d - %d (total: %d B)", source_cursor, target_cursor, source_size)
response = self.http_client.request('post', url, data=data, headers={
'X-Http-Method-Override': 'BITS_POST',
'BITS-Packet-Type': 'Fragment',
'BITS-Session-Id': session_id,
'Content-Range': 'bytes {}-{}/{}'.format(source_cursor, target_cursor, source_size)
})
if response.status_code != requests.codes.ok:
# unknown error. better log it for future analysis
self.logger.debug('an error occurred uploading the block. HTTP %d.', response.status_code)
self.logger.debug(response.headers)
response.close()
fcntl.lockf(source_file, fcntl.LOCK_UN)
source_file.close()
# should I cancel session? https://msdn.microsoft.com/en-us/library/aa362829%28v=vs.85%29.aspx
return None
else:
source_cursor = int(response.headers['bits-received-content-range'])
response.close()
del data
# sleep(1)
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
del data
self.threadman.hang_caller()
fcntl.lockf(source_file, fcntl.LOCK_UN)
source_file.close()
# refresh token if expired
if od_glob.get_config_instance().is_token_expired():
try:
self.auto_recover_auth_error()
except Exception as e:
# this branch is horrible
self.logger.error(e)
return None
# BITS: close session
self.logger.debug('BITS upload completed. Closing session...')
headers = {
'X-Http-Method-Override': 'BITS_POST',
'BITS-Packet-Type': 'Close-Session',
'BITS-Session-Id': session_id,
'Content-Length': 0
}
while True:
try:
response = self.http_client.request('post', url, headers=headers)
if response.status_code != requests.codes.ok and response.status_code != requests.codes.created:
# when token expires, server return HTTP 500
# www-authenticate: 'Bearer realm="OneDriveAPI", error="expired_token", error_description="Auth token expired. Try refreshing."'
if 'www-authenticate' in response.headers and 'expired_token' in response.headers['www-authenticate']: # 'invalid_token' in response.headers['www-authenticate']:
response.close()
raise OneDriveAuthError()
else:
# however, when the token is changed,
# we will get HTTP 500 with 'x-clienterrorcode': 'UploadSessionNotFound'
self.logger.debug('An error occurred when closing BITS session. HTTP %d', response.status_code)
self.logger.debug(response.headers)
response.close()
return None
res_id = response.headers['x-resource-id']
response.close()
self.logger.debug('BITS session successfully closed.')
return self.get_property('file.' + res_id[:res_id.index('!')] + '.' + res_id)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
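# Illustrative sketch (added for clarity; not part of the original source):
# a bits_put call as the method above expects to be used. The folder id
# format is an assumption based on the parsing logic above
# ('folder.<user_id>.<resource_id>'), and the paths are placeholders.
#
#     entry = api.bits_put(name='backup.tar.gz',
#                          folder_id='folder.0123456789abcdef.0123456789abcdef!105',
#                          local_path='/tmp/backup.tar.gz')
#     if entry is not None:
#         print(entry['id'])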
def put(self, name, folder_id='me/skydrive', upload_location=None, local_path=None, data=None, overwrite=True):
"""
Upload the file or data to a path.
Returns a dict with keys 'source', 'name', and 'id'
@param name: the new name used for the uploaded FILE. Assuming the name is NTFS-compatible.
@param folder_id: the parent folder of the entry to upload. Default: root folder.
@param upload_location: OneDrive upload_location URL. If given, folder_id is ignored.
@param local_path: the local path of the FILE.
@param data: the data of the entry. If given, local_path is ignored.
@param overwrite: whether or not to overwrite existing files, if any.
To put an empty file, either point local_path to an empty file or set data to ''.
To upload a dir, check if it exists, and then send recursive puts to upload its files.
Another issue is timestamp correction.
"""
uri = OneDriveAPI.API_URI
if upload_location is not None:
uri += upload_location # already ends with '/'
else:
uri += folder_id + '/files/'
if name == '':
raise OneDriveValueError(
{'error': 'empty_name', 'error_description': 'The file name cannot be empty.'})
uri += name
d = {
'downsize_photo_uploads': False,
'overwrite': overwrite
}
uri += '?' + urllib.parse.urlencode(d)
if data is not None:
pass
elif local_path is not None:
if not os.path.isfile(local_path):
raise OneDriveValueError(
{'error': 'wrong_file_type', 'error_description': 'The local path "' + local_path + '" is not a file.'})
else:
data = open(local_path, 'rb')
else:
raise OneDriveValueError(
{'error': 'upload_null_content', 'error_description': 'local_path and data cannot both be null.'})
while True:
try:
r = self.http_client.put(uri, data=data)
ret = r.json()
if r.status_code != requests.codes.ok and r.status_code != requests.codes.created:
# TODO: try testing this
if 'error' in ret and 'code' in ret['error'] and ret['error']['code'] == 'request_token_expired':
raise OneDriveAuthError(ret)
else:
raise OneDriveAPIException(ret)
return self.get_property(ret['id'])
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def get_by_blocks(self, entry_id, local_path, file_size, block_size):
try:
f = open(local_path, 'wb')
except OSError as e:
self.logger.error(e)
return False
self.logger.debug('download file to "' + local_path + '"...')
# fcntl.lockf(f, fcntl.LOCK_SH)
cursor = 0
while cursor < file_size:
self.logger.debug('current cursor: ' + str(cursor))
try:
target = min(cursor + block_size, file_size) - 1
r = self.http_client.get(OneDriveAPI.API_URI + entry_id + '/content',
headers={
'Range': 'bytes={0}-{1}'.format(cursor, target)
})
if r.status_code == requests.codes.ok or r.status_code == requests.codes.partial:
# sample data: 'bytes 12582912-12927920/12927921'
range_unit, range_str = r.headers['content-range'].split(' ')
range_range, range_total = range_str.split('/')
range_from, range_to = range_range.split('-')
f.write(r.content)
cursor = int(range_to) + 1
r.close()
else:
if 'www-authenticate' in r.headers and 'invalid_token' in r.headers['www-authenticate']:
raise OneDriveAuthError()
else:
self.logger.debug('failed downloading block. HTTP %d.', r.status_code)
self.logger.debug(r.headers)
self.logger.debug(r.content)
return False
# return False
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
f.close()
self.logger.debug('file saved.')
# fcntl.lockf(f, fcntl.LOCK_UN)
return True
def get(self, entry_id, local_path=None):
"""
Fetching content of OneNote files will raise OneDriveAPIException:
Resource type 'notebook' doesn't support the path 'content'. (request_url_invalid)
"""
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + entry_id + '/content')
if r.status_code != requests.codes.ok:
ret = r.json()
# TODO: try testing this
if 'error' in ret and 'code' in ret['error'] and ret['error']['code'] == 'request_token_expired':
raise OneDriveAuthError(ret)
else:
raise OneDriveAPIException(ret)
if local_path is not None:
with open(local_path, 'wb') as f:
f.write(r.content)
return True
else:
return r.content
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def rm(self, entry_id):
"""
OneDrive API always returns HTTP 204.
"""
while True:
try:
self.http_client.delete(OneDriveAPI.API_URI + entry_id)
return
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
except OneDriveServerInternalError as e:
self.logger.error(e)
self.threadman.hang_caller()
def get_user_info(self, user_id='me'):
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + user_id)
return self.parse_response(r, OneDriveAPIException, requests.codes.ok)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
def get_contact_list(self, user_id='me'):
while True:
try:
r = self.http_client.get(OneDriveAPI.API_URI + user_id + '/friends')
return self.parse_response(r, OneDriveAPIException, requests.codes.ok)
except OneDriveAuthError:
self.auto_recover_auth_error()
except requests.exceptions.ConnectionError:
self.logger.info('network connection error.')
self.threadman.hang_caller()
| lgpl-3.0 |
scieloorg/wayta | setup.py | 1 | 1219 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'elasticsearch>=1.1.1',
'pyramid>=1.5.2',
'pyramid_chameleon>=0.3',
'pyramid_debugtoolbar>=2.1'
]
setup(name='wayta',
version='1.3.1',
description='A tool to suggest the name of an institution or country in the original form and language.',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='SciELO',
author_email='[email protected]',
url='http://docs.scielo.org',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = wayta:main
[console_scripts]
wayta_loaddata=processing.loaddata:main
""",
)
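# Illustrative usage sketch (added for clarity; not part of the original
# file); the .ini file name is an assumption:
#
#     pip install -e .
#     wayta_loaddata          # populate the search index (console_script above)
#     pserve development.ini  # serve the Pyramid application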
| bsd-2-clause |
zorroz/microblog | flask/lib/python2.7/site-packages/setuptools/sandbox.py | 221 | 9994 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import pkg_resources
if os.name == "java":
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools.compat import builtins, execfile
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
old_dir = os.getcwd()
save_argv = sys.argv[:]
save_path = sys.path[:]
setup_dir = os.path.abspath(os.path.dirname(setup_script))
temp_dir = os.path.join(setup_dir,'temp')
if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
save_tmp = tempfile.tempdir
save_modules = sys.modules.copy()
pr_state = pkg_resources.__getstate__()
try:
tempfile.tempdir = temp_dir
os.chdir(setup_dir)
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
DirectorySandbox(setup_dir).run(
lambda: execfile(
"setup.py",
{'__file__':setup_script, '__name__':'__main__'}
)
)
except SystemExit:
v = sys.exc_info()[1]
if v.args and v.args[0]:
raise
# Normal exit, just return
finally:
pkg_resources.__setstate__(pr_state)
sys.modules.update(save_modules)
# remove any modules imported within the sandbox
del_modules = [
mod_name for mod_name in sys.modules
if mod_name not in save_modules
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
]
list(map(sys.modules.__delitem__, del_modules))
os.chdir(old_dir)
sys.path[:] = save_path
sys.argv[:] = save_argv
tempfile.tempdir = save_tmp
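# Illustrative sketch (added for clarity; not part of the original module):
# run_setup() confines a third-party setup script to its own directory.
# The path and command below are placeholders:
#
#     run_setup('/tmp/somepkg/setup.py', ['bdist_egg'])
#
# Any attempt by that script to write outside /tmp/somepkg (other than the
# listed exceptions) raises SandboxViolation.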
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self,name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source,name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
return func()
finally:
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os,name)
def wrap(self,src,dst,*args,**kw):
if self._active:
src,dst = self._remap_pair(name,src,dst,*args,**kw)
return original(src,dst,*args,**kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return original(path,*args,**kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return self._remap_output(name, original(path,*args,**kw))
return original(path,*args,**kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os,name)
def wrap(self,*args,**kw):
retval = original(*args,**kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os,name): locals()[name] = _mk_query(name)
def _validate_path(self,path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self,operation,path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation+'-from',src,*args,**kw),
self._remap_input(operation+'-to',dst,*args,**kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
try:
from win32com.client.gencache import GetGeneratePath
_EXCEPTIONS.append(GetGeneratePath())
del GetGeneratePath
except ImportError:
# it appears pywin32 is not installed, so no need to exclude.
pass
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox,'')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path,mode,*args,**kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path,mode,*args,**kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst)
def open(self, file, flags, mode=0x1FF, *args, **kw): # 0777
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file,flags,mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/setuptools/compat.py | 456 | 2094 | import sys
import itertools
PY3 = sys.version_info >= (3,)
PY2 = not PY3
if PY2:
basestring = basestring
import __builtin__ as builtins
import ConfigParser
from StringIO import StringIO
BytesIO = StringIO
func_code = lambda o: o.func_code
func_globals = lambda o: o.func_globals
im_func = lambda o: o.im_func
from htmlentitydefs import name2codepoint
import httplib
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import BaseHTTPRequestHandler
iteritems = lambda o: o.iteritems()
long_type = long
maxsize = sys.maxint
unichr = unichr
unicode = unicode
bytes = str
from urllib import url2pathname, splittag, pathname2url
import urllib2
from urllib2 import urlopen, HTTPError, URLError, unquote, splituser
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
filterfalse = itertools.ifilterfalse
exec("""def reraise(tp, value, tb=None):
raise tp, value, tb""")
if PY3:
basestring = str
import builtins
import configparser as ConfigParser
from io import StringIO, BytesIO
func_code = lambda o: o.__code__
func_globals = lambda o: o.__globals__
im_func = lambda o: o.__func__
from html.entities import name2codepoint
import http.client as httplib
from http.server import HTTPServer, SimpleHTTPRequestHandler
from http.server import BaseHTTPRequestHandler
iteritems = lambda o: o.items()
long_type = int
maxsize = sys.maxsize
unichr = chr
unicode = str
bytes = bytes
from urllib.error import HTTPError, URLError
import urllib.request as urllib2
from urllib.request import urlopen, url2pathname, pathname2url
from urllib.parse import (
urlparse, urlunparse, unquote, splituser, urljoin, urlsplit,
urlunsplit, splittag,
)
filterfalse = itertools.filterfalse
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
| mit |
guewen/OpenUpgrade | addons/account_payment/wizard/__init__.py | 436 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_payment_order
import account_payment_populate_statement
import account_payment_pay
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhuwenping/python-for-android | python-build/python-libs/gdata/build/lib/gdata/health/service.py | 263 | 10007 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HealthService extends GDataService to streamline Google Health API access.
HealthService: Provides methods to interact with the profile, profile list,
and register/notices feeds. Extends GDataService.
HealthProfileQuery: Queries the Google Health Profile feed.
HealthProfileListQuery: Queries the Google Health Profile list feed.
"""
__author__ = '[email protected] (Eric Bidelman)'
import atom
import gdata.health
import gdata.service
class HealthService(gdata.service.GDataService):
"""Client extension for the Google Health service Document List feed."""
def __init__(self, email=None, password=None, source=None,
use_h9_sandbox=False, server='www.google.com',
additional_headers=None, **kwargs):
"""Creates a client for the Google Health service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
use_h9_sandbox: boolean (optional) True to issue requests against the
/h9 developer's sandbox.
server: string (optional) The name of the server to which a connection
will be opened.
additional_headers: dictionary (optional) Any additional headers which
should be included with CRUD operations.
kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
service = use_h9_sandbox and 'weaver' or 'health'
gdata.service.GDataService.__init__(
self, email=email, password=password, service=service, source=source,
server=server, additional_headers=additional_headers, **kwargs)
self.ssl = True
self.use_h9_sandbox = use_h9_sandbox
def __get_service(self):
return self.use_h9_sandbox and 'h9' or 'health'
def GetProfileFeed(self, query=None, profile_id=None):
"""Fetches the users Google Health profile feed.
Args:
query: HealthProfileQuery or string (optional) A query to use on the
profile feed. If None, a HealthProfileQuery is constructed.
profile_id: string (optional) The profile id to query the profile feed
with when using ClientLogin. Note: this parameter is ignored if
query is set.
Returns:
A gdata.health.ProfileFeed object containing the user's Health profile.
"""
if query is None:
projection = profile_id and 'ui' or 'default'
uri = HealthProfileQuery(
service=self.__get_service(), projection=projection,
profile_id=profile_id).ToUri()
elif isinstance(query, HealthProfileQuery):
uri = query.ToUri()
else:
uri = query
return self.GetFeed(uri, converter=gdata.health.ProfileFeedFromString)
def GetProfileListFeed(self, query=None):
"""Fetches the users Google Health profile feed.
Args:
query: HealthProfileListQuery or string (optional) A query to use
on the profile list feed. If None, a HealthProfileListQuery is
constructed to /health/feeds/profile/list or /h9/feeds/profile/list.
Returns:
A gdata.health.ProfileListFeed object containing the user's list
of profiles.
"""
if not query:
uri = HealthProfileListQuery(service=self.__get_service()).ToUri()
elif isinstance(query, HealthProfileListQuery):
uri = query.ToUri()
else:
uri = query
return self.GetFeed(uri, converter=gdata.health.ProfileListFeedFromString)
def SendNotice(self, subject, body=None, content_type='html',
ccr=None, profile_id=None):
"""Sends (posts) a notice to the user's Google Health profile.
Args:
subject: A string representing the message's subject line.
body: string (optional) The message body.
content_type: string (optional) The content type of the notice message
body. This parameter is only honored when a message body is
specified.
ccr: string (optional) The CCR XML document to reconcile into the
user's profile.
profile_id: string (optional) The profile id to work with when using
ClientLogin. Note: this parameter is ignored if query is set.
Returns:
A gdata.health.ProfileEntry object of the posted entry.
"""
if body:
content = atom.Content(content_type=content_type, text=body)
else:
content = body
entry = gdata.GDataEntry(
title=atom.Title(text=subject), content=content,
extension_elements=[atom.ExtensionElementFromString(ccr)])
projection = profile_id and 'ui' or 'default'
query = HealthRegisterQuery(service=self.__get_service(),
projection=projection, profile_id=profile_id)
return self.Post(entry, query.ToUri(),
converter=gdata.health.ProfileEntryFromString)
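# Illustrative usage sketch (added for clarity; not part of the original
# module); the credentials and CCR payload are placeholders:
#
#     client = HealthService(email='[email protected]', password='...',
#                            source='exampleCo-exampleApp-1',
#                            use_h9_sandbox=True)
#     client.ProgrammaticLogin()
#     profiles = client.GetProfileListFeed()
#     client.SendNotice('Lab result', body='<b>All clear</b>', ccr=ccr_xml)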
class HealthProfileQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Health profile feed."""
def __init__(self, service='health', feed='feeds/profile',
projection='default', profile_id=None, text_query=None,
params=None, categories=None):
"""Constructor for Health profile feed query.
Args:
service: string (optional) The service to query. Either 'health' or 'h9'.
feed: string (optional) The path for the feed. The default value is
'feeds/profile'.
projection: string (optional) The visibility of the data. Possible values
are 'default' for AuthSub and 'ui' for ClientLogin. If this value
is set to 'ui', the profile_id parameter should also be set.
profile_id: string (optional) The profile id to query. This should only
be used when using ClientLogin.
text_query: str (optional) The contents of the q query parameter. The
contents of the text_query are URL escaped upon conversion to a URI.
Note: this parameter can only be used on the register feed using
ClientLogin.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
"""
self.service = service
self.profile_id = profile_id
self.projection = projection
gdata.service.Query.__init__(self, feed=feed, text_query=text_query,
params=params, categories=categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Health
profile feed.
"""
old_feed = self.feed
self.feed = '/'.join([self.service, old_feed, self.projection])
if self.profile_id:
self.feed += '/' + self.profile_id
self.feed = '/%s' % (self.feed,)
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
class HealthProfileListQuery(gdata.service.Query):
"""Object used to construct a URI to query a Health profile list feed."""
def __init__(self, service='health', feed='feeds/profile/list'):
"""Constructor for Health profile list feed query.
Args:
service: string (optional) The service to query. Either 'health' or 'h9'.
feed: string (optional) The path for the feed. The default value is
'feeds/profile/list'.
"""
gdata.service.Query.__init__(self, feed)
self.service = service
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the
profile list feed.
"""
return '/%s' % ('/'.join([self.service, self.feed]),)
class HealthRegisterQuery(gdata.service.Query):
"""Object used to construct a URI to query a Health register/notice feed."""
def __init__(self, service='health', feed='feeds/register',
projection='default', profile_id=None):
"""Constructor for Health profile list feed query.
Args:
service: string (optional) The service to query. Either 'health' or 'h9'.
feed: string (optional) The path for the feed. The default value is
'feeds/register'.
projection: string (optional) The visibility of the data. Possible values
are 'default' for AuthSub and 'ui' for ClientLogin. If this value
is set to 'ui', the profile_id parameter should also be set.
profile_id: string (optional) The profile id to query. This should only
be used when using ClientLogin.
"""
gdata.service.Query.__init__(self, feed)
self.service = service
self.projection = projection
self.profile_id = profile_id
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI needed to interact with the register feed.
"""
old_feed = self.feed
self.feed = '/'.join([self.service, old_feed, self.projection])
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
if self.profile_id:
new_feed += '/' + self.profile_id
return '/%s' % (new_feed,)
| apache-2.0 |
pp-mo/iris | lib/iris/quickplot.py | 2 | 9074 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
High-level plotting extensions to :mod:`iris.plot`.
These routines work much like their :mod:`iris.plot` counterparts, but they
automatically add a plot title, axis titles, and a colour bar when appropriate.
See also: :ref:`matplotlib <matplotlib:users-guide-index>`.
"""
import cf_units
import matplotlib.pyplot as plt
import iris.config
import iris.coords
import iris.plot as iplt
def _use_symbol(units):
# For non-time units use the shortest unit representation.
# E.g. prefer 'K' over 'kelvin', but not '0.0174532925199433 rad'
# over 'degrees'
return (
not units.is_time()
and not units.is_time_reference()
and len(units.symbol) < len(str(units))
)
def _title(cube_or_coord, with_units):
if cube_or_coord is None or isinstance(cube_or_coord, int):
title = ""
else:
title = cube_or_coord.name().replace("_", " ").capitalize()
units = cube_or_coord.units
if with_units and not (
units.is_unknown()
or units.is_no_unit()
or units == cf_units.Unit("1")
):
if _use_symbol(units):
units = units.symbol
title += " / {}".format(units)
return title
def _label(cube, mode, result=None, ndims=2, coords=None, axes=None):
"""Puts labels on the current plot using the given cube."""
if axes is None:
axes = plt.gca()
axes.set_title(_title(cube, with_units=False))
if result is not None:
draw_edges = mode == iris.coords.POINT_MODE
bar = plt.colorbar(
result, orientation="horizontal", drawedges=draw_edges
)
has_known_units = not (
cube.units.is_unknown() or cube.units.is_no_unit()
)
if has_known_units and cube.units != cf_units.Unit("1"):
# Use shortest unit representation for anything other than time
if _use_symbol(cube.units):
bar.set_label(cube.units.symbol)
else:
bar.set_label(cube.units)
# Remove the tick which is put on the colorbar by default.
bar.ax.tick_params(length=0)
if coords is None:
plot_defn = iplt._get_plot_defn(cube, mode, ndims)
else:
plot_defn = iplt._get_plot_defn_custom_coords_picked(
cube, coords, mode, ndims=ndims
)
if ndims == 2:
if not iplt._can_draw_map(plot_defn.coords):
axes.set_ylabel(_title(plot_defn.coords[0], with_units=True))
axes.set_xlabel(_title(plot_defn.coords[1], with_units=True))
elif ndims == 1:
axes.set_xlabel(_title(plot_defn.coords[0], with_units=True))
axes.set_ylabel(_title(cube, with_units=True))
else:
msg = (
"Unexpected number of dimensions ({}) given to "
"_label.".format(ndims)
)
raise ValueError(msg)
def _label_with_bounds(cube, result=None, ndims=2, coords=None, axes=None):
_label(cube, iris.coords.BOUND_MODE, result, ndims, coords, axes)
def _label_with_points(cube, result=None, ndims=2, coords=None, axes=None):
_label(cube, iris.coords.POINT_MODE, result, ndims, coords, axes)
def _get_titles(u_object, v_object):
if u_object is None:
u_object = iplt._u_object_from_v_object(v_object)
xunits = u_object is not None and not u_object.units.is_time_reference()
yunits = not v_object.units.is_time_reference()
xlabel = _title(u_object, with_units=xunits)
ylabel = _title(v_object, with_units=yunits)
title = ""
if u_object is None:
title = _title(v_object, with_units=False)
elif isinstance(u_object, iris.cube.Cube) and not isinstance(
v_object, iris.cube.Cube
):
title = _title(u_object, with_units=False)
elif isinstance(v_object, iris.cube.Cube) and not isinstance(
u_object, iris.cube.Cube
):
title = _title(v_object, with_units=False)
return xlabel, ylabel, title
def _label_1d_plot(*args, **kwargs):
if len(args) > 1 and isinstance(
args[1], (iris.cube.Cube, iris.coords.Coord)
):
xlabel, ylabel, title = _get_titles(*args[:2])
else:
xlabel, ylabel, title = _get_titles(None, args[0])
axes = kwargs.pop("axes", None)
if len(kwargs) != 0:
msg = "Unexpected kwargs {} given to _label_1d_plot".format(
kwargs.keys()
)
raise ValueError(msg)
if axes is None:
axes = plt.gca()
axes.set_title(title)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
def contour(cube, *args, **kwargs):
"""
Draws contour lines on a labelled plot based on the given Cube.
With the basic call signature, contour "level" values are chosen
automatically::
contour(cube)
Supply a number to use *N* automatically chosen levels::
contour(cube, N)
Supply a sequence *V* to use explicitly defined levels::
contour(cube, V)
See :func:`iris.plot.contour` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.contour(cube, *args, **kwargs)
_label_with_points(cube, coords=coords, axes=axes)
return result
def contourf(cube, *args, **kwargs):
"""
Draws filled contours on a labelled plot based on the given Cube.
With the basic call signature, contour "level" values are chosen
automatically::
        contourf(cube)
    Supply a number to use *N* automatically chosen levels::
        contourf(cube, N)
    Supply a sequence *V* to use explicitly defined levels::
        contourf(cube, V)
See :func:`iris.plot.contourf` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.contourf(cube, *args, **kwargs)
_label_with_points(cube, result, coords=coords, axes=axes)
return result
def outline(cube, coords=None, color="k", linewidth=None, axes=None):
"""
Draws cell outlines on a labelled plot based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
* color: None or mpl color
The color of the cell outlines. If None, the matplotlibrc setting
patch.edgecolor is used by default.
* linewidth: None or number
The width of the lines showing the cell outlines. If None, the default
width in patch.linewidth in matplotlibrc is used.
"""
result = iplt.outline(
cube, color=color, linewidth=linewidth, coords=coords, axes=axes
)
_label_with_bounds(cube, coords=coords, axes=axes)
return result
def pcolor(cube, *args, **kwargs):
"""
Draws a labelled pseudocolor plot based on the given Cube.
See :func:`iris.plot.pcolor` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.pcolor(cube, *args, **kwargs)
_label_with_bounds(cube, result, coords=coords, axes=axes)
return result
def pcolormesh(cube, *args, **kwargs):
"""
Draws a labelled pseudocolour plot based on the given Cube.
See :func:`iris.plot.pcolormesh` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.pcolormesh(cube, *args, **kwargs)
_label_with_bounds(cube, result, coords=coords, axes=axes)
return result
def points(cube, *args, **kwargs):
"""
Draws sample point positions on a labelled plot based on the given Cube.
See :func:`iris.plot.points` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.points(cube, *args, **kwargs)
_label_with_points(cube, coords=coords, axes=axes)
return result
def plot(*args, **kwargs):
"""
Draws a labelled line plot based on the given cube(s) or
coordinate(s).
See :func:`iris.plot.plot` for details of valid arguments and
keyword arguments.
"""
axes = kwargs.get("axes")
result = iplt.plot(*args, **kwargs)
_label_1d_plot(*args, axes=axes)
return result
def scatter(x, y, *args, **kwargs):
"""
Draws a labelled scatter plot based on the given cubes or
coordinates.
See :func:`iris.plot.scatter` for details of valid arguments and
keyword arguments.
"""
axes = kwargs.get("axes")
result = iplt.scatter(x, y, *args, **kwargs)
_label_1d_plot(x, y, axes=axes)
return result
# Provide a convenience show method from pyplot.
show = plt.show
| lgpl-3.0 |
suizokukan/urwid | urwid/tests/test_str_util.py | 20 | 1258 | import unittest
from urwid.compat import B
from urwid.escape import str_util
class DecodeOneTest(unittest.TestCase):
def gwt(self, ch, exp_ord, exp_pos):
ch = B(ch)
o, pos = str_util.decode_one(ch,0)
assert o==exp_ord, " got:%r expected:%r" % (o, exp_ord)
assert pos==exp_pos, " got:%r expected:%r" % (pos, exp_pos)
def test1byte(self):
self.gwt("ab", ord("a"), 1)
self.gwt("\xc0a", ord("?"), 1) # error
def test2byte(self):
self.gwt("\xc2", ord("?"), 1) # error
self.gwt("\xc0\x80", ord("?"), 1) # error
self.gwt("\xc2\x80", 0x80, 2)
self.gwt("\xdf\xbf", 0x7ff, 2)
def test3byte(self):
self.gwt("\xe0", ord("?"), 1) # error
self.gwt("\xe0\xa0", ord("?"), 1) # error
self.gwt("\xe0\x90\x80", ord("?"), 1) # error
self.gwt("\xe0\xa0\x80", 0x800, 3)
self.gwt("\xef\xbf\xbf", 0xffff, 3)
def test4byte(self):
self.gwt("\xf0", ord("?"), 1) # error
self.gwt("\xf0\x90", ord("?"), 1) # error
self.gwt("\xf0\x90\x80", ord("?"), 1) # error
self.gwt("\xf0\x80\x80\x80", ord("?"), 1) # error
self.gwt("\xf0\x90\x80\x80", 0x10000, 4)
self.gwt("\xf3\xbf\xbf\xbf", 0xfffff, 4)
| lgpl-2.1 |
igabriel85/dmon-adp | misc/keras_test.py | 1 | 1530 | import numpy
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import os, sys
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
dataDir = os.path.join(os.path.dirname(os.path.abspath('')), 'data')
# load dataset
dataframe = pd.read_csv(os.path.join(dataDir, 'iris.csv'))
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# print Y
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)
# print dummy_y
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=20, verbose=1)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100)) | apache-2.0 |
dreadworks/college-cg | t2-objv/parser.py | 1 | 10996 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import numpy as np
from util import LOG
log = LOG.out.info
"""
Parser Library.
Every class gets instantiated with the
name of the file that should be parsed.
Every instance of a parser is bound to
a file and thus maintains the files data.
The classes should handle their parsing
internally without the need to invoke an
extra method that handles that actual parsing.
Even though this makes error handling of
the parsing more complicated (from the users
perspective), it enables the possibility
to work on very large data without the need
to store everything in memory at once.
"""
class ParserException(Exception):
def __str__(self):
return self.msg
def __init__(self, msg):
self.msg = msg
#
#
#
#
class ObjParser(object):
"""
Parses wavefront obj files.
Currently supported:
v, vn, f, s, o
Ignored:
usemtl
"""
class Obj(object):
"""
Parser for obj entity definitions. This
is the whole obj file with zero or one
"o" directives or the part of the obj
file denoted by "o <name>".
"""
def _parse(self, line):
"""
Takes a line and saves the data
found into the internal data structures.
:param line: One line of the obj file
:returns: None
:rtype: None
"""
dtype, data = re.split(r' ', line, maxsplit=1)
data = data.strip()
#
# VERTICES
#
if dtype == 'v':
data = map(float, data.split())
self._vertices.append(np.array(data, 'f'))
return
#
# NORMALS
#
if dtype == 'vn':
data = map(float, data.split())
self._normals.append(np.array(data, 'f'))
return
#
# FACES
#
# save indices of vertices and normals
# in self._vertices & self._normals
# and map the vertex to a set of points
# in self._v2vn
if dtype == 'f':
data = map(lambda s: s.split('/'), data.split())
vertices = map(lambda l: int(l[0]), data)
normals = []
normal = None # cache
for i, pair in enumerate(data):
# 'f v/vt/vn' case
if len(pair) == 3:
index = int(pair[2])
#
# calculate new surface normal
#
else:
if normal is not None:
index = len(self._normals) - 1
else:
self.stats['calculated normals'] += 1
# retrieve point coordinates
v = map(lambda j: self._vertices[j], vertices)
# create normalized vectors spanning a plane
vectors = (v[0] - v[1]), (v[0] - v[2])
# the surface normal is the cross product
# of the two vectors spanning the plane
normal = np.cross(*vectors)
normal = normal / np.linalg.norm(normal)
index = len(self._normals)
self._normals.append(normal)
# save normals index
normals.append(index)
vnstore = self._v2vn.setdefault(vertices[i], [])
vnstore.append(index)
self._faces += zip(vertices, normals)
return
#
# SMOOTHING GROUPS
#
if dtype == 's':
g = self._smoothing[-1]
facecount = len(self._faces)
if data == 'off':
if len(g) == 1:
self._smoothing[-1] += (facecount,)
return
if data == 'on':
if len(g) == 2:
self._smoothing.append((facecount,))
return
#
# IGNORE
#
if dtype == 'usemtl':
fmt = dtype, data
log('ignoring directive %s with data %s' % fmt)
return
#
# NOT FOUND
#
msg = 'Could not map directive "%s"'
raise ParserException(msg % dtype)
def _smooth(self, group):
"""
Calculates a new normal for a vertex
based on all the surface normals on
that particular point. The provided
group determines the range of faces
where the smoothing has to be applied.
:param group: Tuple denoting the smoothing range
:returns: None
:rtype: None
"""
log('smoothing normals between %d and %d' % group)
for i in range(*group):
v = self._faces[i][0]
# cache miss
if not type(self._v2vn[v]) is int:
# obtain normals
normals = self._v2vn[v]
normals = map(lambda i: self._normals[i], normals)
# calculate average normal
smoothed = sum(normals) / len(normals)
smoothed = smoothed / np.linalg.norm(smoothed)
# save smoothed normal
self._normals.append(smoothed)
self._v2vn[v] = len(self._normals) - 1
self.stats['smoothed normals'] += 1
# save new vn to the faces (v, vn) tuple
self._faces[i] = self._faces[i][0], self._v2vn[v]
def __init__(self, name, data):
"""
Initialize the object parser and
parse the file provided.
:param name: Name of the object
:param data: List of strings with obj definitions
:returns: self
:rtype: parser.ObjParser.Obj
"""
self._name = name
# used for an efficient calculation
# of smoothed surfaces and to retrieve
# normals per vertex when serving faces
self._v2vn = {}
# just for statistics
self.stats = {
'calculated normals': 0,
'smoothed normals': 0}
# enumerations in obj's begin
# with value 1 (for whatever reason...)
# hence the None element.
self._vertices = [None] # [None, (x, y, z)₀, ...] ()₀ is np.array
self._normals = [None] # [None, (x, y, z)₀, ...] ()₀ is np.array
self._smoothing = [(0,)] # Ranges of faces where
# smoothing is activated
self._faces = [] # [(v₀, vn₀), (v₁, vn₁), ...],
# v and vn as indices of elements in
# self._vertices and self._normals
# split data line-wise and remove
# empty lines and comments
sanitize = lambda s: s and not s.startswith('#')
data = filter(sanitize, data.split('\n'))
log('analyzing %d lines of raw data' % len(data))
# analyze data line by line
# note: len is an O(1) operation
for line in data:
try:
self._parse(line)
except Exception as e:
msg = 'Could not parse line "%s"\nbecause of %s: %s'
fmt = (line, type(e), str(e))
raise ParserException(msg % fmt)
# if smoothing never got
# explicitly turned off
if len(self._smoothing[-1]) == 1:
self._smoothing[-1] += (len(self._faces),)
# smooth if necessary
for group in self._smoothing:
self._smooth(group)
# for -verbose
fmt = [len(self._vertices), len(self._normals)]
fmt = tuple(map(lambda x: x - 1, fmt))
log('got %d vertices and %d normals' % fmt)
fmt = len(self._faces)
log('got %d vertex/vertex normal pairs for faces' % fmt)
fmt = self.stats['calculated normals']
log('calculated %d normals' % fmt)
fmt = self.stats['smoothed normals']
fmt = fmt, sum([y - x for x, y in self._smoothing])
log('calculated %d smoothed normals of %d definitions' % fmt)
#
# PROPERTIES
#
# @property
def name(self):
"""
            Returns the object's name
:returns: The name
:rtype: string
"""
return self._name
@property
def vertices(self):
"""
Returns a numpy array of vertex coordinates.
:returns: Vertex coordinates
:rtype: numpy.Array
"""
return np.array(self._vertices[1:], 'f')
@property
def faces(self):
"""
Returns the faces as a numpy array consisting
of v, vn pairs, where v are vertex coordinates
and vn surface normal coordinates.
:returns: Geometrical description of faces
:rtype: numpy.Array
"""
v, vn = zip(*self._faces)
v = map(lambda i: self._vertices[i], v)
vn = map(lambda i: self._normals[i], vn)
return np.array(zip(v, vn), 'f')
#
#
#
#
#
def __init__(self, fname):
"""
Parses an wavefront obj file. For every
defined object an ObjParser.Obj object
is created.
:param fname: File name
:returns: self
:rtype: parser.ObjParser
"""
log('parsing %s' % fname)
with open(fname) as f:
data = f.read()
self._objects = []
add = self._objects.append
log('parsing data of size %d' % len(data))
objs = re.split(r'^o (.*)', data)
# no 'o'-directive found
if len(objs) == 1:
add(ObjParser.Obj(fname.rstrip('.obj'), objs[0]))
# multiple objects per obj
for name, data in zip(objs[1::2], objs[2::2]):
add(ObjParser.Obj(name, data))
return
raise ParserException("Could not open file")
@property
def objects(self):
"""
Return all parsed objects
:returns: Parsed objects
:rtype: List of parse.ObjParser.Obj objects
"""
return self._objects
| mit |
fujunwei/chromium-crosswalk | tools/telemetry/telemetry/core/platform/power_monitor/power_monitor_controller.py | 69 | 1214 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.core.platform.power_monitor as power_monitor
class PowerMonitorController(power_monitor.PowerMonitor):
"""
  PowerMonitor that acts as a facade for a list of PowerMonitor objects and uses
the first available one.
"""
def __init__(self, power_monitors):
super(PowerMonitorController, self).__init__()
self._cascading_power_monitors = power_monitors
self._active_monitor = None
def _AsyncPowerMonitor(self):
return next(
(x for x in self._cascading_power_monitors if x.CanMonitorPower()),
None)
def CanMonitorPower(self):
return bool(self._AsyncPowerMonitor())
def StartMonitoringPower(self, browser):
self._active_monitor = self._AsyncPowerMonitor()
assert self._active_monitor, 'No available monitor.'
self._active_monitor.StartMonitoringPower(browser)
def StopMonitoringPower(self):
assert self._active_monitor, 'StartMonitoringPower() not called.'
try:
return self._active_monitor.StopMonitoringPower()
finally:
self._active_monitor = None
| bsd-3-clause |
globocom/oauth2u | tests/helpers.py | 1 | 2130 | import sys
import urllib
import cgi
import base64
import json
from functools import partial
import requests
__all__ = ('TEST_SERVER_HOST',
'build_root_url',
'build_basic_authorization_header',
'build_access_token_url',
'parse_json_response',
'parse_query_string',
'get_code_from_url',
'request_authorization_code')
TEST_SERVER_HOST = 'http://localhost:8888'
def build_url(host, path, query=None):
query = query or {}
return u'{0}/{1}?{2}'.format(host.rstrip('/'),
path.lstrip('/'),
urllib.urlencode(query))
build_root_url = partial(build_url, TEST_SERVER_HOST)
build_authorize_url = partial(build_url, TEST_SERVER_HOST, '/authorize')
build_access_token_url = partial(build_url, TEST_SERVER_HOST, '/access-token')
def parse_json_response(response):
assert 'application/json; charset=UTF-8' == response.headers['content-type']
return json.loads(response.content)
def parse_query_string(url):
url, query_string = url.split('?')
query = dict(cgi.parse_qsl(query_string))
return url, query
def get_code_from_url(url):
    ''' Given a url, returns the 'code' GET parameter '''
query = dict(cgi.parse_qsl(url.split('?')[1]))
return query['code']
def request_authorization_code(client_id='123',
redirect_uri='http://callback'):
''' Performs a GET on the authorization request url, waits for the
redirect and return the code provided
'''
url = build_authorize_url({'client_id': client_id,
'response_type': 'code',
'redirect_uri': redirect_uri})
resp = requests.get(url, allow_redirects=False)
assert 302 == resp.status_code
code = get_code_from_url(resp.headers['Location'])
return code
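# Example usage in a test (a minimal sketch; the client id is illustrative):
#
#     code = request_authorization_code(client_id='abc')
#     auth_header = build_basic_authorization_header('abc', code)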
def build_basic_authorization_header(client_id, code):
''' Build the value for a Basic ``Authorization`` HTTP header '''
digest = base64.b64encode('{0}:{1}'.format(client_id, code))
return 'Basic {0}'.format(digest)
| mit |
Conchylicultor/DeepQA | chatbot/corpus/scotusdata.py | 10 | 1602 | # Copyright 2015 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
"""
Load transcripts from the Supreme Court of the USA.
Available from here:
https://github.com/pender/chatbot-rnn
"""
class ScotusData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.lines = self.loadLines(os.path.join(dirName, "scotus"))
self.conversations = [{"lines": self.lines}]
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.index(":")+1:].strip() # Strip name of speaker.
lines.append({"text": l})
return lines
def getConversations(self):
return self.conversations
| apache-2.0 |
DevMine/devmine-core | devmine/__init__.py | 1 | 1683 | __devmine_version__ = '0.1.0'
__api_version__ = '1'
import logging
import bottle
from bottle.ext import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from devmine.app.models import Base
from devmine.config import routes
from devmine.lib import composition
class Devmine:
def __init__(self,
server='auto',
host='0.0.0.0',
port=8080,
db_url='sqlite:///:memory:',
db_echo=False,
reloader=False,
debug=False):
self.server_type = server
self.host = host
self.port = port
self.reloader = reloader
self.debug = debug
self.api_version = __api_version__
self.devmine_version = __devmine_version__
self.app = bottle.Bottle()
routes.setup_routing(self.app)
bottle.debug(self.debug)
engine = create_engine(db_url, echo=db_echo)
sqlalchemy_plugin = sqlalchemy.Plugin(
engine,
Base.metadata,
keyword='db',
create=True,
commit=True,
use_kwargs=False
)
self.app.install(sqlalchemy_plugin)
create_session = sessionmaker(bind=engine)
session = create_session()
logging.info('Prefetching the scores matrix...')
composition.get_scores_matrix(session)
session.close()
@staticmethod
def get_version():
"""Return devmine version."""
return __devmine_version__
@staticmethod
def get_api_version():
"""Return devmine API version."""
return __api_version__
| bsd-3-clause |
walterreade/scikit-learn | sklearn/externals/joblib/my_exceptions.py | 31 | 3690 | """
Exceptions
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import sys
from ._compat import PY3_OR_LATER
class JoblibException(Exception):
"""A simple exception with an error message that you can get to."""
def __init__(self, *args):
# We need to implement __init__ so that it is picked in the
# multiple heritance hierarchy in the class created in
# _mk_exception. Note: in Python 2, if you implement __init__
# in your exception class you need to set .args correctly,
# otherwise you can dump an exception instance with pickle but
# not load it (at load time an empty .args will be passed to
# the constructor). Also we want to be explicit and not use
# 'super' here. Using 'super' can cause a sibling class method
        # to be called and we have no control over the sibling class method
# constructor signature in the exception returned by
# _mk_exception.
Exception.__init__(self, *args)
def __repr__(self):
if hasattr(self, 'args') and len(self.args) > 0:
message = self.args[0]
else:
message = ''
name = self.__class__.__name__
return '%s\n%s\n%s\n%s' % (name, 75 * '_', message, 75 * '_')
__str__ = __repr__
class TransportableException(JoblibException):
"""An exception containing all the info to wrap an original
exception and recreate it.
"""
def __init__(self, message, etype):
# The next line set the .args correctly. This is needed to
# make the exception loadable with pickle
JoblibException.__init__(self, message, etype)
self.message = message
self.etype = etype
_exception_mapping = dict()
def _mk_exception(exception, name=None):
# Create an exception inheriting from both JoblibException
# and that exception
if name is None:
name = exception.__name__
this_name = 'Joblib%s' % name
if this_name in _exception_mapping:
# Avoid creating twice the same exception
this_exception = _exception_mapping[this_name]
else:
if exception is Exception:
# JoblibException is already a subclass of Exception. No
# need to use multiple inheritance
return JoblibException, this_name
try:
this_exception = type(
this_name, (JoblibException, exception), {})
_exception_mapping[this_name] = this_exception
except TypeError:
# This happens if "Cannot create a consistent method
# resolution order", e.g. because 'exception' is a
# subclass of JoblibException or 'exception' is not an
# acceptable base class
this_exception = JoblibException
return this_exception, this_name
def _mk_common_exceptions():
namespace = dict()
if PY3_OR_LATER:
import builtins as _builtin_exceptions
common_exceptions = filter(
lambda x: x.endswith('Error'),
dir(_builtin_exceptions))
else:
import exceptions as _builtin_exceptions
common_exceptions = dir(_builtin_exceptions)
for name in common_exceptions:
obj = getattr(_builtin_exceptions, name)
if isinstance(obj, type) and issubclass(obj, BaseException):
this_obj, this_name = _mk_exception(obj, name=name)
namespace[this_name] = this_obj
return namespace
# Updating module locals so that the exceptions pickle right. AFAIK this
# works only at module-creation time
locals().update(_mk_common_exceptions())
| bsd-3-clause |
alianmohammad/pd-gem5 | src/arch/micro_asm_test.py | 86 | 3195 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from micro_asm import MicroAssembler, Combinational_Macroop, Rom_Macroop, Rom
class Bah(object):
def __init__(self):
self.mnemonic = "bah"
class Bah_Tweaked(object):
def __init__(self):
self.mnemonic = "bah_tweaked"
class Hoop(object):
def __init__(self, first_param, second_param):
self.mnemonic = "hoop_%s_%s" % (first_param, second_param)
def __str__(self):
return "%s" % self.mnemonic
class Dah(object):
def __init__(self):
self.mnemonic = "dah"
microops = {
"bah": Bah,
"hoop": Hoop,
"dah": Dah
}
class TestMacroop(Combinational_Macroop):
def tweak(self):
microops["bah"] = Bah_Tweaked
def untweak(self):
microops["bah"] = Bah
def print_debug(self, message):
print message
def __init__(self, name):
super(TestMacroop, self).__init__(name)
self.directives = {
"tweak": self.tweak,
"untweak": self.untweak,
"print": self.print_debug
}
assembler = MicroAssembler(TestMacroop, microops, Rom('main ROM'), Rom_Macroop)
testAssembly = '''
# Single line comment
def rom {
goo: bah
extern la: hoop 4*8, "a"
}; /* multiline comment on one line */
/* multi line comment across lines
to make sure they work */
def macroop squishy {
.tweak
bah
.untweak
.print "In the midst"
bah
dah # single line comment after something
.tweak
};
#Extending the rom...
def rom
{
#Here's more stuff for the rom
bah
};
def macroop squashy {
bah
};
def macroop jumper (bar);
'''
assembler.assemble(testAssembly)
| bsd-3-clause |
akash1808/nova_test_latest | nova/tests/unit/scheduler/weights/test_weights_ioopsweight.py | 73 | 2785 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler IoOpsWeigher weights
"""
from nova.scheduler import weights
from nova.scheduler.weights import io_ops
from nova import test
from nova.tests.unit.scheduler import fakes
class IoOpsWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(IoOpsWeigherTestCase, self).setUp()
self.weight_handler = weights.HostWeightHandler()
self.weighers = [io_ops.IoOpsWeigher()]
def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
if io_ops_weight_multiplier is not None:
self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
return self.weight_handler.get_weighed_objects(self.weighers,
hosts, {})[0]
def _get_all_hosts(self):
host_values = [
('host1', 'node1', {'num_io_ops': 1}),
('host2', 'node2', {'num_io_ops': 2}),
('host3', 'node3', {'num_io_ops': 0}),
('host4', 'node4', {'num_io_ops': 4})
]
return [fakes.FakeHostState(host, node, values)
for host, node, values in host_values]
def _do_test(self, io_ops_weight_multiplier, expected_weight,
expected_host):
hostinfo_list = self._get_all_hosts()
weighed_host = self._get_weighed_host(hostinfo_list,
io_ops_weight_multiplier)
self.assertEqual(weighed_host.weight, expected_weight)
if expected_host:
self.assertEqual(weighed_host.obj.host, expected_host)
def test_io_ops_weight_multiplier_by_default(self):
self._do_test(io_ops_weight_multiplier=None,
expected_weight=0.0,
expected_host='host3')
def test_io_ops_weight_multiplier_zero_value(self):
# We do not know the host, all have same weight.
self._do_test(io_ops_weight_multiplier=0.0,
expected_weight=0.0,
expected_host=None)
def test_io_ops_weight_multiplier_positive_value(self):
self._do_test(io_ops_weight_multiplier=2.0,
expected_weight=2.0,
expected_host='host4')
| apache-2.0 |
jabez1314/shadowsocks | tests/coverage_server.py | 1072 | 1655 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
import tornado.ioloop
import tornado.web
import urllib
class MainHandler(tornado.web.RequestHandler):
def get(self, project):
try:
with open('/tmp/%s-coverage' % project, 'rb') as f:
coverage = f.read().strip()
n = int(coverage.strip('%'))
if n >= 80:
color = 'brightgreen'
else:
color = 'yellow'
self.redirect(('https://img.shields.io/badge/'
'coverage-%s-%s.svg'
'?style=flat') %
(urllib.quote(coverage), color))
except IOError:
raise tornado.web.HTTPError(404)
application = tornado.web.Application([
(r"/([a-zA-Z0-9\-_]+)", MainHandler),
])
if __name__ == "__main__":
application.listen(8888, address='127.0.0.1')
tornado.ioloop.IOLoop.instance().start()
| apache-2.0 |
BeATz-UnKNoWN/python-for-android | python-build/python-libs/gdata/src/gdata/alt/app_engine.py | 136 | 3386 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functions to persist serialized auth tokens in the datastore.
The get_token and set_token functions should be used in conjunction with
gdata.gauth's token_from_blob and token_to_blob to allow auth token objects
to be reused across requests. It is up to your own code to ensure that the
token key's are unique.
"""
__author__ = '[email protected] (Jeff Scudder)'
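# Example usage (a minimal sketch; the key is illustrative and `token` is
# assumed to be an existing gdata auth token object):
#
#     import gdata.gauth
#     from gdata.alt import app_engine
#
#     app_engine.set_token('unique-user-key', gdata.gauth.token_to_blob(token))
#     blob = app_engine.get_token('unique-user-key')
#     if blob is not None:
#         token = gdata.gauth.token_from_blob(blob)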
from google.appengine.ext import db
from google.appengine.api import memcache
class Token(db.Model):
"""Datastore Model which stores a serialized auth token."""
t = db.BlobProperty()
def get_token(unique_key):
"""Searches for a stored token with the desired key.
Checks memcache and then the datastore if required.
Args:
unique_key: str which uniquely identifies the desired auth token.
Returns:
A string encoding the auth token data. Use gdata.gauth.token_from_blob to
convert back into a usable token object. None if the token was not found
in memcache or the datastore.
"""
token_string = memcache.get(unique_key)
if token_string is None:
# The token wasn't in memcache, so look in the datastore.
token = Token.get_by_key_name(unique_key)
if token is None:
return None
return token.t
return token_string
def set_token(unique_key, token_str):
"""Saves the serialized auth token in the datastore.
The token is also stored in memcache to speed up retrieval on a cache hit.
Args:
unique_key: The unique name for this token as a string. It is up to your
code to ensure that this token value is unique in your application.
        Previous values will be silently overwritten.
token_str: A serialized auth token as a string. I expect that this string
will be generated by gdata.gauth.token_to_blob.
Returns:
True if the token was stored sucessfully, False if the token could not be
safely cached (if an old value could not be cleared). If the token was
set in memcache, but not in the datastore, this function will return None.
However, in that situation an exception will likely be raised.
Raises:
Datastore exceptions may be raised from the App Engine SDK in the event of
failure.
"""
# First try to save in memcache.
result = memcache.set(unique_key, token_str)
# If memcache fails to save the value, clear the cached value.
if not result:
result = memcache.delete(unique_key)
# If we could not clear the cached value for this token, refuse to save.
if result == 0:
return False
# Save to the datastore.
if Token(key_name=unique_key, t=token_str).put():
return True
return None
def delete_token(unique_key):
# Clear from memcache.
memcache.delete(unique_key)
# Clear from the datastore.
Token(key_name=unique_key).delete()
| apache-2.0 |
leafclick/intellij-community | python/helpers/py2only/docutils/languages/fr.py | 148 | 1893 | # $Id: fr.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Stefane Fermigier <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
u'author': u'Auteur',
u'authors': u'Auteurs',
u'organization': u'Organisation',
u'address': u'Adresse',
u'contact': u'Contact',
u'version': u'Version',
u'revision': u'R\u00e9vision',
u'status': u'Statut',
u'date': u'Date',
u'copyright': u'Copyright',
u'dedication': u'D\u00e9dicace',
u'abstract': u'R\u00e9sum\u00e9',
u'attention': u'Attention!',
u'caution': u'Avertissement!',
u'danger': u'!DANGER!',
u'error': u'Erreur',
u'hint': u'Indication',
u'important': u'Important',
u'note': u'Note',
u'tip': u'Astuce',
u'warning': u'Avis',
u'contents': u'Sommaire'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'auteur': u'author',
u'auteurs': u'authors',
u'organisation': u'organization',
u'adresse': u'address',
u'contact': u'contact',
u'version': u'version',
u'r\u00e9vision': u'revision',
u'statut': u'status',
u'date': u'date',
u'copyright': u'copyright',
u'd\u00e9dicace': u'dedication',
u'r\u00e9sum\u00e9': u'abstract'}
"""French (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
play113/swer | heekscnc-read-only/nc/nc.py | 25 | 20718 | ################################################################################
# nc.py
#
# Base class for NC code creation
# And global functions for calling current creator
#
# Hirutso Enni, 2009-01-13
# altered by Dan Falck 2010-08-04
# added tap() arguments Michael Haberler 2010-10-07
################################################################################
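# Example usage of the module-level functions below (a minimal sketch; the
# output file name and coordinates are illustrative, and in practice a
# machine-specific Creator subclass is installed as the global `creator`):
#
#     import nc
#
#     nc.output('example.tap')
#     nc.program_begin(1, 'example program')
#     nc.metric()
#     nc.feedrate(100)
#     nc.rapid(x=0, y=0, z=5)
#     nc.feed(z=-1)
#     nc.program_end()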
ncOFF = 0
ncLEFT = -1
ncRIGHT = +1
ncCW = -1
ncCCW = +1
ncMIST = 1
ncFLOOD = 2
################################################################################
class Creator:
def __init__(self):
pass
############################################################################
## Internals
def file_open(self, name):
self.file = open(name, 'w')
self.filename = name
def file_close(self):
self.file.close()
def write(self, s):
self.file.write(s)
############################################################################
## Programs
def program_begin(self, id, name=''):
"""Begin a program"""
pass
def add_stock(self, type_name, params):
pass
def program_stop(self, optional=False):
"""Stop the machine"""
pass
def program_end(self):
"""End the program"""
pass
def flush_nc(self):
"""Flush all pending codes"""
pass
############################################################################
## Subprograms
def sub_begin(self, id, name=''):
"""Begin a subprogram"""
pass
def sub_call(self, id):
"""Call a subprogram"""
pass
def sub_end(self):
"""Return from a subprogram"""
pass
############################################################################
## Settings
def imperial(self):
"""Set imperial units"""
pass
def metric(self):
"""Set metric units"""
pass
def absolute(self):
"""Set absolute coordinates"""
pass
def incremental(self):
"""Set incremental coordinates"""
pass
def polar(self, on=True):
"""Set polar coordinates"""
pass
def set_plane(self, plane):
"""Set plane"""
pass
def set_temporary_origin(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Set temporary origin G92"""
pass
def remove_temporary_origin(self):
"""Remote temporary origin G92.1"""
pass
############################################################################
## Tools
def tool_change(self, id):
"""Change the tool"""
pass
def tool_defn(self, id, name='', params=None):
"""Define a tool"""
pass
def offset_radius(self, id, radius=None):
"""Set tool radius offsetting"""
pass
def offset_length(self, id, length=None):
"""Set tool length offsetting"""
pass
def current_tool(self):
return None
############################################################################
## Datums
def datum_shift(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Shift the datum"""
pass
def datum_set(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Set the datum"""
pass
def workplane(self, id):
"""Set the workplane"""
pass
def clearanceplane(self,z=None):
"""set clearance plane"""
pass
############################################################################
## APT360 like Transformation Definitions
## These definitions were created while looking at Irvin Kraal's book on APT
    ## - Numerical Control Programming in APT - page 211
def matrix(self,a1=None,b1=None,c1=None,a2=None,b2=None,c2=None,a3=None,b3=None,c3=None):
"""Create a matrix for transformations"""
pass
def translate(self,x=None,y=None,z=None):
"""Translate in x,y,z direction"""
pass
def rotate(self,xyrot=None,yzrot=None,zxrot=None,angle=None):
"""Rotate about a coordinate axis"""
pass
def scale(self,k=None):
"""Scale by factor k"""
pass
def matrix_product(self,matrix1=None,matrix2=None):
"""Create matrix that is the product of two other matrices"""
pass
def mirror_plane(self,plane1=None,plane2=None,plane3=None):
"""Mirror image about one or more coordinate planes"""
pass
def mirror_line(self,line=None):
"""Mirror about a line"""
pass
############################################################################
## Rates + Modes
def feedrate(self, f):
"""Set the feedrate"""
pass
def feedrate_hv(self, fh, fv):
"""Set the horizontal and vertical feedrates"""
pass
def spindle(self, s, clockwise=True):
"""Set the spindle speed"""
pass
def coolant(self, mode=0):
"""Set the coolant mode"""
pass
def gearrange(self, gear=0):
"""Set the gear range"""
pass
############################################################################
## Moves
def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Rapid move"""
pass
def feed(self, x=None, y=None, z=None, a = None, b = None, c = None):
"""Feed move"""
pass
def arc_cw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None):
"""Clockwise arc move"""
pass
def arc_ccw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None):
"""Counterclockwise arc move"""
pass
def dwell(self, t):
"""Dwell"""
pass
def rapid_home(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Rapid relative to home position"""
pass
def rapid_unhome(self):
"""Return from rapid home"""
pass
def set_machine_coordinates(self):
"""Set machine coordinates"""
pass
############################################################################
## Cutter radius compensation
def use_CRC(self):
"""CRC"""
return False
############################################################################
## Cycles
def pattern(self):
"""Simple pattern eg. circle, rect"""
pass
def pocket(self):
"""Pocket routine"""
pass
def profile(self):
"""Profile routine"""
pass
def drill(self, x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance=None):
"""Drilling routines"""
pass
# original prototype was:
# def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None):
#
# current call is like so:
# tap(x=10, y=10, z=0, tap_mode=0, depth=12.7, standoff=6.35, direction=0, pitch=1.25)
# just add tap_mode & direction parameters
def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
"""Tapping routines"""
pass
def bore(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, feed_in=None, feed_out=None, stoppos=None, shift_back=None, shift_right=None, backbore=False, stop=False):
"""Boring routines"""
pass
def end_canned_cycle(self):
pass
############################################################################
## Misc
def comment(self, text):
"""Insert a comment"""
pass
def insert(self, text):
"""APT style INSERT statement"""
pass
def block_delete(self, on=False):
"""block to ignore if block delete switch is on"""
pass
def variable(self, id):
"""Insert a variable"""
pass
def variable_set(self, id, value):
"""Set a variable"""
pass
def probe_linear_centre_outside(self, x1=None, y1=None, depth=None, x2=None, y2=None ):
pass
def probe_single_point(self, point_along_edge_x=None, point_along_edge_y=None, depth=None, retracted_point_x=None, retracted_point_y=None, destination_point_x=None, destination_point_y=None, intersection_variable_x=None, intersection_variable_y=None, probe_offset_x_component=None, probe_offset_y_component=None ):
pass
def probe_downward_point(self, x=None, y=None, depth=None, intersection_variable_z=None):
pass
def report_probe_results(self, x1=None, y1=None, z1=None, x2=None, y2=None, z2=None, x3=None, y3=None, z3=None, x4=None, y4=None, z4=None, x5=None, y5=None, z5=None, x6=None, y6=None, z6=None, xml_file_name=None ):
pass
def open_log_file(self, xml_file_name=None ):
pass
def log_coordinate(self, x=None, y=None, z=None):
pass
def log_message(self, message=None):
pass
def close_log_file(self):
pass
def rapid_to_midpoint(self, x1=None, y1=None, z1=None, x2=None, y2=None, z2=None):
pass
def rapid_to_intersection(self, x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub):
pass
def rapid_to_rotated_coordinate(self, x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final):
pass
def set_path_control_mode(self, mode, motion_blending_tolerance, naive_cam_tolerance ):
pass
############################################################################
## NC code creator for additive machines like RepRap
def wipe(self):
"""wipe routine"""
pass
def extruder_on(self):
"""Turn on the extruder"""
pass
def extruder_off(self):
"""turn off the extruder"""
pass
def set_extruder_flowrate(self, flowrate):
"""Set the flowrate for the extruder"""
pass
def extruder_temp(self, temp):
"""Set the extruder temp in celsius"""
pass
def fan_on(self):
"""turn on the cooling fan"""
pass
def fan_off(self):
"""turn off the cooling fan"""
pass
def build_bed_temp(self, temp):
"""Set the bed temp in celsius"""
pass
def chamber_temp(self, temp):
"""Set the chamber temp in celsius"""
pass
def begin_ncblock(self):
# if the moves have come from backplotting nc code, then the nc code text can be given with these three functions
pass
def end_ncblock(self):
pass
def add_text(self, s, col, cdata):
pass
################################################################################
creator = Creator()
############################################################################
## Internals
def write(s):
creator.write(s)
def output(filename):
creator.file_open(filename)
############################################################################
## Programs
def program_begin(id, name=''):
creator.program_begin(id, name)
def add_stock(type_name, params):
creator.add_stock(type_name, params)
def program_stop(optional=False):
creator.program_stop(optional)
def program_end():
creator.program_end()
def flush_nc():
creator.flush_nc()
############################################################################
## Subprograms
def sub_begin(id, name=''):
creator.sub_begin(id, name)
def sub_call(id):
creator.sub_call(id)
def sub_end():
creator.sub_end()
############################################################################
## Settings
def imperial():
creator.imperial()
def metric():
creator.metric()
def absolute():
creator.absolute()
def incremental():
creator.incremental()
def polar(on=True):
creator.polar(on)
def set_plane(plane):
creator.set_plane(plane)
def set_temporary_origin(x=None, y=None, z=None, a=None, b=None, c=None):
creator.set_temporary_origin(x,y,z,a,b,c)
def remove_temporary_origin():
creator.remove_temporary_origin()
############################################################################
## Tools
def tool_change(id):
creator.tool_change(id)
def tool_defn(id, name='', params=None):
creator.tool_defn(id, name, params)
def offset_radius(id, radius=None):
creator.offset_radius(id, radius)
def offset_length(id, length=None):
creator.offset_length(id, length)
def current_tool(self):
return creator.current_tool()
############################################################################
## Datums
def datum_shift(x=None, y=None, z=None, a=None, b=None, c=None):
creator.datum_shift(x, y, z, a, b, c)
def datum_set(x=None, y=None, z=None, a=None, b=None, c=None):
creator.datum_set(x, y, z, a, b, c)
def workplane(id):
creator.workplane(id)
def clearanceplane(z=None):
creator.clearanceplane(z)
############################################################################
## APT360 like Transformation Definitions
## These definitions were created while looking at Irvin Kraal's book on APT
## - Numerical Control Programming in APT - page 211
def matrix(a1=None,b1=None,c1=None,a2=None,b2=None,c2=None,a3=None,b3=None,c3=None):
creator.matrix(a1,b1,c1,a2,b2,c2,a3,b3,c3)
def translate(x=None,y=None,z=None):
creator.translate(x,y,z)
def rotate(xyrot=None,yzrot=None,zxrot=None,angle=None):
creator.rotate(xyrot,yzrot,zxrot,angle)
def scale(k=None):
creator.scale(k)
def matrix_product(matrix1=None,matrix2=None):
creator.matrix_product(matrix1,matrix2)
def mirror_plane(plane1=None,plane2=None,plane3=None):
creator.mirror_plane(plane1,plane2,plane3)
def mirror_line(line=None):
creator.mirror_line(line)
############################################################################
## Rates + Modes
def feedrate(f):
creator.feedrate(f)
def feedrate_hv(fh, fv):
creator.feedrate_hv(fh, fv)
def spindle(s, clockwise=True):
creator.spindle(s, clockwise)
def coolant(mode=0):
creator.coolant(mode)
def gearrange(gear=0):
creator.gearrange(gear)
############################################################################
## Moves
def rapid(x=None, y=None, z=None, a=None, b=None, c=None):
creator.rapid(x, y, z, a, b, c)
def feed(x=None, y=None, z=None, a = None, b = None, c = None):
creator.feed(x, y, z)
def arc_cw(x=None, y=None, z=None, i=None, j=None, k=None, r=None):
creator.arc_cw(x, y, z, i, j, k, r)
def arc_ccw(x=None, y=None, z=None, i=None, j=None, k=None, r=None):
creator.arc_ccw(x, y, z, i, j, k, r)
def dwell(t):
creator.dwell(t)
def rapid_home(x=None, y=None, z=None, a=None, b=None, c=None):
creator.rapid_home(x, y, z, a, b, c)
def rapid_unhome():
creator.rapid_unhome()
def set_machine_coordinates():
creator.set_machine_coordinates()
############################################################################
## Cutter radius compensation
def use_CRC():
return creator.use_CRC()
def CRC_nominal_path():
return creator.CRC_nominal_path()
def start_CRC(left = True, radius = 0.0):
creator.start_CRC(left, radius)
def end_CRC():
creator.end_CRC()
############################################################################
## Cycles
def pattern():
creator.pattern()
def pocket():
creator.pocket()
def profile():
creator.profile()
def drill(x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance=None):
creator.drill(x, y, dwell, depthparams, retract_mode, spindle_mode, internal_coolant_on, rapid_to_clearance)
def tap(x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
creator.tap(x, y, z, zretract, depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction)
def bore(x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, feed_in=None, feed_out=None, stoppos=None, shift_back=None, shift_right=None, backbore=False, stop=False):
    creator.bore(x, y, z, zretract, depth, standoff, dwell_bottom, feed_in, feed_out, stoppos, shift_back, shift_right, backbore, stop)
def end_canned_cycle():
creator.end_canned_cycle()
def peck(count, first, last=None, step=0.0):
pecks = []
peck = first
if (last == None) : last = first
for i in range(0,count):
pecks.append(peck)
if (peck - step > last) : peck -= step
return pecks
############################################################################
## Misc
def comment(text):
creator.comment(text)
def insert(text):
creator.insert(text)
def block_delete(on=False):
creator.block_delete(on)
def variable(id):
creator.variable(id)
def variable_set(id, value):
creator.variable_set(id, value)
def probe_single_point(point_along_edge_x=None, point_along_edge_y=None, depth=None, retracted_point_x=None, retracted_point_y=None, destination_point_x=None, destination_point_y=None, intersection_variable_x=None, intersection_variable_y=None, probe_offset_x_component=None, probe_offset_y_component=None ):
creator.probe_single_point(point_along_edge_x, point_along_edge_y, depth, retracted_point_x, retracted_point_y, destination_point_x, destination_point_y, intersection_variable_x, intersection_variable_y, probe_offset_x_component, probe_offset_y_component )
def probe_downward_point(x=None, y=None, depth=None, intersection_variable_z=None):
creator.probe_downward_point(x, y, depth, intersection_variable_z)
def report_probe_results(x1=None, y1=None, z1=None, x2=None, y2=None, z2=None, x3=None, y3=None, z3=None, x4=None, y4=None, z4=None, x5=None, y5=None, z5=None, x6=None, y6=None, z6=None, xml_file_name=None ):
creator.report_probe_results(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4, x5, y5, z5, x6, y6, z6, xml_file_name)
def open_log_file(xml_file_name=None ):
creator.open_log_file(xml_file_name)
def log_coordinate(x=None, y=None, z=None):
creator.log_coordinate(x, y, z)
def log_message(message=None):
creator.log_message(message)
def close_log_file():
creator.close_log_file()
def rapid_to_midpoint(x1=None, y1=None, z1=None, x2=None, y2=None, z2=None):
creator.rapid_to_midpoint(x1, y1, z1, x2, y2, z2)
def rapid_to_intersection(x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub):
creator.rapid_to_intersection(x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub)
def rapid_to_rotated_coordinate(x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final):
creator.rapid_to_rotated_coordinate(x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final)
def set_path_control_mode(mode, motion_blending_tolerance, naive_cam_tolerance ):
creator.set_path_control_mode(mode, motion_blending_tolerance, naive_cam_tolerance )
############################################################################
## NC code creator for additive machines like RepRap
def wipe():
creator.wipe()
def extruder_on():
creator.extruder_on()
def extruder_off():
creator.extruder_off()
def set_extruder_flowrate(flowrate):
creator.set_extruder_flowrate(flowrate)
def extruder_temp(temp=None):
creator.extruder_temp(temp)
def fan_on():
creator.fan_on()
def fan_off():
creator.fan_off()
def build_bed_temp(temp=None):
creator.build_bed_temp(temp)
def chamber_temp(temp=None):
creator.chamber_temp(temp)
| mit |
ParticulateSolutions/django-paydirekt | django_paydirekt/migrations/0001_initial.py | 1 | 4278 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='PaydirektCapture',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(verbose_name='amount', max_digits=9, decimal_places=2)),
('transaction_id', models.CharField(unique=True, max_length=255, verbose_name='transaction id')),
('final', models.BooleanField(default=False, verbose_name='final')),
('link', models.URLField(verbose_name='link')),
('status', models.CharField(max_length=255, verbose_name='status', blank=True)),
('capture_type', models.CharField(max_length=255, verbose_name='capture type', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='last modified')),
],
options={
'verbose_name': 'Paydirekt Capture',
'verbose_name_plural': 'Paydirekt Captures',
},
),
migrations.CreateModel(
name='PaydirektCheckout',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('checkout_id', models.CharField(unique=True, max_length=255, verbose_name='checkout id')),
('payment_type', models.CharField(max_length=255, verbose_name='payment type')),
('total_amount', models.DecimalField(verbose_name='total amount', max_digits=9, decimal_places=2)),
('status', models.CharField(max_length=255, verbose_name='status', blank=True)),
('link', models.URLField(verbose_name='link')),
('approve_link', models.URLField(verbose_name='approve link')),
('close_link', models.URLField(verbose_name='close link', blank=True)),
('captures_link', models.URLField(verbose_name='captures link', blank=True)),
('refunds_link', models.URLField(verbose_name='refunds link', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='last modified')),
],
options={
'verbose_name': 'Paydirekt Checkout',
'verbose_name_plural': 'Paydirekt Checkouts',
},
),
migrations.CreateModel(
name='PaydirektRefund',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(verbose_name='amount', max_digits=9, decimal_places=2)),
('transaction_id', models.CharField(unique=True, max_length=255, verbose_name='transaction id')),
('link', models.URLField(verbose_name='link')),
('status', models.CharField(max_length=255, verbose_name='status', blank=True)),
('refund_type', models.CharField(max_length=255, verbose_name='refund type', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='last modified')),
('checkout', models.ForeignKey(related_name='refunds', verbose_name='checkout', to='django_paydirekt.PaydirektCheckout', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'Paydirekt Refund',
                'verbose_name_plural': 'Paydirekt Refunds',
},
),
migrations.AddField(
model_name='paydirektcapture',
name='checkout',
field=models.ForeignKey(related_name='captures', verbose_name='checkout', to='django_paydirekt.PaydirektCheckout', on_delete=models.CASCADE),
),
]
| mit |
MrLoick/python-for-android | python-modules/twisted/twisted/test/test_stringtransport.py | 56 | 9941 | # Copyright (c) 2009-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.test.proto_helpers}.
"""
from zope.interface.verify import verifyObject
from twisted.internet.interfaces import (ITransport, IPushProducer, IConsumer,
IReactorTCP, IReactorSSL, IReactorUNIX, IAddress, IListeningPort,
IConnector)
from twisted.internet.address import IPv4Address
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import (StringTransport, MemoryReactor,
RaisingMemoryReactor)
from twisted.internet.protocol import ClientFactory, Factory
class StringTransportTests(TestCase):
"""
Tests for L{twisted.test.proto_helpers.StringTransport}.
"""
def setUp(self):
self.transport = StringTransport()
def test_interfaces(self):
"""
L{StringTransport} instances provide L{ITransport}, L{IPushProducer},
and L{IConsumer}.
"""
self.assertTrue(verifyObject(ITransport, self.transport))
self.assertTrue(verifyObject(IPushProducer, self.transport))
self.assertTrue(verifyObject(IConsumer, self.transport))
def test_registerProducer(self):
"""
L{StringTransport.registerProducer} records the arguments supplied to
it as instance attributes.
"""
producer = object()
streaming = object()
self.transport.registerProducer(producer, streaming)
self.assertIdentical(self.transport.producer, producer)
self.assertIdentical(self.transport.streaming, streaming)
def test_disallowedRegisterProducer(self):
"""
L{StringTransport.registerProducer} raises L{RuntimeError} if a
producer is already registered.
"""
producer = object()
self.transport.registerProducer(producer, True)
self.assertRaises(
RuntimeError, self.transport.registerProducer, object(), False)
self.assertIdentical(self.transport.producer, producer)
self.assertTrue(self.transport.streaming)
def test_unregisterProducer(self):
"""
L{StringTransport.unregisterProducer} causes the transport to forget
about the registered producer and makes it possible to register a new
one.
"""
oldProducer = object()
newProducer = object()
self.transport.registerProducer(oldProducer, False)
self.transport.unregisterProducer()
self.assertIdentical(self.transport.producer, None)
self.transport.registerProducer(newProducer, True)
self.assertIdentical(self.transport.producer, newProducer)
self.assertTrue(self.transport.streaming)
def test_invalidUnregisterProducer(self):
"""
L{StringTransport.unregisterProducer} raises L{RuntimeError} if called
when no producer is registered.
"""
self.assertRaises(RuntimeError, self.transport.unregisterProducer)
def test_initialProducerState(self):
"""
L{StringTransport.producerState} is initially C{'producing'}.
"""
self.assertEqual(self.transport.producerState, 'producing')
def test_pauseProducing(self):
"""
L{StringTransport.pauseProducing} changes the C{producerState} of the
transport to C{'paused'}.
"""
self.transport.pauseProducing()
self.assertEqual(self.transport.producerState, 'paused')
def test_resumeProducing(self):
"""
L{StringTransport.resumeProducing} changes the C{producerState} of the
transport to C{'producing'}.
"""
self.transport.pauseProducing()
self.transport.resumeProducing()
self.assertEqual(self.transport.producerState, 'producing')
def test_stopProducing(self):
"""
L{StringTransport.stopProducing} changes the C{'producerState'} of the
transport to C{'stopped'}.
"""
self.transport.stopProducing()
self.assertEqual(self.transport.producerState, 'stopped')
def test_stoppedTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_stoppedTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_disconnectingTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_disconnectingTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_loseConnectionSetsDisconnecting(self):
"""
L{StringTransport.loseConnection} toggles the C{disconnecting} instance
variable to C{True}.
"""
self.assertFalse(self.transport.disconnecting)
self.transport.loseConnection()
self.assertTrue(self.transport.disconnecting)
def test_specifiedHostAddress(self):
"""
If a host address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getHost}.
"""
address = object()
self.assertIdentical(StringTransport(address).getHost(), address)
def test_specifiedPeerAddress(self):
"""
If a peer address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getPeer}.
"""
address = object()
self.assertIdentical(
StringTransport(peerAddress=address).getPeer(), address)
def test_defaultHostAddress(self):
"""
If no host address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getHost}.
"""
address = StringTransport().getHost()
self.assertIsInstance(address, IPv4Address)
def test_defaultPeerAddress(self):
"""
If no peer address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getPeer}.
"""
address = StringTransport().getPeer()
self.assertIsInstance(address, IPv4Address)
class ReactorTests(TestCase):
"""
Tests for L{MemoryReactor} and L{RaisingMemoryReactor}.
"""
def test_memoryReactorProvides(self):
"""
L{MemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
memoryReactor = MemoryReactor()
verifyObject(IReactorTCP, memoryReactor)
verifyObject(IReactorSSL, memoryReactor)
verifyObject(IReactorUNIX, memoryReactor)
def test_raisingReactorProvides(self):
"""
L{RaisingMemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
raisingReactor = RaisingMemoryReactor()
verifyObject(IReactorTCP, raisingReactor)
verifyObject(IReactorSSL, raisingReactor)
verifyObject(IReactorUNIX, raisingReactor)
def test_connectDestination(self):
"""
L{MemoryReactor.connectTCP}, L{MemoryReactor.connectSSL}, and
L{MemoryReactor.connectUNIX} will return an L{IConnector} whose
C{getDestination} method returns an L{IAddress} with attributes which
reflect the values passed.
"""
memoryReactor = MemoryReactor()
for connector in [memoryReactor.connectTCP(
"test.example.com", 8321, ClientFactory()),
memoryReactor.connectSSL(
"test.example.com", 8321, ClientFactory(),
None)]:
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEquals(address.host, "test.example.com")
self.assertEquals(address.port, 8321)
connector = memoryReactor.connectUNIX("/fake/path", ClientFactory())
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEquals(address.name, "/fake/path")
def test_listenDefaultHost(self):
"""
L{MemoryReactor.listenTCP}, L{MemoryReactor.listenSSL} and
L{MemoryReactor.listenUNIX} will return an L{IListeningPort} whose
C{getHost} method returns an L{IAddress}; C{listenTCP} and C{listenSSL}
will have a default host of C{'0.0.0.0'}, and a port that reflects the
value passed, and C{listenUNIX} will have a name that reflects the path
passed.
"""
memoryReactor = MemoryReactor()
for port in [memoryReactor.listenTCP(8242, Factory()),
memoryReactor.listenSSL(8242, Factory(), None)]:
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEquals(address.host, '0.0.0.0')
self.assertEquals(address.port, 8242)
port = memoryReactor.listenUNIX("/path/to/socket", Factory())
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEquals(address.name, "/path/to/socket") | apache-2.0 |
jagg81/translate-toolkit | translate/lang/si.py | 4 | 1048 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents Sinhala language.
For more information, see U{http://en.wikipedia.org/wiki/Sinhala_language}
"""
from translate.lang import common
class si(common.Common):
"""This class represents Sinhala."""
ignoretests = ["startcaps", "simplecaps"]
| gpl-2.0 |
geodrinx/gearthview | ext-libs/twisted/internet/_posixserialport.py | 42 | 2068 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial Port Protocol
"""
# system imports
import os, errno
# dependent on pyserial ( http://pyserial.sf.net/ )
# only tested w/ 1.18 (5 Dec 2002)
import serial
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
from serial import STOPBITS_ONE, STOPBITS_TWO
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
from serialport import BaseSerialPort
# twisted imports
from twisted.internet import abstract, fdesc, main
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
"""
A select()able serial device, acting as a transport.
"""
connected = 1
def __init__(self, protocol, deviceNameOrPortNumber, reactor,
baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
stopbits = STOPBITS_ONE, timeout = 0, xonxoff = 0, rtscts = 0):
abstract.FileDescriptor.__init__(self, reactor)
self._serial = self._serialFactory(
deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
parity=parity, stopbits=stopbits, timeout=timeout,
xonxoff=xonxoff, rtscts=rtscts)
self.reactor = reactor
self.flushInput()
self.flushOutput()
self.protocol = protocol
self.protocol.makeConnection(self)
self.startReading()
def fileno(self):
return self._serial.fd
def writeSomeData(self, data):
"""
Write some data to the serial device.
"""
return fdesc.writeToFD(self.fileno(), data)
def doRead(self):
"""
Some data's readable from serial device.
"""
return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
def connectionLost(self, reason):
"""
Called when the serial port disconnects.
Will call C{connectionLost} on the protocol that is handling the
serial data.
"""
abstract.FileDescriptor.connectionLost(self, reason)
self._serial.close()
self.protocol.connectionLost(reason)
| gpl-3.0 |
GeoNode/geonode | geonode/monitoring/views.py | 4 | 27048 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import pytz
from datetime import datetime, timedelta
from django.shortcuts import render
from django import forms
from django.contrib import auth
from django.conf import settings
from django.views.generic.base import View
from django.urls import reverse
from django.core.management import call_command
from django.views.decorators.csrf import csrf_exempt
from geonode.decorators import view_decorator, superuser_protected
from geonode.utils import json_response
from geonode.monitoring.collector import CollectorAPI
from geonode.monitoring.models import (
Service,
Host,
Metric,
ServiceTypeMetric,
MetricLabel,
MonitoredResource,
ExceptionEvent,
EventType,
NotificationCheck,
MetricNotificationCheck,
)
from geonode.monitoring.models import do_autoconfigure
from geonode.monitoring.utils import TypeChecks, dump, extend_datetime_input_formats
from geonode.monitoring.service_handlers import exposes
# Create your views here.
capi = CollectorAPI()
class MetricsList(View):
def get(self, *args, **kwargs):
_metrics = capi.get_metric_names()
out = []
for srv, mlist in _metrics:
out.append({'service': srv.name,
'metrics': [{'name': m.name, 'unit': m.unit, 'type': m.type}
for m in mlist]})
return json_response({'metrics': out})
class ServicesList(View):
def get_queryset(self):
return Service.objects.filter(active=True).select_related()
def get(self, *args, **kwargs):
q = self.get_queryset()
out = []
for item in q:
out.append({'name': item.name,
'host': item.host.name,
'id': item.id,
'type': item.service_type.name,
'check_interval': item.check_interval.total_seconds(),
'last_check': item.last_check})
return json_response({'services': out})
class HostsList(View):
def get_queryset(self):
return Host.objects.filter(active=True).select_related()
def get(self, *args, **kwargs):
q = self.get_queryset()
out = []
for item in q:
out.append({'name': item.name, 'ip': item.ip})
return json_response({'hosts': out})
class _ValidFromToLastForm(forms.Form):
valid_from = forms.DateTimeField(
required=False,
input_formats=extend_datetime_input_formats(['%Y-%m-%dT%H:%M:%S.%fZ'])
)
valid_to = forms.DateTimeField(
required=False,
input_formats=extend_datetime_input_formats(['%Y-%m-%dT%H:%M:%S.%fZ'])
)
interval = forms.IntegerField(min_value=60, required=False)
last = forms.IntegerField(min_value=60, required=False)
def _check_timestamps(self):
last = self.cleaned_data.get('last')
vf = self.cleaned_data.get('valid_from')
vt = self.cleaned_data.get('valid_to')
if last and (vf or vt):
raise forms.ValidationError(
'Cannot use last and valid_from/valid_to at the same time')
def clean(self):
super(_ValidFromToLastForm, self).clean()
self._check_timestamps()
class CheckTypeForm(_ValidFromToLastForm):
"""
Special form class to validate values from specific db dictionaries
(services, resources, ows services etc)
"""
def _check_type(self, tname):
"""
Returns tname-specific object instance from db.
        Internally it uses geonode.monitoring.utils.TypeChecks
to resolve field's value to object.
"""
d = self.cleaned_data
if not d:
return
val = d[tname]
if not val:
return
tcheck = getattr(TypeChecks, f'{tname}_type', None)
if not tcheck:
raise forms.ValidationError(f"No type check for {tname}")
try:
return tcheck(val)
except (Exception,) as err:
raise forms.ValidationError(err)
class MetricsFilters(CheckTypeForm):
GROUP_BY_RESOURCE = 'resource'
GROUP_BY_RESOURCE_ON_LABEL = 'resource_on_label'
GROUP_BY_RESOURCE_ON_USER = 'resource_on_user'
GROUP_BY_COUNT_ON_RESOURCE = 'count_on_resource'
GROUP_BY_LABEL = 'label'
GROUP_BY_USER = 'user'
GROUP_BY_USER_ON_LABEL = 'user_on_label'
GROUP_BY_EVENT_TYPE = 'event_type'
GROUP_BY_EVENT_TYPE_ON_LABEL = 'event_type_on_label'
GROUP_BY_EVENT_TYPE_ON_USER = 'event_type_on_user'
GROUP_BY_CHOICES = ((GROUP_BY_RESOURCE, "By resource",),
(GROUP_BY_RESOURCE_ON_LABEL, "By resource on label",),
(GROUP_BY_RESOURCE_ON_USER, "By resource on user",),
(GROUP_BY_COUNT_ON_RESOURCE, "By resource with count",),
(GROUP_BY_LABEL, "By label",),
(GROUP_BY_USER, "By user",),
(GROUP_BY_USER_ON_LABEL, "By user on label",),
(GROUP_BY_EVENT_TYPE, "By event type",),
(GROUP_BY_EVENT_TYPE_ON_LABEL, "By event type on label",),
(GROUP_BY_EVENT_TYPE_ON_USER, "By event type on user",),)
service = forms.CharField(required=False)
label = forms.CharField(required=False)
user = forms.CharField(required=False)
resource = forms.CharField(required=False)
resource_type = forms.ChoiceField(
choices=MonitoredResource.TYPES, required=False)
event_type = forms.CharField(required=False)
service_type = forms.CharField(required=False)
group_by = forms.ChoiceField(choices=GROUP_BY_CHOICES, required=False)
def clean_resource(self):
return self._check_type('resource')
def clean_service(self):
return self._check_type('service')
def clean_label(self):
return self._check_type('label')
def clean_user(self):
return self._check_type('user')
def clean_event_type(self):
return self._check_type('event_type')
def clean_service_type(self):
return self._check_type('service_type')
def _check_services(self):
s = self.cleaned_data.get('service')
st = self.cleaned_data.get('service_type')
if st and s:
raise forms.ValidationError(
"Cannot use service and service type at the same time")
def clean(self):
super(MetricsFilters, self).clean()
self._check_services()
class LabelsFilterForm(CheckTypeForm):
metric_name = forms.CharField(required=False)
def clean_metric(self):
return self._check_type('metric_name')
class ResourcesFilterForm(LabelsFilterForm):
resource_type = forms.CharField(required=False)
def clean_resource_type(self):
return self._check_type('resource_type')
class EventTypesFilterForm(CheckTypeForm):
ows_service = forms.CharField(required=False)
def clean_ows_service(self):
return self._check_type('ows_service')
class FilteredView(View):
# form which validates request.GET for get_queryset()
filter_form = None
# iterable of pairs (from model field, to key name) to map
# fields from model to elements of output data
fields_map = tuple()
# key name for output ({output_name: data})
output_name = None
def get_filter_args(self, request):
self.errors = None
if not self.filter_form:
return {}
f = self.filter_form(data=request.GET)
if not f.is_valid():
self.errors = f.errors
return f.cleaned_data
def get(self, request, *args, **kwargs):
qargs = self.get_filter_args(request)
if self.errors:
return json_response({'success': False,
'status': 'errors',
'errors': self.errors},
status=400)
q = self.get_queryset(**qargs)
from_fields = [f[0] for f in self.fields_map]
to_fields = [f[1] for f in self.fields_map]
out = [dict(zip(to_fields, (getattr(item, f)
for f in from_fields))) for item in q]
data = {self.output_name: out,
'success': True,
'errors': {},
'status': 'ok'}
if self.output_name != 'data':
data['data'] = {'key': self.output_name}
return json_response(data)
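# Editor's note: illustrative sketch, not part of the original module. For a
# FilteredView subclass such as LabelsList (fields_map covering 'id' and
# 'name', output_name 'labels'), get() produces a payload of this shape; the
# label values shown below are hypothetical:
#
#     {
#         "labels": [{"id": 1, "name": "GET"}, {"id": 2, "name": "WMS"}],
#         "success": true,
#         "errors": {},
#         "status": "ok",
#         "data": {"key": "labels"}
#     }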
@view_decorator(superuser_protected, subclass=True)
class ResourcesList(FilteredView):
filter_form = ResourcesFilterForm
fields_map = (('id', 'id',),
('type', 'type',),
('name', 'name',),)
output_name = 'resources'
def get_queryset(self, metric_name=None,
resource_type=None,
valid_from=None,
valid_to=None,
last=None,
interval=None):
q = MonitoredResource.objects.all().distinct()
qparams = {}
if resource_type:
qparams['type'] = resource_type
if metric_name:
sm = ServiceTypeMetric.objects.filter(metric__name=metric_name)
qparams['metric_values__service_metric__in'] = sm
if last:
_from = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(seconds=last)
if interval is None:
interval = 60
if not isinstance(interval, timedelta):
interval = timedelta(seconds=interval)
valid_from = _from
if valid_from:
qparams['metric_values__valid_from__gte'] = valid_from
if valid_to:
qparams['metric_values__valid_to__lte'] = valid_to
if qparams:
q = q.filter(**qparams)
return q
@view_decorator(superuser_protected, subclass=True)
class ResourceTypesList(FilteredView):
output_name = 'resource_types'
def get(self, request, *args, **kwargs):
if self.filter_form:
f = self.filter_form(data=request.GET)
if not f.is_valid():
return json_response({'success': False,
'status': 'errors',
'errors': f.errors},
status=400)
out = [{"name": mrt[0], "type_label": mrt[1]} for mrt in MonitoredResource.TYPES]
data = {self.output_name: out,
'success': True,
'errors': {},
'status': 'ok'}
if self.output_name != 'data':
data['data'] = {'key': self.output_name}
return json_response(data)
@view_decorator(superuser_protected, subclass=True)
class LabelsList(FilteredView):
filter_form = LabelsFilterForm
fields_map = (('id', 'id',),
('name', 'name',),)
output_name = 'labels'
def get_queryset(self, metric_name, valid_from,
valid_to, interval=None, last=None):
q = MetricLabel.objects.all().distinct()
qparams = {}
if metric_name:
sm = ServiceTypeMetric.objects.filter(metric__name=metric_name)
qparams['metric_values__service_metric__in'] = sm
if last:
_from = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(seconds=last)
if interval is None:
interval = 60
if not isinstance(interval, timedelta):
interval = timedelta(seconds=interval)
valid_from = _from
if valid_from:
qparams['metric_values__valid_from__gte'] = valid_from
if valid_to:
qparams['metric_values__valid_to__lte'] = valid_to
if qparams:
q = q.filter(**qparams)
return q
@view_decorator(superuser_protected, subclass=True)
class EventTypeList(FilteredView):
filter_form = EventTypesFilterForm
fields_map = (('name', 'name',), ('type_label', 'type_label',),)
output_name = 'event_types'
def get_queryset(self, **kwargs):
if "ows_service" in kwargs and kwargs["ows_service"] is not None:
if kwargs["ows_service"]:
return EventType.objects.filter(name__icontains="OWS")
else:
return EventType.objects.exclude(name__icontains="OWS")
return EventType.objects.all()
def get(self, request, *args, **kwargs):
qargs = self.get_filter_args(request)
if self.errors:
return json_response({'success': False,
'status': 'errors',
'errors': self.errors},
status=400)
q = self.get_queryset(**qargs)
from_fields = [f[0] for f in self.fields_map]
to_fields = [f[1] for f in self.fields_map]
labels = dict(EventType.EVENT_TYPES)
out = [dict(zip(
to_fields,
(getattr(item, f) if f != 'type_label' else labels[getattr(item, 'name')] for f in from_fields)
)) for item in q]
data = {self.output_name: out,
'success': True,
'errors': {},
'status': 'ok'}
if self.output_name != 'data':
data['data'] = {'key': self.output_name}
return json_response(data)
@view_decorator(superuser_protected, subclass=True)
class MetricDataView(View):
def get_filters(self, **kwargs):
out = {}
self.errors = None
f = MetricsFilters(data=self.request.GET)
if not f.is_valid():
self.errors = f.errors
else:
out.update(f.cleaned_data)
return out
def get(self, request, *args, **kwargs):
filters = self.get_filters(**kwargs)
if self.errors:
return json_response({'status': 'error',
'success': False,
'errors': self.errors},
status=400)
metric_name = kwargs['metric_name']
last = filters.pop('last', None)
if last:
td = timedelta(seconds=last)
now = datetime.utcnow().replace(tzinfo=pytz.utc)
filters['valid_from'] = now - td
filters['valid_to'] = now
out = capi.get_metrics_for(metric_name, **filters)
return json_response({'data': out})
class ExceptionsListForm(CheckTypeForm):
error_type = forms.CharField(required=False)
service_name = forms.CharField(required=False)
service_type = forms.CharField(required=False)
resource = forms.CharField(required=False)
def clean_resource(self):
return self._check_type('resource')
def clean_service(self):
return self._check_type('service')
class ExceptionsListView(FilteredView):
filter_form = ExceptionsListForm
fields_map = (('id', 'id',),
('created', 'created',),
('url', 'url',),
('service_data', 'service',),
('error_type', 'error_type',),)
output_name = 'exceptions'
def get_queryset(self, error_type=None,
valid_from=None,
valid_to=None,
interval=None,
last=None,
service_name=None,
service_type=None,
resource=None):
q = ExceptionEvent.objects.all().select_related()
if error_type:
q = q.filter(error_type=error_type)
if last:
_from = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(seconds=last)
if interval is None:
interval = 60
if not isinstance(interval, timedelta):
interval = timedelta(seconds=interval)
valid_from = _from
if valid_from:
q = q.filter(created__gte=valid_from)
if valid_to:
q = q.filter(created__lte=valid_to)
if service_name:
q = q.filter(service__name=service_name)
if service_type:
q = q.filter(service__service_type__name=service_type)
if resource:
q = q.filter(request__resources__in=(resource,))
return q
class ExceptionDataView(View):
def get_object(self, exception_id):
try:
return ExceptionEvent.objects.get(id=exception_id)
except ExceptionEvent.DoesNotExist:
return
def get(self, request, exception_id, *args, **kwargs):
e = self.get_object(exception_id)
if not e:
return json_response(
errors={'exception_id': "Object not found"}, status=404)
data = e.expose()
return json_response(data)
class BeaconView(View):
def get(self, request, *args, **kwargs):
service = kwargs.get('exposed')
if not service:
data = [{'name': s, 'url': reverse(
'monitoring:api_beacon_exposed', args=(s,))} for s in exposes.keys()]
return json_response({'exposed': data})
try:
ex = exposes[service]()
except KeyError:
return json_response(
errors={'exposed': f'No service for {service}'}, status=404)
out = {'data': ex.expose(),
'timestamp': datetime.utcnow().replace(tzinfo=pytz.utc)}
return json_response(out)
def index(request):
if auth.get_user(request).is_superuser:
return render(request, 'monitoring/index.html')
return render(request, 'monitoring/non_superuser.html')
class NotificaitonCheckForm(forms.ModelForm):
class Meta:
model = NotificationCheck
fields = ('name', 'description', 'severity', 'user_threshold',)
class MetricNotificationCheckForm(forms.ModelForm):
metric = forms.CharField(required=True)
service = forms.CharField(required=False)
resource = forms.CharField(required=False)
label = forms.CharField(required=False)
event_type = forms.CharField(required=False)
class Meta:
model = MetricNotificationCheck
fields = (
'notification_check',
'min_value',
'max_value',
'max_timeout',
)
def _get_clean_model(self, cls, name):
val = self.cleaned_data.get(name)
if not self.fields[name].required:
if not val:
return
try:
return cls.objects.get(name=val)
except cls.DoesNotExist:
raise forms.ValidationError(f"Invalid {name}: {val}")
def clean_metric(self):
return self._get_clean_model(Metric, 'metric')
def clean_service(self):
return self._get_clean_model(Service, 'service')
def clean_label(self):
return self._get_clean_model(MetricLabel, 'label')
def clean_event_type(self):
return self._get_clean_model(EventType, 'event_type')
def clean_resource(self):
val = self.cleaned_data.get('resource')
if not val:
return
try:
vtype, vname = val.split('=')
        except ValueError:
raise forms.ValidationError(
f"Invalid resource name: {val}")
try:
return MonitoredResource.objects.get(name=vname, type=vtype)
except MonitoredResource.DoesNotExist:
raise forms.ValidationError(f"Invalid resource: {val}")
class UserNotificationConfigView(View):
def get_object(self):
pk = self.kwargs['pk']
return NotificationCheck.objects.get(pk=pk)
def get(self, request, *args, **kwargs):
out = {'success': False, 'status': 'error', 'data': [], 'errors': {}}
fields = ('field_name',
'steps',
'current_value',
'steps_calculated',
'unit',
'is_enabled',)
if auth.get_user(request).is_authenticated:
obj = self.get_object()
out['success'] = True
out['status'] = 'ok'
form = obj.get_user_form()
fields = [dump(r, fields) for r in obj.definitions.all()]
out['data'] = {'form': form.as_table(),
'fields': fields,
'emails': obj.emails,
'notification': dump(obj)}
status = 200
else:
out['errors']['user'] = ['User is not authenticated']
status = 401
return json_response(out, status=status)
def post(self, request, *args, **kwargs):
out = {'success': False, 'status': 'error', 'data': [], 'errors': {}}
if auth.get_user(request).is_authenticated:
obj = self.get_object()
try:
is_json = True
data = json.loads(request.body)
except (TypeError, ValueError,):
is_json = False
data = request.POST.copy()
try:
configs = obj.process_user_form(data, is_json=is_json)
out['success'] = True
out['status'] = 'ok'
out['data'] = [dump(c) for c in configs]
status = 200
except forms.ValidationError as err:
out['errors'] = err.errors
status = 400
else:
out['errors']['user'] = ['User is not authenticated']
status = 401
return json_response(out, status=status)
if settings.MONITORING_DISABLE_CSRF:
post = csrf_exempt(post)
class NotificationsList(FilteredView):
filter_form = None
fields_map = (('id', 'id',),
('url', 'url',),
('name', 'name',),
('active', 'active',),
('severity', 'severity',),
('description', 'description',),
)
output_name = 'data'
def get_filter_args(self, *args, **kwargs):
self.errors = {}
if not auth.get_user(self.request).is_authenticated:
self.errors = {'user': ['User is not authenticated']}
return {}
def get_queryset(self, *args, **kwargs):
return NotificationCheck.objects.all()
def create(self, request, *args, **kwargs):
f = NotificaitonCheckForm(data=request.POST)
if f.is_valid():
d = f.cleaned_data
return NotificationCheck.create(**d)
self.errors = f.errors
def post(self, request, *args, **kwargs):
out = {'success': False, 'status': 'error', 'data': [], 'errors': {}}
d = self.create(request, *args, **kwargs)
if d is None:
out['errors'] = self.errors
status = 400
else:
out['data'] = dump(d)
out['success'] = True
out['status'] = 'ok'
status = 200
return json_response(out, status=status)
class StatusCheckView(View):
fields = ('name',
'severity',
'offending_value',
'threshold_value',
'spotted_at',
'valid_from',
'valid_to',
'check_url',
'check_id',
'description',
'message',)
def get(self, request, *args, **kwargs):
capi = CollectorAPI()
checks = capi.get_notifications()
data = {'status': 'ok', 'success': True, 'data': {}}
d = data['data']
d['problems'] = problems = []
d['health_level'] = 'ok'
_levels = ('fatal', 'error', 'warning',)
levels = set([])
for nc, ncdata in checks:
for ncd in ncdata:
levels.add(ncd.severity)
problems.append(dump(ncd, self.fields))
if levels:
for lyr in _levels:
if lyr in levels:
d['health_level'] = lyr
break
return json_response(data)
class AutoconfigureView(View):
def post(self, request, *args, **kwargs):
if not auth.get_user(request).is_authenticated:
out = {'success': False,
'status': 'error',
'errors': {'user': ['User is not authenticated']}
}
return json_response(out, status=401)
if not (auth.get_user(request).is_superuser or auth.get_user(request).is_staff):
out = {'success': False,
'status': 'error',
'errors': {'user': ['User is not permitted']}
}
return json_response(out, status=401)
do_autoconfigure()
out = {'success': True,
'status': 'ok',
'errors': {}
}
return json_response(out)
class CollectMetricsView(View):
"""
- Run command "collect_metrics -n -t xml" via web
"""
authkey = 'OzhVMECJUn9vDu2oLv1HjGPKByuTBwF8'
def get(self, request, *args, **kwargs):
authkey = kwargs.get('authkey')
if not authkey or authkey != self.authkey:
out = {'success': False,
'status': 'error',
'errors': {'denied': ['Call is not permitted']}
}
return json_response(out, status=401)
else:
call_command(
'collect_metrics', '-n', '-t', 'xml')
out = {'success': True,
'status': 'ok',
'errors': {}
}
return json_response(out)
api_metrics = MetricsList.as_view()
api_services = ServicesList.as_view()
api_hosts = HostsList.as_view()
api_labels = LabelsList.as_view()
api_resources = ResourcesList.as_view()
api_resource_types = ResourceTypesList.as_view()
api_event_types = EventTypeList.as_view()
api_metric_data = MetricDataView.as_view()
api_metric_collect = CollectMetricsView.as_view()
api_exceptions = ExceptionsListView.as_view()
api_exception = ExceptionDataView.as_view()
api_beacon = BeaconView.as_view()
api_user_notification_config = UserNotificationConfigView.as_view()
api_user_notifications = NotificationsList.as_view()
api_status = StatusCheckView.as_view()
api_autoconfigure = AutoconfigureView.as_view()
| gpl-3.0 |
rackerlabs/ironic | ironic/drivers/modules/deploy_utils.py | 1 | 17645 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import socket
import stat
import time
from oslo.config import cfg
from oslo.utils import excutils
from oslo_concurrency import processutils
from ironic.common import disk_partitioner
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import image_cache
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (portal_address, portal_port),
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
    # Ensure the login is complete
time.sleep(3)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
def delete_iscsi(portal_address, portal_port, target_iqn):
"""Delete the iSCSI target."""
# Retry delete until it succeeds (exit code 0) or until there is
# no longer a target to delete (exit code 21).
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'-o', 'delete',
run_as_root=True,
check_exit_code=[0, 21],
attempts=5,
delay_on_retry=True)
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb, commit=True):
"""Create partitions for root, swap and ephemeral on a disk device.
:param root_mb: Size of the root partition in mebibytes (MiB).
:param swap_mb: Size of the swap partition in mebibytes (MiB). If 0,
no swap partition will be created.
:param ephemeral_mb: Size of the ephemeral partition in mebibytes (MiB).
If 0, no ephemeral partition will be created.
:param commit: True/False. Default for this setting is True. If False
partitions will not be written to disk.
:returns: A dictionary containing the partition type as Key and partition
path as Value for the partitions created by this method.
"""
part_template = dev + '-part%d'
part_dict = {}
dp = disk_partitioner.DiskPartitioner(dev)
if ephemeral_mb:
part_num = dp.add_partition(ephemeral_mb)
part_dict['ephemeral'] = part_template % part_num
if swap_mb:
part_num = dp.add_partition(swap_mb, fs_type='linux-swap')
part_dict['swap'] = part_template % part_num
# NOTE(lucasagomes): Make the root partition the last partition. This
# enables tools like cloud-init's growroot utility to expand the root
# partition until the end of the disk.
part_num = dp.add_partition(root_mb)
part_dict['root'] = part_template % part_num
if commit:
# write to the disk
dp.commit()
return part_dict
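# Editor's note: illustrative sketch, not part of the original module. Assuming
# DiskPartitioner.add_partition() numbers partitions sequentially from 1, a
# call with all three sizes set (hypothetical values) returns paths built from
# the dev + '-part%d' template, with the root partition always last:
#
#     part_dict = make_partitions('/dev/fake', root_mb=10240, swap_mb=1024,
#                                 ephemeral_mb=2048, commit=False)
#     # {'ephemeral': '/dev/fake-part1',
#     #  'swap': '/dev/fake-part2',
#     #  'root': '/dev/fake-part3'}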
def is_block_device(dev):
"""Check whether a device is block or not."""
s = os.stat(dev)
return stat.S_ISBLK(s.st_mode)
def dd(src, dst):
"""Execute dd from src to dst."""
utils.dd(src, dst, 'bs=1M', 'oflag=direct')
def populate_image(src, dst):
data = images.qemu_img_info(src)
if data.file_format == 'raw':
dd(src, dst)
else:
images.convert_image(src, dst, 'raw', True)
def mkswap(dev, label='swap1'):
"""Execute mkswap on a device."""
utils.mkfs('swap', dev, label)
def mkfs_ephemeral(dev, ephemeral_format, label="ephemeral0"):
utils.mkfs(ephemeral_format, dev, label)
def block_uuid(dev):
"""Get UUID of a block device."""
out, _err = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
return out.strip()
def switch_pxe_config(path, root_uuid, boot_mode):
"""Switch a pxe config from deployment mode to service mode."""
with open(path) as f:
lines = f.readlines()
root = 'UUID=%s' % root_uuid
rre = re.compile(r'\{\{ ROOT \}\}')
if boot_mode == 'uefi':
dre = re.compile('^default=.*$')
boot_line = 'default=boot'
else:
pxe_cmd = 'goto' if CONF.pxe.ipxe_enabled else 'default'
dre = re.compile('^%s .*$' % pxe_cmd)
boot_line = '%s boot' % pxe_cmd
with open(path, 'w') as f:
for line in lines:
line = rre.sub(root, line)
line = dre.sub(boot_line, line)
f.write(line)
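# Editor's note: schematic example only; the lines below are not taken from the
# real PXE templates. In BIOS mode (iPXE disabled) the rewrite swaps the
# default label and fills in the root UUID:
#
#     before:   default deploy
#               kernel ... root={{ ROOT }} ...
#     after:    default boot
#               kernel ... root=UUID=<root_uuid> ...
#
# In UEFI mode the same substitution targets a 'default=...' line instead.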
def notify(address, port):
"""Notify a node that it becomes ready to reboot."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((address, port))
s.send('done')
finally:
s.close()
def get_dev(address, port, iqn, lun):
"""Returns a device path for given parameters."""
dev = ("/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s"
% (address, port, iqn, lun))
return dev
def get_image_mb(image_path, virtual_size=True):
"""Get size of an image in Megabyte."""
mb = 1024 * 1024
if not virtual_size:
image_byte = os.path.getsize(image_path)
else:
image_byte = images.converted_size(image_path)
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
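# Editor's note: quick illustration of the rounding above (not part of the
# original module): a 1048576-byte (1 MiB) image gives image_mb == 1, while
# 1048577 bytes rounds up to image_mb == 2.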
def get_dev_block_size(dev):
"""Get the device size in 512 byte sectors."""
block_sz, cmderr = utils.execute('blockdev', '--getsz', dev,
run_as_root=True, check_exit_code=[0])
return int(block_sz)
def destroy_disk_metadata(dev, node_uuid):
"""Destroy metadata structures on node's disk.
Ensure that node's disk appears to be blank without zeroing the entire
drive. To do this we will zero:
- the first 18KiB to clear MBR / GPT data
- the last 18KiB to clear GPT and other metadata like: LVM, veritas,
MDADM, DMRAID, ...
"""
# NOTE(NobodyCam): This is needed to work around bug:
# https://bugs.launchpad.net/ironic/+bug/1317647
try:
utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
'bs=512', 'count=36', run_as_root=True,
check_exit_code=[0])
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to erase beginning of disk for node "
"%(node)s. Command: %(command)s. Error: %(error)s."),
{'node': node_uuid,
'command': err.cmd,
'error': err.stderr})
# now wipe the end of the disk.
# get end of disk seek value
try:
block_sz = get_dev_block_size(dev)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to get disk block count for node %(node)s. "
"Command: %(command)s. Error: %(error)s."),
{'node': node_uuid,
'command': err.cmd,
'error': err.stderr})
else:
seek_value = block_sz - 36
try:
utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
'bs=512', 'count=36', 'seek=%d' % seek_value,
run_as_root=True, check_exit_code=[0])
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to erase the end of the disk on node "
"%(node)s. Command: %(command)s. "
"Error: %(error)s."),
{'node': node_uuid,
'command': err.cmd,
'error': err.stderr})
def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
image_path, node_uuid, preserve_ephemeral=False):
"""Create partitions and copy an image to the root partition.
:param dev: Path for the device to work on.
:param root_mb: Size of the root partition in megabytes.
:param swap_mb: Size of the swap partition in megabytes.
:param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
no ephemeral partition will be created.
:param ephemeral_format: The type of file system to format the ephemeral
partition.
:param image_path: Path for the instance's disk image.
:param node_uuid: node's uuid. Used for logging.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever content it had (if the
partition table has not changed).
:returns: the UUID of the root partition.
"""
if not is_block_device(dev):
raise exception.InstanceDeployFailure(_("Parent device '%s' not found")
% dev)
# the only way for preserve_ephemeral to be set to true is if we are
# rebuilding an instance with --preserve_ephemeral.
commit = not preserve_ephemeral
# now if we are committing the changes to disk clean first.
if commit:
destroy_disk_metadata(dev, node_uuid)
part_dict = make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
commit=commit)
ephemeral_part = part_dict.get('ephemeral')
swap_part = part_dict.get('swap')
root_part = part_dict.get('root')
if not is_block_device(root_part):
raise exception.InstanceDeployFailure(_("Root device '%s' not found")
% root_part)
if swap_part and not is_block_device(swap_part):
raise exception.InstanceDeployFailure(_("Swap device '%s' not found")
% swap_part)
if ephemeral_part and not is_block_device(ephemeral_part):
raise exception.InstanceDeployFailure(
_("Ephemeral device '%s' not found") % ephemeral_part)
populate_image(image_path, root_part)
if swap_part:
mkswap(swap_part)
if ephemeral_part and not preserve_ephemeral:
mkfs_ephemeral(ephemeral_part, ephemeral_format)
try:
root_uuid = block_uuid(root_part)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to detect root device UUID."))
return root_uuid
def deploy(address, port, iqn, lun, image_path,
root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
preserve_ephemeral=False):
"""All-in-one function to deploy a node.
:param address: The iSCSI IP address.
:param port: The iSCSI port number.
:param iqn: The iSCSI qualified name.
:param lun: The iSCSI logical unit number.
:param image_path: Path for the instance's disk image.
:param root_mb: Size of the root partition in megabytes.
:param swap_mb: Size of the swap partition in megabytes.
:param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
no ephemeral partition will be created.
:param ephemeral_format: The type of file system to format the ephemeral
partition.
:param node_uuid: node's uuid. Used for logging.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever content it had (if the
partition table has not changed).
:returns: the UUID of the root partition.
"""
dev = get_dev(address, port, iqn, lun)
image_mb = get_image_mb(image_path)
if image_mb > root_mb:
root_mb = image_mb
discovery(address, port)
login_iscsi(address, port, iqn)
try:
root_uuid = work_on_disk(dev, root_mb, swap_mb, ephemeral_mb,
ephemeral_format, image_path, node_uuid,
preserve_ephemeral)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deploy to address %s failed."), address)
LOG.error(_LE("Command: %s"), err.cmd)
LOG.error(_LE("StdOut: %r"), err.stdout)
LOG.error(_LE("StdErr: %r"), err.stderr)
except exception.InstanceDeployFailure as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deploy to address %s failed."), address)
LOG.error(e)
finally:
logout_iscsi(address, port, iqn)
delete_iscsi(address, port, iqn)
return root_uuid
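# Editor's note: illustrative call, not part of the original module. The caller
# passes the iSCSI endpoint exposed by the deploy ramdisk plus the instance
# image and partition sizes; every value below is hypothetical:
#
#     root_uuid = deploy('10.0.0.5', 3260, 'iqn.2008-10.org.openstack:node-1',
#                        1, '/var/lib/ironic/images/instance.qcow2',
#                        root_mb=10240, swap_mb=1024, ephemeral_mb=0,
#                        ephemeral_format=None, node_uuid='fake-node-uuid')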
def notify_deploy_complete(address):
"""Notifies the completion of deployment to the baremetal node.
:param address: The IP address of the node.
"""
    # Ensure the node has started netcat on the port after POSTing the request.
time.sleep(3)
notify(address, 10000)
def check_for_missing_params(info_dict, error_msg, param_prefix=''):
"""Check for empty params in the provided dictionary.
:param info_dict: The dictionary to inspect.
:param error_msg: The error message to prefix before printing the
information about missing parameters.
:param param_prefix: Add this prefix to each parameter for error messages
:raises: MissingParameterValue, if one or more parameters are
empty in the provided dictionary.
"""
missing_info = []
for label, value in info_dict.items():
if not value:
missing_info.append(param_prefix + label)
if missing_info:
exc_msg = _("%(error_msg)s. Missing are: %(missing_info)s")
raise exception.MissingParameterValue(exc_msg %
{'error_msg': error_msg, 'missing_info': missing_info})
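# Editor's note: illustrative sketch, not part of the original module. Empty
# values are reported with the optional prefix prepended:
#
#     check_for_missing_params({'address': '10.0.0.5', 'username': ''},
#                              error_msg='Missing IPMI credentials',
#                              param_prefix='ipmi_')
#     # raises MissingParameterValue mentioning 'ipmi_username'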
def fetch_images(ctx, cache, images_info, force_raw=True):
"""Check for available disk space and fetch images using ImageCache.
:param ctx: context
:param cache: ImageCache instance to use for fetching
:param images_info: list of tuples (image href, destination path)
:param force_raw: boolean value, whether to convert the image to raw
format
:raises: InstanceDeployFailure if unable to find enough disk space
"""
try:
image_cache.clean_up_caches(ctx, cache.master_dir, images_info)
except exception.InsufficientDiskSpace as e:
raise exception.InstanceDeployFailure(reason=e)
    # NOTE(dtantsur): This code can suffer from a race condition if disk
    # space is used between the check and the actual download. This is
    # probably unavoidable, as we can't control other (probably unrelated)
    # processes.
for href, path in images_info:
cache.fetch_image(href, path, ctx=ctx, force_raw=force_raw)
def set_failed_state(task, msg):
"""Sets the deploy status as failed with relevant messages.
This method sets the deployment as fail with the given message.
It sets node's provision_state to DEPLOYFAIL and updates last_error
with the given error message. It also powers off the baremetal node.
:param task: a TaskManager instance containing the node to act on.
:param msg: the message to set in last_error of the node.
"""
node = task.node
node.provision_state = states.DEPLOYFAIL
node.target_provision_state = states.NOSTATE
node.save()
try:
manager_utils.node_power_action(task, states.POWER_OFF)
except Exception:
msg2 = (_LE('Node %s failed to power off while handling deploy '
'failure. This may be a serious condition. Node '
'should be removed from Ironic or put in maintenance '
'mode until the problem is resolved.') % node.uuid)
LOG.exception(msg2)
finally:
# NOTE(deva): node_power_action() erases node.last_error
# so we need to set it again here.
node.last_error = msg
node.save()
| apache-2.0 |
xzturn/tensorflow | tensorflow/tools/docs/generate_lib.py | 4 | 22402 | # Lint as: python2, python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import fnmatch
import os
import shutil
import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def write_docs(output_dir,
parser_config,
yaml_toc,
root_title='TensorFlow',
search_hints=True,
site_api_path='api_docs/python'):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
site_api_path: The output path relative to the site root. Used in the
`_toc.yaml` and `_redirects.yaml` files.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
# Make output_dir.
if not os.path.isabs(output_dir):
raise ValueError("'output_dir' must be an absolute path.\n"
" output_dir='%s'" % output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# These dictionaries are used for table-of-contents generation below
# They will contain, after the for-loop below::
# - module name(string):classes and functions the module contains(list)
module_children = {}
# - symbol name(string):pathname (string)
symbol_to_file = {}
# Collect redirects for an api _redirects.yaml file.
redirects = []
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
if full_name in parser_config.duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
parser.is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join(parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if tf_inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if tf_inspect.ismodule(parser_config.index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
# This function returns raw bytes in PY2 or unicode in PY3.
if search_hints:
content = [page_info.get_metadata_html()]
else:
content = ['']
content.append(pretty_docs.build_md_page(page_info))
text = '\n'.join(content)
if six.PY3:
text = text.encode('utf-8')
with open(path, 'wb') as f:
f.write(text)
except OSError:
raise OSError(
'Cannot write documentation for %s to %s' % (full_name, directory))
duplicates = parser_config.duplicates.get(full_name, [])
if not duplicates:
continue
duplicates = [item for item in duplicates if item != full_name]
for dup in duplicates:
from_path = os.path.join(site_api_path,
six.ensure_str(dup).replace('.', '/'))
to_path = os.path.join(site_api_path,
six.ensure_str(full_name).replace('.', '/'))
redirects.append((
os.path.join('/', from_path),
os.path.join('/', to_path)))
if redirects:
redirects = sorted(redirects)
template = ('- from: {}\n'
' to: {}\n')
redirects = [template.format(f, t) for f, t in redirects]
api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
with open(api_redirects_path, 'w') as redirect_file:
redirect_file.write('redirects:\n')
redirect_file.write(''.join(redirects))
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(list(module_children.keys()), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
indent_num = module.count('.')
# Don't list `tf.submodule` inside `tf`
indent_num = max(indent_num, 1)
indent = ' '*indent_num
if indent_num > 1:
          # tf.contrib.bayesflow.entropy will be under
          # tf.contrib->bayesflow->entropy
title = six.ensure_str(module).split('.')[-1]
else:
title = module
header = [
'- title: ' + six.ensure_str(title), ' section:',
' - title: Overview', ' path: ' +
os.path.join('/', site_api_path, symbol_to_file[module])
]
header = ''.join([indent+line+'\n' for line in header])
f.write(header)
symbols_in_module = module_children.get(module, [])
# Sort case-insensitive, if equal sort case sensitive (upper first)
symbols_in_module.sort(key=lambda a: (a.upper(), a))
for full_name in symbols_in_module:
item = [
' - title: ' + full_name[len(module) + 1:],
' path: ' + os.path.join('/', site_api_path,
symbol_to_file[full_name])]
item = ''.join([indent+line+'\n' for line in item])
f.write(item)
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(
six.ensure_str(
parser.generate_global_index(root_title, parser_config.index,
parser_config.reference_resolver)))
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {
'tf.test': ['mock'],
'tf': ['contrib'],
'tf.compat': ['v1', 'v2'],
}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
# TODO(markdaoust): Use docs_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
}
class DocControlsAwareCrawler(public_api.PublicAPIVisitor):
"""A `docs_controls` aware API-crawler."""
def _is_private(self, path, name, obj):
if doc_controls.should_skip(obj):
return True
return super(DocControlsAwareCrawler, self)._is_private(path, name, obj)
def extract(py_modules,
private_map,
do_not_descend_map,
visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = visitor_cls(py_modules[0][0])
api_visitor = DocControlsAwareCrawler(visitor)
api_visitor.set_root_name(py_modules[0][0])
add_dict_to_dict(private_map, api_visitor.private_map)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
api_visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
if not os.path.isabs(src_dir):
raise ValueError("'src_dir' must be an absolute path.\n"
" src_dir='%s'" % src_dir)
if not os.path.exists(src_dir):
raise ValueError("'src_dir' path must exist.\n"
" src_dir='%s'" % src_dir)
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not six.ensure_str(base_name).endswith('.md'):
continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
if title_parser.title is None:
msg = ('`{}` has no markdown title (# title)'.format(
os.path.join(dirpath, base_name)))
raise ValueError(msg)
key_parts = six.ensure_str(os.path.join(suffix,
base_name[:-3])).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
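# Illustrative result (hypothetical file/title): if `src_dir` contains
# "guides/eager.md" whose first markdown heading is "# Eager execution",
# build_doc_index maps both "eager" and "guides/eager" to a
# _DocInfo('guides/eager.md', 'Eager execution').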
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + six.ensure_str(
(('%s#%s' % (base_name, section_tag)) if section_tag else base_name))
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
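# Worked example (hypothetical guide and section names):
#   _GuideRef('math_ops', 'Math', 'Arithmetic', 'arithmetic').make_md_link('../')
# returns '[Math > Arithmetic](../api_guides/python/math_ops#arithmetic)'.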
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index the file and section of each `symbol` reference."""
for match in parser.AUTO_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(
_GuideRef(self.base_name, self.title, self.section_title,
self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit id tag.
"section" here refers to blocks delimited by second level headings.
"""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
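# For example, a section parsed with section_title "Using GPUs" and tag
# "using_GPUs" (hypothetical values) is rewritten to
# '<h2 id="using_GPUs">Using GPUs</h2>'.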
def update_id_tags_inplace(src_dir):
"""Set explicit ids on all second-level headings to ensure back-links work.
Args:
src_dir: The directory of md-files to convert (inplace).
"""
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
for base_name in filenames:
if not base_name.endswith('.md'):
continue
full_path = os.path.join(src_dir, dirpath, base_name)
# Tag updater loads the file, makes the replacements, and returns the
# modified file contents
content = tag_updater.process(full_path)
with open(full_path, 'w') as f:
f.write(six.ensure_str(content))
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(src_dir,
output_dir,
reference_resolver,
file_pattern='*.md',
api_docs_relpath='api_docs'):
"""Fix @{} references in all files under `src_dir` matching `file_pattern`.
A matching directory structure, with the modified files is
written to `output_dir`.
`{"__init__.py","OWNERS","README.txt"}` are skipped.
Files not matching `file_pattern` (using `fnmatch`) are copied with no change.
Also, files in the `api_guides/python` directory get explicit ids set on all
heading-2s to ensure back-links work.
Args:
src_dir: The directory to convert files from.
output_dir: The root directory to write the resulting files to.
reference_resolver: A `parser.ReferenceResolver` to make the replacements.
    file_pattern: Only replace references in files matching `file_pattern`,
using fnmatch. Non-matching files are copied unchanged.
api_docs_relpath: Relative-path string to the api_docs, from the src_dir.
"""
# Iterate through all the source files and process them.
for dirpath, _, filenames in os.walk(src_dir):
depth = os.path.relpath(src_dir, start=dirpath)
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.join(depth, api_docs_relpath, 'python')
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
for base_name in filenames:
if base_name in EXCLUDED:
continue
full_in_path = os.path.join(dirpath, base_name)
# Set the `current_doc_full_name` so bad files can be reported on errors.
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
# Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
if full_in_path != full_out_path:
shutil.copyfile(full_in_path, full_out_path)
continue
with open(full_in_path, 'rb') as f:
content = f.read().decode('utf-8')
content = reference_resolver.replace_references(content,
relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write(six.ensure_binary(content, 'utf-8'))
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._private_map = _get_default_private_map()
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
self.argument_parser.add_argument(
'--no_search_hints',
dest='search_hints',
action='store_false',
default=True)
self.argument_parser.add_argument(
'--site_api_path',
type=str, default='api_docs/python',
        help='The path from the site-root to the api_docs '
'directory for this project')
self.argument_parser.add_argument(
'--api_cache_out_path',
type=str,
default=None,
help='Path to store a json-serialized api-index, so links can be '
'inserted into docs without rebuilding the api_docs')
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.')
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=tempfile.mkdtemp(),
required=False,
help='Optional directory of source docs to add api_docs links to')
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
help='Base directory to strip from file names referenced in docs.')
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_private_map(self, d):
add_dict_to_dict(d, self._private_map)
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_private_map(self, d):
self._private_map = d
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver.from_visitor(
visitor, doc_index, py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._private_map,
self._do_not_descend_map)
def build(self, flags):
"""Build all the docs.
This produces two outputs
python api docs:
* generated from modules set with `set_py_modules`.
* written to '{FLAGS.output_dir}/api_docs/python/'
non-api docs:
* Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
* '@{}' references in '.md' files are replaced with links.
* '.md' files under 'api_guides/python' have explicit ids set for their
second level headings.
Args:
flags:
* src_dir: Where to fetch the non-api-docs.
* base_dir: Base of the docs directory (Used to build correct
relative links).
* output_dir: Where to write the resulting docs.
Returns:
The number of errors encountered while processing.
"""
# Extract the python api from the _py_modules
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
if getattr(flags, 'api_cache_out_path', None):
reference_resolver.to_json_file(flags.api_cache_out_path)
# Build the guide_index for the api_docs back links.
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
# Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(
output_dir,
parser_config,
yaml_toc=self.yaml_toc,
root_title=root_title,
search_hints=getattr(flags, 'search_hints', True),
site_api_path=getattr(flags, 'site_api_path', ''))
# Replace all the @{} references in files under `FLAGS.src_dir`
replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
# Fix the tags in the guide dir.
guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
if os.path.exists(guide_dir):
update_id_tags_inplace(guide_dir)
# Report all errors found by the reference resolver, and return the error
# code.
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
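# Hypothetical driver sketch (module name and paths are assumptions, not part
# of this file):
#   doc_generator = DocGenerator()
#   doc_generator.add_output_dir_argument()
#   doc_generator.add_src_dir_argument()
#   doc_generator.add_base_dir_argument('/path/to/project')
#   doc_generator.set_py_modules([('tf', tf)])
#   flags = doc_generator.parse_known_args()
#   sys.exit(doc_generator.build(flags))  # build() returns the error count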
| apache-2.0 |
Mevlock/xbmc | lib/gtest/test/gtest_env_var_test.py | 184 | 3546 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ,
capture_stderr=False).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
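# For instance, TestFlag('color', 'yes', 'auto') sets GTEST_COLOR=yes and
# expects the test binary to report 'yes', then unsets the variable and
# expects the built-in default 'auto'.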
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
sudiptpa/google-diff-match-patch | python2/diff_match_patch_test.py | 319 | 41744 | #!/usr/bin/python2.4
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in range(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
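# diff_rebuildtexts example (hypothetical diff): [(DIFF_DELETE, "a"),
# (DIFF_INSERT, "b"), (DIFF_EQUAL, "c")] rebuilds to ("ac", "bc") -- text1
# skips insertions and text2 skips deletions.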
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
def testDiffCleanupMerge(self):
# Cleanup a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
# Sentence boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
def testDiffCleanupSemantic(self):
# Cleanup semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# No overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
# Overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Reverse overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
# Two overlap eliminations.
diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
# Cleanup operationally trivial equalities.
self.dmp.Diff_EditCost = 4
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
# Timeout.
self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in range(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
self.dmp.Match_Distance = 10 # Strict location.
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEquals(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio_funcs.py | 17 | 1816 | #!/usr/bin/env python
''' Jottings to work out format for __function_workspace__ matrix at end
of mat file.
'''
from __future__ import division, print_function, absolute_import
from os.path import join as pjoin, dirname
import sys
from io import BytesIO
from numpy.testing import \
assert_array_equal, \
assert_array_almost_equal, \
assert_equal, \
assert_raises, run_module_suite
from nose.tools import assert_true
import numpy as np
from numpy.compat import asstr
from scipy.io.matlab.mio5 import MatlabObject, MatFile5Writer, \
MatFile5Reader, MatlabFunction
test_data_path = pjoin(dirname(__file__), 'data')
def read_minimat_vars(rdr):
rdr.initialize_read()
mdict = {'__globals__': []}
i = 0
while not rdr.end_of_stream():
hdr, next_position = rdr.read_var_header()
name = asstr(hdr.name)
if name == '':
name = 'var_%d' % i
i += 1
res = rdr.read_var_array(hdr, process=False)
rdr.mat_stream.seek(next_position)
mdict[name] = res
if hdr.is_global:
mdict['__globals__'].append(name)
return mdict
def read_workspace_vars(fname):
fp = open(fname, 'rb')
rdr = MatFile5Reader(fp, struct_as_record=True)
vars = rdr.get_variables()
fws = vars['__function_workspace__']
ws_bs = BytesIO(fws.tostring())
ws_bs.seek(2)
rdr.mat_stream = ws_bs
# Guess byte order.
mi = rdr.mat_stream.read(2)
rdr.byte_order = mi == b'IM' and '<' or '>'
rdr.mat_stream.read(4) # presumably byte padding
mdict = read_minimat_vars(rdr)
fp.close()
return mdict
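# Rough usage note (behaviour inferred from the helpers above):
# read_workspace_vars returns the variables captured in a mat-file's
# '__function_workspace__' blob, keyed by name (or 'var_N' when unnamed),
# plus a '__globals__' list of global variable names.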
def test_jottings():
# example
fname = pjoin(test_data_path, 'parabola.mat')
ws_vars = read_workspace_vars(fname)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
uclaros/QGIS | tests/src/python/test_qgsoptional.py | 74 | 2145 | # -*- coding: utf-8 -*-
'''
test_qgsoptional.py
--------------------------------------
Date : September 2016
Copyright : (C) 2016 Matthias Kuhn
email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis # NOQA
from qgis.testing import unittest
from qgis.core import QgsOptionalExpression, QgsExpression
class TestQgsOptional(unittest.TestCase):
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
def testQgsOptionalExpression(self):
opt = QgsOptionalExpression()
self.assertFalse(opt.enabled())
opt = QgsOptionalExpression(QgsExpression('true'))
self.assertTrue(opt.enabled())
self.assertEqual(opt.data().expression(), 'true')
opt.setEnabled(False)
self.assertFalse(opt.enabled())
# boolean operator not yet working in python
# self.assertFalse(opt)
self.assertEqual(opt.data().expression(), 'true')
opt.setEnabled(True)
self.assertTrue(opt.enabled())
# self.assertTrue(opt)
self.assertEqual(opt.data().expression(), 'true')
opt.setData(QgsExpression('xyz'))
self.assertTrue(opt.enabled())
self.assertEqual(opt.data().expression(), 'xyz')
opt = QgsOptionalExpression(QgsExpression('true'), False)
self.assertFalse(opt.enabled())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
JingZhou0404/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/build.py | 119 | 2636 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Build(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.build,
Options.quiet,
Options.build_style,
]
def build(self, build_style):
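# Run build-webkit for the requested configuration, with GCC smartquotes disabled in the environment.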
environment = self._tool.copy_current_environment()
environment.disable_gcc_smartquotes()
env = environment.to_dictionary()
build_webkit_command = self._tool.deprecated_port().build_webkit_command(build_style=build_style)
self._tool.executive.run_and_throw_if_fail(build_webkit_command, self._options.quiet,
cwd=self._tool.scm().checkout_root, env=env)
def run(self, state):
if not self._options.build:
return
_log.info("Building WebKit")
if self._options.build_style == "both":
self.build("debug")
self.build("release")
else:
self.build(self._options.build_style)
| bsd-3-clause |
simartin/servo | python/servo/build_commands.py | 2 | 45345 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import datetime
import locale
import os
import os.path as path
import platform
import shutil
import subprocess
import sys
import six.moves.urllib as urllib
import zipfile
import stat
from time import time
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mach.registrar import Registrar
from mach_bootstrap import _get_exec_path
from servo.command_base import CommandBase, cd, call, check_call, append_to_path_env, gstreamer_root
from servo.gstreamer import windows_dlls, windows_plugins, macos_dylibs, macos_plugins
from servo.util import host_triple
def format_duration(seconds):
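# Render a duration in seconds as H:MM:SS, e.g. format_duration(3723) == '1:02:03'.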
return str(datetime.timedelta(seconds=int(seconds)))
def notify_linux(title, text):
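# Send a desktop notification over D-Bus via the org.freedesktop.Notifications service.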
try:
import dbus
bus = dbus.SessionBus()
notify_obj = bus.get_object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
method = notify_obj.get_dbus_method("Notify", "org.freedesktop.Notifications")
method(title, 0, "", text, "", [], {"transient": True}, -1)
except ImportError:
raise Exception("Optional Python module 'dbus' is not installed.")
def notify_win(title, text):
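# Show a balloon tip via the bundled WindowsToast helper; if that fails, flash the console window instead.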
try:
from servo.win32_toast import WindowsToast
w = WindowsToast()
w.balloon_tip(title, text)
except WindowsError:
from ctypes import Structure, windll, POINTER, sizeof
from ctypes.wintypes import DWORD, HANDLE, WINFUNCTYPE, BOOL, UINT
class FLASHWINDOW(Structure):
_fields_ = [("cbSize", UINT),
("hwnd", HANDLE),
("dwFlags", DWORD),
("uCount", UINT),
("dwTimeout", DWORD)]
FlashWindowExProto = WINFUNCTYPE(BOOL, POINTER(FLASHWINDOW))
FlashWindowEx = FlashWindowExProto(("FlashWindowEx", windll.user32))
FLASHW_CAPTION = 0x01
FLASHW_TRAY = 0x02
FLASHW_TIMERNOFG = 0x0C
params = FLASHWINDOW(sizeof(FLASHWINDOW),
windll.kernel32.GetConsoleWindow(),
FLASHW_CAPTION | FLASHW_TRAY | FLASHW_TIMERNOFG, 3, 0)
FlashWindowEx(params)
def notify_darwin(title, text):
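# Post a notification through the macOS user notification center (requires pyobjc).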
try:
import Foundation
bundleDict = Foundation.NSBundle.mainBundle().infoDictionary()
bundleIdentifier = 'CFBundleIdentifier'
if bundleIdentifier not in bundleDict:
bundleDict[bundleIdentifier] = 'mach'
note = Foundation.NSUserNotification.alloc().init()
note.setTitle_(title)
note.setInformativeText_(text)
now = Foundation.NSDate.dateWithTimeInterval_sinceDate_(0, Foundation.NSDate.date())
note.setDeliveryDate_(now)
centre = Foundation.NSUserNotificationCenter.defaultUserNotificationCenter()
centre.scheduleNotification_(note)
except ImportError:
raise Exception("Optional Python module 'pyobjc' is not installed.")
def notify_with_command(command):
def notify(title, text):
if call([command, title, text]) != 0:
raise Exception("Could not run '%s'." % command)
return notify
def notify_build_done(config, elapsed, success=True):
"""Generate desktop notification when build is complete and the
elapsed build time was longer than 30 seconds."""
if elapsed > 30:
notify(config, "Servo build",
"%s in %s" % ("Completed" if success else "FAILED", format_duration(elapsed)))
def notify(config, title, text):
"""Generate a desktop notification using appropriate means on
supported platforms Linux, Windows, and Mac OS. On unsupported
platforms, this function acts as a no-op.
If notify-command is set in the [tools] section of the configuration,
that is used instead."""
notify_command = config["tools"].get("notify-command")
if notify_command:
func = notify_with_command(notify_command)
else:
platforms = {
"linux": notify_linux,
"linux2": notify_linux,
"win32": notify_win,
"darwin": notify_darwin
}
func = platforms.get(sys.platform)
if func is not None:
try:
func(title, text)
except Exception as e:
extra = getattr(e, "message", "")
print("[Warning] Could not generate notification! %s" % extra, file=sys.stderr)
@CommandProvider
class MachCommands(CommandBase):
@Command('build',
description='Build Servo',
category='build')
@CommandArgument('--release', '-r',
action='store_true',
help='Build in release mode')
@CommandArgument('--dev', '-d',
action='store_true',
help='Build in development mode')
@CommandArgument('--jobs', '-j',
default=None,
help='Number of jobs to run in parallel')
@CommandArgument('--no-package',
action='store_true',
help='For Android, disable packaging into a .apk after building')
@CommandArgument('--verbose', '-v',
action='store_true',
help='Print verbose output')
@CommandArgument('--very-verbose', '-vv',
action='store_true',
help='Print very verbose output')
@CommandArgument('--uwp',
action='store_true',
help='Build for HoloLens (x64)')
@CommandArgument('--win-arm64', action='store_true', help="Use arm64 Windows target")
@CommandArgument('params', nargs='...',
help="Command-line arguments to be passed through to Cargo")
@CommandBase.build_like_command_arguments
def build(self, release=False, dev=False, jobs=None, params=None, media_stack=None,
no_package=False, verbose=False, very_verbose=False,
target=None, android=False, magicleap=False, libsimpleservo=False,
features=None, uwp=False, win_arm64=False, **kwargs):
# Force the UWP-enabled target if the convenience UWP flags are passed.
if uwp and not target:
if win_arm64:
target = 'aarch64-uwp-windows-msvc'
else:
target = 'x86_64-uwp-windows-msvc'
opts = params or []
features = features or []
target, android = self.pick_target_triple(target, android, magicleap)
# Infer UWP build if only provided a target.
if not uwp:
uwp = target and 'uwp' in target
features += self.pick_media_stack(media_stack, target)
target_path = base_path = self.get_target_dir()
if android:
target_path = path.join(target_path, "android")
base_path = path.join(target_path, target)
elif magicleap:
target_path = path.join(target_path, "magicleap")
base_path = path.join(target_path, target)
release_path = path.join(base_path, "release", "servo")
dev_path = path.join(base_path, "debug", "servo")
release_exists = path.exists(release_path)
dev_exists = path.exists(dev_path)
if not (release or dev):
if self.config["build"]["mode"] == "dev":
dev = True
elif self.config["build"]["mode"] == "release":
release = True
elif release_exists and not dev_exists:
release = True
elif dev_exists and not release_exists:
dev = True
else:
print("Please specify either --dev (-d) for a development")
print(" build, or --release (-r) for an optimized build.")
sys.exit(1)
if release and dev:
print("Please specify either --dev or --release.")
sys.exit(1)
if release:
opts += ["--release"]
servo_path = release_path
else:
servo_path = dev_path
if jobs is not None:
opts += ["-j", jobs]
if verbose:
opts += ["-v"]
if very_verbose:
opts += ["-vv"]
env = self.build_env(target=target, is_build=True, uwp=uwp, features=features)
self.ensure_bootstrapped(target=target)
self.ensure_clobbered()
build_start = time()
env["CARGO_TARGET_DIR"] = target_path
host = host_triple()
target_triple = target or host_triple()
if 'apple-darwin' in host and target_triple == host:
if 'CXXFLAGS' not in env:
env['CXXFLAGS'] = ''
env["CXXFLAGS"] += "-mmacosx-version-min=10.10"
if 'windows' in host:
vs_dirs = self.vs_dirs()
if host != target_triple and 'windows' in target_triple:
if os.environ.get('VisualStudioVersion') or os.environ.get('VCINSTALLDIR'):
print("Can't cross-compile for Windows inside of a Visual Studio shell.\n"
"Please run `python mach build [arguments]` to bypass automatic "
"Visual Studio shell, and make sure the VisualStudioVersion and "
"VCINSTALLDIR environment variables are not set.")
sys.exit(1)
vcinstalldir = vs_dirs['vcdir']
if not os.path.exists(vcinstalldir):
print("Can't find Visual C++ %s installation at %s." % (vs_dirs['vs_version'], vcinstalldir))
sys.exit(1)
env['PKG_CONFIG_ALLOW_CROSS'] = "1"
if uwp:
# Ensure libstd is ready for the new UWP target.
check_call(["rustup", "component", "add", "rust-src"])
env['RUST_SYSROOT'] = path.expanduser('~\\.xargo')
# Don't try to build a desktop port.
libsimpleservo = True
arches = {
"aarch64": {
"angle": "arm64",
"gst": "ARM64",
"gst_root": "arm64",
},
"x86_64": {
"angle": "x64",
"gst": "X86_64",
"gst_root": "x64",
},
}
arch = arches.get(target_triple.split('-')[0])
if not arch:
print("Unsupported UWP target.")
sys.exit(1)
# Ensure that the NuGet ANGLE package containing libEGL is accessible
# to the Rust linker.
append_to_path_env(angle_root(target_triple, env), env, "LIB")
# Don't want to mix non-UWP libraries with vendored UWP libraries.
if "gstreamer" in env['LIB']:
print("Found existing GStreamer library path in LIB. Please remove it.")
sys.exit(1)
# Override any existing GStreamer installation with the vendored libraries.
env["GSTREAMER_1_0_ROOT_" + arch['gst']] = path.join(
self.msvc_package_dir("gstreamer-uwp"), arch['gst_root']
)
env["PKG_CONFIG_PATH"] = path.join(
self.msvc_package_dir("gstreamer-uwp"), arch['gst_root'],
"lib", "pkgconfig"
)
if 'windows' in host:
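# Import the MSVC toolchain environment by running vcvarsall.bat and merging the resulting variables into os.environ.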
process = subprocess.Popen('("%s" %s > nul) && "python" -c "import os; print(repr(os.environ))"' %
(os.path.join(vs_dirs['vcdir'], "Auxiliary", "Build", "vcvarsall.bat"), "x64"),
stdout=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
exitcode = process.wait()
encoding = locale.getpreferredencoding() # See https://stackoverflow.com/a/9228117
if exitcode == 0:
decoded = stdout.decode(encoding)
if decoded.startswith("environ("):
decoded = decoded.strip()[8:-1]
os.environ.update(eval(decoded))
else:
print("Failed to run vcvarsall. stderr:")
print(stderr.decode(encoding))
exit(1)
# Ensure that GStreamer libraries are accessible when linking.
if 'windows' in target_triple:
gst_root = gstreamer_root(target_triple, env)
if gst_root:
append_to_path_env(os.path.join(gst_root, "lib"), env, "LIB")
if android:
if "ANDROID_NDK" not in env:
print("Please set the ANDROID_NDK environment variable.")
sys.exit(1)
if "ANDROID_SDK" not in env:
print("Please set the ANDROID_SDK environment variable.")
sys.exit(1)
android_platform = self.config["android"]["platform"]
android_toolchain_name = self.config["android"]["toolchain_name"]
android_toolchain_prefix = self.config["android"]["toolchain_prefix"]
android_lib = self.config["android"]["lib"]
android_arch = self.config["android"]["arch"]
# Build OpenSSL for android
env["OPENSSL_VERSION"] = "1.1.1d"
make_cmd = ["make"]
if jobs is not None:
make_cmd += ["-j" + jobs]
openssl_dir = path.join(target_path, target, "native", "openssl")
if not path.exists(openssl_dir):
os.makedirs(openssl_dir)
shutil.copy(path.join(self.android_support_dir(), "openssl.makefile"), openssl_dir)
shutil.copy(path.join(self.android_support_dir(), "openssl.sh"), openssl_dir)
# Check if the NDK version is 15
if not os.path.isfile(path.join(env["ANDROID_NDK"], 'source.properties')):
print("ANDROID_NDK should have file `source.properties`.")
print("The environment variable ANDROID_NDK may be set at a wrong path.")
sys.exit(1)
with open(path.join(env["ANDROID_NDK"], 'source.properties')) as ndk_properties:
lines = ndk_properties.readlines()
if lines[1].split(' = ')[1].split('.')[0] != '15':
print("Currently only support NDK 15. Please re-run `./mach bootstrap-android`.")
sys.exit(1)
env["RUST_TARGET"] = target
with cd(openssl_dir):
status = call(
make_cmd + ["-f", "openssl.makefile"],
env=env,
verbose=verbose)
if status:
return status
openssl_dir = path.join(openssl_dir, "openssl-{}".format(env["OPENSSL_VERSION"]))
env['OPENSSL_LIB_DIR'] = openssl_dir
env['OPENSSL_INCLUDE_DIR'] = path.join(openssl_dir, "include")
env['OPENSSL_STATIC'] = 'TRUE'
# Android builds also require having the gcc bits on the PATH and various INCLUDE
# path munging if you do not want to install a standalone NDK. See:
# https://dxr.mozilla.org/mozilla-central/source/build/autoconf/android.m4#139-161
os_type = platform.system().lower()
if os_type not in ["linux", "darwin"]:
raise Exception("Android cross builds are only supported on Linux and macOS.")
cpu_type = platform.machine().lower()
host_suffix = "unknown"
if cpu_type in ["i386", "i486", "i686", "i768", "x86"]:
host_suffix = "x86"
elif cpu_type in ["x86_64", "x86-64", "x64", "amd64"]:
host_suffix = "x86_64"
host = os_type + "-" + host_suffix
host_cc = env.get('HOST_CC') or _get_exec_path(["clang"]) or _get_exec_path(["gcc"])
host_cxx = env.get('HOST_CXX') or _get_exec_path(["clang++"]) or _get_exec_path(["g++"])
llvm_toolchain = path.join(env['ANDROID_NDK'], "toolchains", "llvm", "prebuilt", host)
gcc_toolchain = path.join(env['ANDROID_NDK'], "toolchains",
android_toolchain_prefix + "-4.9", "prebuilt", host)
gcc_libs = path.join(gcc_toolchain, "lib", "gcc", android_toolchain_name, "4.9.x")
env['PATH'] = (path.join(llvm_toolchain, "bin") + ':' + env['PATH'])
env['ANDROID_SYSROOT'] = path.join(env['ANDROID_NDK'], "sysroot")
support_include = path.join(env['ANDROID_NDK'], "sources", "android", "support", "include")
cpufeatures_include = path.join(env['ANDROID_NDK'], "sources", "android", "cpufeatures")
cxx_include = path.join(env['ANDROID_NDK'], "sources", "cxx-stl",
"llvm-libc++", "include")
clang_include = path.join(llvm_toolchain, "lib64", "clang", "3.8", "include")
cxxabi_include = path.join(env['ANDROID_NDK'], "sources", "cxx-stl",
"llvm-libc++abi", "include")
sysroot_include = path.join(env['ANDROID_SYSROOT'], "usr", "include")
arch_include = path.join(sysroot_include, android_toolchain_name)
android_platform_dir = path.join(env['ANDROID_NDK'], "platforms", android_platform, "arch-" + android_arch)
arch_libs = path.join(android_platform_dir, "usr", "lib")
clang_include = path.join(llvm_toolchain, "lib64", "clang", "5.0", "include")
android_api = android_platform.replace('android-', '')
env['HOST_CC'] = host_cc
env['HOST_CXX'] = host_cxx
env['HOST_CFLAGS'] = ''
env['HOST_CXXFLAGS'] = ''
env['CC'] = path.join(llvm_toolchain, "bin", "clang")
env['CPP'] = path.join(llvm_toolchain, "bin", "clang") + " -E"
env['CXX'] = path.join(llvm_toolchain, "bin", "clang++")
env['ANDROID_TOOLCHAIN'] = gcc_toolchain
env['ANDROID_TOOLCHAIN_DIR'] = gcc_toolchain
env['ANDROID_VERSION'] = android_api
env['ANDROID_PLATFORM_DIR'] = android_platform_dir
env['GCC_TOOLCHAIN'] = gcc_toolchain
gcc_toolchain_bin = path.join(gcc_toolchain, android_toolchain_name, "bin")
env['AR'] = path.join(gcc_toolchain_bin, "ar")
env['RANLIB'] = path.join(gcc_toolchain_bin, "ranlib")
env['OBJCOPY'] = path.join(gcc_toolchain_bin, "objcopy")
env['YASM'] = path.join(env['ANDROID_NDK'], 'prebuilt', host, 'bin', 'yasm')
# A cheat-sheet for some of the build errors caused by getting the search path wrong...
#
# fatal error: 'limits' file not found
# -- add -I cxx_include
# unknown type name '__locale_t' (when running bindgen in mozjs_sys)
# -- add -isystem sysroot_include
# error: use of undeclared identifier 'UINTMAX_C'
# -- add -D__STDC_CONSTANT_MACROS
#
# Also worth remembering: autoconf uses C for its configuration,
# even for C++ builds, so the C flags need to line up with the C++ flags.
env['CFLAGS'] = ' '.join([
"--target=" + target,
"--sysroot=" + env['ANDROID_SYSROOT'],
"--gcc-toolchain=" + gcc_toolchain,
"-isystem", sysroot_include,
"-I" + arch_include,
"-B" + arch_libs,
"-L" + arch_libs,
"-D__ANDROID_API__=" + android_api,
])
env['CXXFLAGS'] = ' '.join([
"--target=" + target,
"--sysroot=" + env['ANDROID_SYSROOT'],
"--gcc-toolchain=" + gcc_toolchain,
"-I" + cpufeatures_include,
"-I" + cxx_include,
"-I" + clang_include,
"-isystem", sysroot_include,
"-I" + cxxabi_include,
"-I" + clang_include,
"-I" + arch_include,
"-I" + support_include,
"-L" + gcc_libs,
"-B" + arch_libs,
"-L" + arch_libs,
"-D__ANDROID_API__=" + android_api,
"-D__STDC_CONSTANT_MACROS",
"-D__NDK_FPABI__=",
])
env['CPPFLAGS'] = ' '.join([
"--target=" + target,
"--sysroot=" + env['ANDROID_SYSROOT'],
"-I" + arch_include,
])
env["NDK_ANDROID_VERSION"] = android_api
env["ANDROID_ABI"] = android_lib
env["ANDROID_PLATFORM"] = android_platform
env["NDK_CMAKE_TOOLCHAIN_FILE"] = path.join(env['ANDROID_NDK'], "build", "cmake", "android.toolchain.cmake")
env["CMAKE_TOOLCHAIN_FILE"] = path.join(self.android_support_dir(), "toolchain.cmake")
# Set output dir for gradle aar files
aar_out_dir = self.android_aar_dir()
if not os.path.exists(aar_out_dir):
os.makedirs(aar_out_dir)
env["AAR_OUT_DIR"] = aar_out_dir
# GStreamer and its dependencies use pkg-config and this flag is required
# to make it work in a cross-compilation context.
env["PKG_CONFIG_ALLOW_CROSS"] = '1'
# Build the name of the package containing all GStreamer dependencies
# according to the build target.
gst_lib = "gst-build-{}".format(self.config["android"]["lib"])
gst_lib_zip = "gstreamer-{}-1.16.0-20190517-095630.zip".format(self.config["android"]["lib"])
gst_dir = os.path.join(target_path, "gstreamer")
gst_lib_path = os.path.join(gst_dir, gst_lib)
pkg_config_path = os.path.join(gst_lib_path, "pkgconfig")
env["PKG_CONFIG_PATH"] = pkg_config_path
if not os.path.exists(gst_lib_path):
# Download GStreamer dependencies if they have not already been downloaded
# This bundle is generated with `libgstreamer_android_gen`
# Follow these instructions to build and deploy new binaries
# https://github.com/servo/libgstreamer_android_gen#build
print("Downloading GStreamer dependencies")
gst_url = "https://servo-deps-2.s3.amazonaws.com/gstreamer/%s" % gst_lib_zip
print(gst_url)
urllib.request.urlretrieve(gst_url, gst_lib_zip)
zip_ref = zipfile.ZipFile(gst_lib_zip, "r")
zip_ref.extractall(gst_dir)
os.remove(gst_lib_zip)
# Change pkgconfig info to make all GStreamer dependencies point
# to the libgstreamer_android.so bundle.
for each in os.listdir(pkg_config_path):
if each.endswith('.pc'):
print("Setting pkgconfig info for %s" % each)
pc = os.path.join(pkg_config_path, each)
expr = "s#libdir=.*#libdir=%s#g" % gst_lib_path
subprocess.call(["perl", "-i", "-pe", expr, pc])
if magicleap:
if platform.system() not in ["Darwin"]:
raise Exception("Magic Leap builds are only supported on macOS. "
"If you only wish to test if your code builds, "
"run ./mach build -p libmlservo.")
ml_sdk = env.get("MAGICLEAP_SDK")
if not ml_sdk:
raise Exception("Magic Leap builds need the MAGICLEAP_SDK environment variable")
if not os.path.exists(ml_sdk):
raise Exception("Path specified by MAGICLEAP_SDK does not exist.")
ml_support = path.join(self.get_top_dir(), "support", "magicleap")
# We pretend to be an Android build
env.setdefault("ANDROID_VERSION", "21")
env.setdefault("ANDROID_NDK", env["MAGICLEAP_SDK"])
env.setdefault("ANDROID_NDK_VERSION", "16.0.0")
env.setdefault("ANDROID_PLATFORM_DIR", path.join(env["MAGICLEAP_SDK"], "lumin"))
env.setdefault("ANDROID_TOOLCHAIN_DIR", path.join(env["MAGICLEAP_SDK"], "tools", "toolchains"))
env.setdefault("ANDROID_CLANG", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "clang"))
# A random collection of search paths
env.setdefault("STLPORT_LIBS", " ".join([
"-L" + path.join(env["MAGICLEAP_SDK"], "lumin", "stl", "libc++-lumin", "lib"),
"-lc++"
]))
env.setdefault("STLPORT_CPPFLAGS", " ".join([
"-I" + path.join(env["MAGICLEAP_SDK"], "lumin", "stl", "libc++-lumin", "include")
]))
env.setdefault("CPPFLAGS", " ".join([
"--no-standard-includes",
"--sysroot=" + env["ANDROID_PLATFORM_DIR"],
"-I" + path.join(env["ANDROID_PLATFORM_DIR"], "usr", "include"),
"-isystem" + path.join(env["ANDROID_TOOLCHAIN_DIR"], "lib64", "clang", "3.8", "include"),
]))
env.setdefault("CFLAGS", " ".join([
env["CPPFLAGS"],
"-L" + path.join(env["ANDROID_TOOLCHAIN_DIR"], "lib", "gcc", target, "4.9.x"),
]))
env.setdefault("CXXFLAGS", " ".join([
# Sigh, Angle gets confused if there's another EGL around
"-I./gfx/angle/checkout/include",
env["STLPORT_CPPFLAGS"],
env["CFLAGS"]
]))
# The toolchain commands
env.setdefault("AR", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-ar"))
env.setdefault("AS", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-clang"))
env.setdefault("CC", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-clang"))
env.setdefault("CPP", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-clang -E"))
env.setdefault("CXX", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-clang++"))
env.setdefault("LD", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-ld"))
env.setdefault("OBJCOPY", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-objcopy"))
env.setdefault("OBJDUMP", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-objdump"))
env.setdefault("RANLIB", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-ranlib"))
env.setdefault("STRIP", path.join(env["ANDROID_TOOLCHAIN_DIR"], "bin", "aarch64-linux-android-strip"))
# Undo all of that when compiling build tools for the host
env.setdefault("HOST_CFLAGS", "")
env.setdefault("HOST_CXXFLAGS", "")
env.setdefault("HOST_CC", "/usr/local/opt/llvm/bin/clang")
env.setdefault("HOST_CXX", "/usr/local/opt/llvm/bin/clang++")
env.setdefault("HOST_LD", "ld")
# Some random build configurations
env.setdefault("HARFBUZZ_SYS_NO_PKG_CONFIG", "1")
env.setdefault("PKG_CONFIG_ALLOW_CROSS", "1")
env.setdefault("CMAKE_TOOLCHAIN_FILE", path.join(ml_support, "toolchain.cmake"))
env.setdefault("_LIBCPP_INLINE_VISIBILITY", "__attribute__((__always_inline__))")
# The Open SSL configuration
env.setdefault("OPENSSL_DIR", path.join(target_path, target, "native", "openssl"))
env.setdefault("OPENSSL_VERSION", "1.1.1d")
env.setdefault("OPENSSL_STATIC", "1")
# GStreamer configuration
env.setdefault("GSTREAMER_DIR", path.join(target_path, target, "native", "gstreamer-1.16.0"))
env.setdefault("GSTREAMER_URL", "https://servo-deps-2.s3.amazonaws.com/gstreamer/gstreamer-magicleap-1.16.0-20190823-104505.tgz")
env.setdefault("PKG_CONFIG_PATH", path.join(env["GSTREAMER_DIR"], "system", "lib64", "pkgconfig"))
# Override the linker set in .cargo/config
env.setdefault("CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER", path.join(ml_support, "fake-ld.sh"))
# Only build libmlservo
opts += ["--package", "libmlservo"]
# Download and build OpenSSL if necessary
status = call(path.join(ml_support, "openssl.sh"), env=env, verbose=verbose)
if status:
return status
# Download prebuilt Gstreamer if necessary
if not os.path.exists(path.join(env["GSTREAMER_DIR"], "system")):
if not os.path.exists(env["GSTREAMER_DIR"] + ".tgz"):
check_call([
'curl',
'-L',
'-f',
'-o', env["GSTREAMER_DIR"] + ".tgz",
env["GSTREAMER_URL"],
])
check_call([
'mkdir',
'-p',
env["GSTREAMER_DIR"],
])
check_call([
'tar',
'xzf',
env["GSTREAMER_DIR"] + ".tgz",
'-C', env["GSTREAMER_DIR"],
])
# https://internals.rust-lang.org/t/exploring-crate-graph-build-times-with-cargo-build-ztimings/10975
# Prepend so that e.g. `-Ztimings` (which means `-Ztimings=info,html`)
# given on the command line can override it
opts = ["-Ztimings=info"] + opts
if very_verbose:
print(["Calling", "cargo", "build"] + opts)
for key in env:
print((key, env[key]))
if sys.platform == "win32":
env.setdefault("CC", "clang-cl.exe")
env.setdefault("CXX", "clang-cl.exe")
if uwp:
env.setdefault("TARGET_CFLAGS", "")
env.setdefault("TARGET_CXXFLAGS", "")
env["TARGET_CFLAGS"] += " -DWINAPI_FAMILY=WINAPI_FAMILY_APP"
env["TARGET_CXXFLAGS"] += " -DWINAPI_FAMILY=WINAPI_FAMILY_APP"
else:
env.setdefault("CC", "clang")
env.setdefault("CXX", "clang++")
status = self.run_cargo_build_like_command(
"build", opts, env=env, verbose=verbose,
target=target, android=android, magicleap=magicleap, libsimpleservo=libsimpleservo, uwp=uwp,
features=features, **kwargs
)
elapsed = time() - build_start
# Do some additional things if the build succeeded
if status == 0:
if android and not no_package:
flavor = None
if "googlevr" in features:
flavor = "googlevr"
elif "oculusvr" in features:
flavor = "oculusvr"
rv = Registrar.dispatch("package", context=self.context,
release=release, dev=dev, target=target, flavor=flavor)
if rv:
return rv
if sys.platform == "win32":
servo_exe_dir = os.path.dirname(
self.get_binary_path(release, dev, target=target, simpleservo=libsimpleservo)
)
assert os.path.exists(servo_exe_dir)
# on msvc builds, use editbin to change the subsystem to windows, but only
# on release builds -- on debug builds, it hides log output
if not dev and not libsimpleservo:
call(["editbin", "/nologo", "/subsystem:windows", path.join(servo_exe_dir, "servo.exe")],
verbose=verbose)
# on msvc, we need to copy some DLLs into the servo.exe dir and the directory for unit tests.
for ssl_lib in ["libssl.dll", "libcrypto.dll"]:
ssl_path = path.join(env['OPENSSL_LIB_DIR'], "../bin", ssl_lib)
shutil.copy(ssl_path, servo_exe_dir)
shutil.copy(ssl_path, path.join(servo_exe_dir, "deps"))
build_path = path.join(servo_exe_dir, "build")
assert os.path.exists(build_path)
def package_generated_shared_libraries(libs, build_path, servo_exe_dir):
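# Walk the Cargo build output tree copying each named DLL next to servo.exe; returns True only if every library was found.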
for root, dirs, files in os.walk(build_path):
remaining_libs = list(libs)
for lib in libs:
if lib in files:
shutil.copy(path.join(root, lib), servo_exe_dir)
remaining_libs.remove(lib)
continue
libs = remaining_libs
if not libs:
return True
for lib in libs:
print("WARNING: could not find " + lib)
# UWP build has its own ANGLE library that it packages.
if not uwp:
print("Packaging EGL DLLs")
egl_libs = ["libEGL.dll", "libGLESv2.dll"]
if not package_generated_shared_libraries(egl_libs, build_path, servo_exe_dir):
status = 1
# copy needed gstreamer DLLs into the servo.exe dir
print("Packaging gstreamer DLLs")
if not package_gstreamer_dlls(env, servo_exe_dir, target_triple, uwp):
status = 1
# UWP app packaging already bundles all required DLLs for us.
print("Packaging MSVC DLLs")
if not package_msvc_dlls(servo_exe_dir, target_triple, vs_dirs['vcdir'], vs_dirs['vs_version']):
status = 1
elif sys.platform == "darwin":
servo_exe_dir = os.path.dirname(
self.get_binary_path(release, dev, target=target, simpleservo=libsimpleservo)
)
assert os.path.exists(servo_exe_dir)
if not package_gstreamer_dylibs(servo_exe_dir):
return 1
# On the Mac, set a lovely icon. This makes it easier to pick out the Servo binary in tools
# like Instruments.app.
try:
import Cocoa
icon_path = path.join(self.get_top_dir(), "resources", "servo_1024.png")
icon = Cocoa.NSImage.alloc().initWithContentsOfFile_(icon_path)
if icon is not None:
Cocoa.NSWorkspace.sharedWorkspace().setIcon_forFile_options_(icon,
servo_path,
0)
except ImportError:
pass
# Generate Desktop Notification if elapsed-time > some threshold value
notify_build_done(self.config, elapsed, status == 0)
print("Build %s in %s" % ("Completed" if status == 0 else "FAILED", format_duration(elapsed)))
return status
@Command('clean',
description='Clean the build directory.',
category='build')
@CommandArgument('--manifest-path',
default=None,
help='Path to the manifest to the package to clean')
@CommandArgument('--verbose', '-v',
action='store_true',
help='Print verbose output')
@CommandArgument('params', nargs='...',
help="Command-line arguments to be passed through to Cargo")
def clean(self, manifest_path=None, params=[], verbose=False):
self.ensure_bootstrapped()
virtualenv_fname = '_virtualenv%d.%d' % (sys.version_info[0], sys.version_info[1])
virtualenv_path = path.join(self.get_top_dir(), 'python', virtualenv_fname)
if path.exists(virtualenv_path):
print('Removing virtualenv directory: %s' % virtualenv_path)
shutil.rmtree(virtualenv_path)
self.clean_uwp()
opts = ["--manifest-path", manifest_path or path.join(self.context.topdir, "Cargo.toml")]
if verbose:
opts += ["-v"]
opts += params
return check_call(["cargo", "clean"] + opts, env=self.build_env(), verbose=verbose)
@Command('clean-uwp',
description='Clean the support/hololens/ directory.',
category='build')
def clean_uwp(self):
uwp_artifacts = [
"support/hololens/x64/",
"support/hololens/ARM/",
"support/hololens/ARM64/",
"support/hololens/ServoApp/x64/",
"support/hololens/ServoApp/ARM/",
"support/hololens/ServoApp/ARM64/",
"support/hololens/ServoApp/Generated Files/",
"support/hololens/ServoApp/BundleArtifacts/",
"support/hololens/ServoApp/support/",
"support/hololens/ServoApp/Debug/",
"support/hololens/ServoApp/Release/",
"support/hololens/packages/",
"support/hololens/AppPackages/",
"support/hololens/ServoApp/ServoApp.vcxproj.user",
]
for uwp_artifact in uwp_artifacts:
artifact = path.join(self.get_top_dir(), uwp_artifact)
if path.exists(artifact):
if path.isdir(artifact):
shutil.rmtree(artifact)
else:
os.remove(artifact)
def angle_root(target, nuget_env):
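# Resolve the directory of the ANGLE NuGet package for this target, restoring packages with nuget.exe if needed.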
arch = {
"aarch64": "arm64",
"x86_64": "x64",
}
angle_arch = arch[target.split('-')[0]]
package_name = "ANGLE.WindowsStore.Servo"
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join('support', 'hololens', 'ServoApp', 'packages.config'))
root = tree.getroot()
for package in root.iter('package'):
if package.get('id') == package_name:
package_version = package.get('version')
break
else:
raise Exception("Couldn't locate ANGLE package")
angle_default_path = path.join(os.getcwd(), "support", "hololens", "packages",
package_name + "." + package_version, "bin", "UAP", angle_arch)
# Nuget executable command
nuget_app = path.join(os.getcwd(), "support", "hololens", "ServoApp.sln")
if not os.path.exists(angle_default_path):
check_call(['nuget.exe', 'restore', nuget_app], env=nuget_env)
return angle_default_path
def package_gstreamer_dylibs(servo_exe_dir):
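# Copy the GStreamer dylibs and plugins next to the Servo binary on macOS; returns False if any expected file is missing.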
missing = []
gst_dylibs = macos_dylibs() + macos_plugins()
for gst_lib in gst_dylibs:
try:
dest_path = os.path.join(servo_exe_dir, os.path.basename(gst_lib))
if os.path.isfile(dest_path):
os.remove(dest_path)
shutil.copy(gst_lib, servo_exe_dir)
except Exception as e:
print(e)
missing += [str(gst_lib)]
for gst_lib in missing:
print("ERROR: could not find required GStreamer DLL: " + gst_lib)
return not missing
def package_gstreamer_dlls(env, servo_exe_dir, target, uwp):
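# Copy the GStreamer runtime DLLs and the required plugin subset next to servo.exe on Windows; returns False if anything is missing.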
gst_root = gstreamer_root(target, env)
if not gst_root:
print("Could not find GStreamer installation directory.")
return False
# All the shared libraries required for starting up and loading plugins.
gst_dlls = [
"avcodec-58.dll",
"avfilter-7.dll",
"avformat-58.dll",
"avutil-56.dll",
"bz2.dll",
"ffi-7.dll",
"gio-2.0-0.dll",
"glib-2.0-0.dll",
"gmodule-2.0-0.dll",
"gobject-2.0-0.dll",
"intl-8.dll",
"orc-0.4-0.dll",
"swresample-3.dll",
"z-1.dll",
]
gst_dlls += windows_dlls(uwp)
if uwp:
# These come from a more recent version of ffmpeg and
# aren't present in the official GStreamer 1.16 release.
gst_dlls += [
"avresample-4.dll",
"postproc-55.dll",
"swscale-5.dll",
"x264-157.dll",
]
else:
# These are built with MinGW and are not yet compatible
# with UWP's restrictions.
gst_dlls += [
"graphene-1.0-0.dll",
"libcrypto-1_1-x64.dll",
"libgmp-10.dll",
"libgnutls-30.dll",
"libhogweed-4.dll",
"libjpeg-8.dll",
"libnettle-6.dll",
"libogg-0.dll",
"libopus-0.dll",
"libpng16-16.dll",
"libssl-1_1-x64.dll",
"libtasn1-6.dll",
"libtheora-0.dll",
"libtheoradec-1.dll",
"libtheoraenc-1.dll",
"libusrsctp-1.dll",
"libvorbis-0.dll",
"libvorbisenc-2.dll",
"libwinpthread-1.dll",
"nice-10.dll",
]
missing = []
for gst_lib in gst_dlls:
try:
shutil.copy(path.join(gst_root, "bin", gst_lib), servo_exe_dir)
except Exception:
missing += [str(gst_lib)]
for gst_lib in missing:
print("ERROR: could not find required GStreamer DLL: " + gst_lib)
if missing:
return False
# Only copy a subset of the available plugins.
gst_dlls = windows_plugins(uwp)
gst_plugin_path_root = os.environ.get("GSTREAMER_PACKAGE_PLUGIN_PATH") or gst_root
gst_plugin_path = path.join(gst_plugin_path_root, "lib", "gstreamer-1.0")
if not os.path.exists(gst_plugin_path):
print("ERROR: couldn't find gstreamer plugins at " + gst_plugin_path)
return False
missing = []
for gst_lib in gst_dlls:
try:
shutil.copy(path.join(gst_plugin_path, gst_lib), servo_exe_dir)
except Exception:
missing += [str(gst_lib)]
for gst_lib in missing:
print("ERROR: could not find required GStreamer DLL: " + gst_lib)
return not missing
def package_msvc_dlls(servo_exe_dir, target, vcinstalldir, vs_version):
# copy some MSVC DLLs to servo.exe dir
msvc_redist_dir = None
vs_platforms = {
"x86_64": "x64",
"i686": "x86",
"aarch64": "arm64",
}
target_arch = target.split('-')[0]
vs_platform = vs_platforms[target_arch]
vc_dir = vcinstalldir or os.environ.get("VCINSTALLDIR", "")
if not vs_version:
vs_version = os.environ.get("VisualStudioVersion", "")
msvc_deps = [
"msvcp140.dll",
"vcruntime140.dll",
]
if target_arch != "aarch64" and "uwp" not in target and vs_version in ("14.0", "15.0", "16.0"):
msvc_deps += ["api-ms-win-crt-runtime-l1-1-0.dll"]
# Check if it's Visual C++ Build Tools or Visual Studio 2015
vs14_vcvars = path.join(vc_dir, "vcvarsall.bat")
is_vs14 = os.path.isfile(vs14_vcvars) or vs_version == "14.0"
if is_vs14:
msvc_redist_dir = path.join(vc_dir, "redist", vs_platform, "Microsoft.VC140.CRT")
elif vs_version in ("15.0", "16.0"):
redist_dir = path.join(vc_dir, "Redist", "MSVC")
if os.path.isdir(redist_dir):
for p in os.listdir(redist_dir)[::-1]:
redist_path = path.join(redist_dir, p)
for v in ["VC141", "VC142", "VC150", "VC160"]:
# there are two possible paths
# `x64\Microsoft.VC*.CRT` or `onecore\x64\Microsoft.VC*.CRT`
redist1 = path.join(redist_path, vs_platform, "Microsoft.{}.CRT".format(v))
redist2 = path.join(redist_path, "onecore", vs_platform, "Microsoft.{}.CRT".format(v))
if os.path.isdir(redist1):
msvc_redist_dir = redist1
break
elif os.path.isdir(redist2):
msvc_redist_dir = redist2
break
if msvc_redist_dir:
break
if not msvc_redist_dir:
print("Couldn't locate MSVC redistributable directory")
return False
redist_dirs = [
msvc_redist_dir,
]
if "WindowsSdkDir" in os.environ:
redist_dirs += [path.join(os.environ["WindowsSdkDir"], "Redist", "ucrt", "DLLs", vs_platform)]
missing = []
for msvc_dll in msvc_deps:
for dll_dir in redist_dirs:
dll = path.join(dll_dir, msvc_dll)
servo_dir_dll = path.join(servo_exe_dir, msvc_dll)
if os.path.isfile(dll):
if os.path.isfile(servo_dir_dll):
# avoid a permission denied error when overwriting the dll in the servo build directory
os.chmod(servo_dir_dll, stat.S_IWUSR)
shutil.copy(dll, servo_exe_dir)
break
else:
missing += [msvc_dll]
for msvc_dll in missing:
print("DLL file `{}` not found!".format(msvc_dll))
return not missing
| mpl-2.0 |
MalkIPP/ipp_work | ipp_work/simulations/ir_marg_rate.py | 1 | 8481 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas
import logging
import openfisca_france_data
from openfisca_france_data.input_data_builders import get_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
from openfisca_core.rates import average_rate
from ipp_work.utils import from_simulation_to_data_frame_by_entity_key_plural
log = logging.getLogger(__name__)
def from_input_df_to_entity_key_plural_df(input_data_frame, tax_benefit_system, simulation, used_as_input_variables = None):
'''
Required inputs:
an input_data_frame
the list of needed variables and their entities => the tax_benefit_system is required
Goal: build an input_data_frame_by_entity_key_plural
A second function must then be written to turn this DataFrame into an Array
'''
assert input_data_frame is not None
assert tax_benefit_system is not None
id_variables = [
entity.index_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
if not entity.is_persons_entity]
role_variables = [
entity.role_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
if not entity.is_persons_entity]
column_by_name = tax_benefit_system.column_by_name
# Check 1 (here or in the class method?)
for column_name in input_data_frame:
if column_name not in column_by_name:
log.info('Unknown column "{}" in survey, dropped from input table'.format(column_name))
# waiting for the new pandas version to hit Travis repo
input_data_frame = input_data_frame.drop(column_name, axis = 1)
# , inplace = True) # TODO: side effects?
# Check 2 (here or in the class method?)
for column_name in input_data_frame:
if column_name in id_variables + role_variables:
continue
# TODO: make that work? (MG, May 15)
# if column_by_name[column_name].formula_class.function is not None:
# if column_name in column_by_name.used_as_input_variables:
# log.info(
# 'Column "{}" not dropped because present in used_as_input_variabels'.format(column_name))
# continue
#
# log.info('Column "{}" in survey set to be calculated, dropped from input table'.format(column_name))
# input_data_frame = input_data_frame.drop(column_name, axis = 1)
# , inplace = True) # TODO: side effects?
# Work on entities
for entity in simulation.entity_by_key_singular.values():
if entity.is_persons_entity:
entity.count = entity.step_size = len(input_data_frame)
else:
entity.count = entity.step_size = (input_data_frame[entity.role_for_person_variable_name] == 0).sum()
entity.roles_count = input_data_frame[entity.role_for_person_variable_name].max() + 1
# Classify column by entity:
columns_by_entity = {}
columns_by_entity['individu'] = []
columns_by_entity['quifam'] = []
columns_by_entity['quifoy'] = []
columns_by_entity['quimen'] = []
for column_name, column_serie in input_data_frame.iteritems():
holder = simulation.get_or_new_holder(column_name)
entity = holder.entity
if entity.is_persons_entity:
columns_by_entity['individu'].append(column_name)
else:
columns_by_entity[entity.role_for_person_variable_name].append(column_name)
input_data_frame_by_entity_key_plural = {}
for entity in simulation.entity_by_key_singular.values():
if entity.is_persons_entity:
input_data_frame_by_entity_key_plural['individus'] = \
input_data_frame[columns_by_entity['individu']]
entity.count = entity.step_size = len(input_data_frame)
else:
input_data_frame_by_entity_key_plural[entity.index_for_person_variable_name] = \
input_data_frame[columns_by_entity[entity.role_for_person_variable_name]][input_data_frame[entity.role_for_person_variable_name] == 0]
return input_data_frame_by_entity_key_plural
def marginal_rate_survey(df, target = None, target_2 = None, varying = None, varying_2 = None):
# target: numerator, varying: denominator
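# i.e. marginal rate = 1 - (change in target) / (change in varying) between the two simulations.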
return 1 - (df[target] - df[target_2]) / (df[varying] - df[varying_2])
def varying_survey_simulation(year = 2009, increment = 10, target = 'irpp', varying = 'rni', used_as_input_variables = None):
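# Run the survey simulation twice - baseline and with `varying` incremented - and compute marginal and average rates of `target` with respect to `varying` for each tax unit.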
TaxBenefitSystem = openfisca_france_data.init_country()
tax_benefit_system = TaxBenefitSystem()
input_data_frame = get_input_data_frame(year)
# Simulation 1 : get varying and target
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
used_as_input_variables = used_as_input_variables,
year = year,
tax_benefit_system = tax_benefit_system
)
simulation = survey_scenario.new_simulation(debug = False)
output_data_frame = pandas.DataFrame(
dict([(name, simulation.calculate_add(name)) for name in [
target, varying, 'idfoy_original'
]]))
# Make input_data_frame_by_entity_key_plural from the previous input_data_frame and simulation
input_data_frames_by_entity_key_plural = \
from_input_df_to_entity_key_plural_df(input_data_frame, tax_benefit_system, simulation)
foyers = input_data_frames_by_entity_key_plural['idfoy']
foyers = pandas.merge(foyers, output_data_frame, on = 'idfoy_original')
# Increment the varying variable:
foyers[varying] = foyers[varying] + increment
# Put the updated data frame back into the dictionary
input_data_frames_by_entity_key_plural['idfoy'] = foyers
# Second simulation, built from input_data_frame_by_entity_key_plural:
# TODO: fix used_as_input_variables in the from_input_df_to_entity_key_plural_df() function
used_as_input_variables = used_as_input_variables + [varying]
TaxBenefitSystem = openfisca_france_data.init_country()
tax_benefit_system = TaxBenefitSystem()
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = None,
input_data_frames_by_entity_key_plural = input_data_frames_by_entity_key_plural,
used_as_input_variables = used_as_input_variables,
year = year,
tax_benefit_system = tax_benefit_system,
)
simulation = survey_scenario.new_simulation(debug = False)
output_data_frame2 = pandas.DataFrame(
dict([(name, simulation.calculate_add(name)) for name in [
target, varying, 'idfoy_original'
]]))
output_data_frame2.rename(columns = {varying: '{}_2'.format(varying),
target: '{}_2'.format(target)}, inplace = True)
merged = pandas.merge(output_data_frame, output_data_frame2, on = 'idfoy_original')
merged['marginal_rate'] = marginal_rate_survey(merged, '{}'.format(target), '{}_2'.format(target), 'rni', 'rni_2')
merged['average_rate'] = average_rate(target = merged[target], varying = merged[varying])
return merged
if __name__ == '__main__':
import logging
import time
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
start = time.time()
used_as_input_variables = ['salaire_imposable', 'cho', 'rst', 'age_en_mois', 'smic55']
merged = varying_survey_simulation(year = 2009, increment = 10, target = 'irpp', varying = 'rni',
used_as_input_variables = used_as_input_variables)
| agpl-3.0 |
agx/git-buildpackage | gbp/scripts/import_ref.py | 1 | 8733 | # vim: set fileencoding=utf-8 :
#
# (C) 2018 Michael Stapelberg <[email protected]>
# 2018 Guido Günther <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
#
"""Import a new upstream version from a git branch onto the Debian branch"""
import os
import sys
import gbp.command_wrappers as gbpc
from gbp.deb.git import GitRepositoryError
from gbp.config import GbpOptionParserDebian, GbpOptionGroup
from gbp.errors import GbpError
import gbp.log
from gbp.scripts.common import ExitCodes
from gbp.deb.rollbackgit import RollbackDebianGitRepository
from gbp.scripts.import_orig import (debian_branch_merge,
postimport_hook,
set_bare_repo_options,
rollback)
def get_commit_and_version_to_merge(repo, options):
"""
Get the commit and version we want to merge based on the
--upstream-tag setting
"""
version = options.version
if options.upstream_tree.upper() == 'VERSION':
# Determine tag name from given version
if not options.version:
raise GbpError("No upstream version given, try -u<version>")
commit = repo.version_to_tag(options.upstream_tag, options.version)
elif options.upstream_tree.upper() == 'BRANCH':
# Use head of upstream branch
if not repo.has_branch(options.upstream_branch):
raise GbpError("%s is not a valid branch" % options.upstream_branch)
commit = options.upstream_branch
else:
# Use whatever is passed in as commitish
commit = "%s^{commit}" % options.upstream_tree
return commit, version
def build_parser(name):
try:
parser = GbpOptionParserDebian(command=os.path.basename(name), prefix='',
usage='%prog [options] -u<upstream-version>')
except GbpError as err:
gbp.log.err(err)
return None
import_group = GbpOptionGroup(parser, "import options",
"import related options")
tag_group = GbpOptionGroup(parser, "tag options",
"tag related options ")
branch_group = GbpOptionGroup(parser, "version and branch naming options",
"version number and branch layout options")
cmd_group = GbpOptionGroup(parser, "external command options",
"how and when to invoke external commands and hooks")
for group in [import_group, branch_group, tag_group, cmd_group]:
parser.add_option_group(group)
branch_group.add_option("-u", "--upstream-version", dest="version",
help="The version number to use for the new version, "
"default is ''", default='')
branch_group.add_config_file_option(option_name="debian-branch",
dest="debian_branch")
branch_group.add_config_file_option(option_name="upstream-branch",
dest="upstream_branch")
branch_group.add_config_file_option(option_name="upstream-tree",
dest="upstream_tree",
help="Where to merge the upstream changes from.",
default="VERSION")
branch_group.add_config_file_option(option_name="merge-mode", dest="merge_mode")
tag_group.add_boolean_config_file_option(option_name="sign-tags",
dest="sign_tags")
tag_group.add_config_file_option(option_name="keyid",
dest="keyid")
tag_group.add_config_file_option(option_name="upstream-tag",
dest="upstream_tag")
import_group.add_config_file_option(option_name="import-msg",
dest="import_msg")
cmd_group.add_config_file_option(option_name="postimport", dest="postimport")
parser.add_boolean_config_file_option(option_name="rollback",
dest="rollback")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="verbose command execution")
parser.add_config_file_option(option_name="color", dest="color", type='tristate')
parser.add_config_file_option(option_name="color-scheme",
dest="color_scheme")
return parser
def parse_args(argv):
"""Parse the command line arguments
@return: options and arguments
"""
parser = build_parser(argv[0])
if not parser:
return None, None
(options, args) = parser.parse_args(argv[1:])
gbp.log.setup(options.color, options.verbose, options.color_scheme)
return options, args
def main(argv):
ret = 0
repo = None
(options, args) = parse_args(argv)
if not options:
return ExitCodes.parse_error
# TODO: honor --filter option
# TODO: add --filter-with-copyright which takes d/copyright into account
# TODO: handle automatic versions based on timestamp + sha1
# TODO: handle updating of upstream branch from remote
gbp.log.warn("This script is experimental, it might change incompatibly between versions.")
try:
try:
repo = RollbackDebianGitRepository('.')
except GitRepositoryError:
raise GbpError("%s is not a git repository" % (os.path.abspath('.')))
commit, version = get_commit_and_version_to_merge(repo, options)
is_empty = repo.is_empty()
(clean, out) = repo.is_clean()
if not clean and not is_empty:
gbp.log.err("Repository has uncommitted changes, commit these first: ")
raise GbpError(out)
if repo.bare:
set_bare_repo_options(options)
try:
tag = repo.version_to_tag(options.upstream_tag, version)
if not repo.has_tag(tag):
gbp.log.info("Upstream tag '%s' not found. Creating it for you." % tag)
repo.create_tag(name=tag,
msg="Upstream version %s" % version,
commit="%s^0" % commit,
sign=options.sign_tags,
keyid=options.keyid)
if is_empty:
repo.create_branch(branch=options.debian_branch, rev=commit)
repo.force_head(options.debian_branch, hard=True)
# In an empty repo avoid master branch defaulted to by
# git and check out debian branch instead.
if not repo.bare:
cur = repo.branch
if cur != options.debian_branch:
repo.set_branch(options.debian_branch)
repo.delete_branch(cur)
else:
repo.rrr_branch(options.debian_branch)
debian_branch_merge(repo, tag, version, options)
# Update working copy and index if we've possibly updated the
# checked out branch
current_branch = repo.get_branch()
if current_branch in [options.upstream_branch,
repo.pristine_tar_branch]:
repo.force_head(current_branch, hard=True)
postimport_hook(repo, tag, version, options)
except (gbpc.CommandExecFailed, GitRepositoryError) as err:
msg = str(err) or 'Unknown error, please report a bug'
raise GbpError("Import of %s failed: %s" % (commit, msg))
except KeyboardInterrupt:
raise GbpError("Import of %s failed: aborted by user" % commit)
except GbpError as err:
if str(err):
gbp.log.err(err)
ret = 1
rollback(repo, options)
if not ret:
gbp.log.info("Successfully imported version %s" % (version))
return ret
if __name__ == "__main__":
sys.exit(main(sys.argv))
# vim:et:ts=4:sw=4:et:sts=4:ai:set list listchars=tab\:»·,trail\:·:
| gpl-2.0 |
yodalee/servo | tests/wpt/web-platform-tests/tools/py/testing/log/test_warning.py | 161 | 2253 | import pytest
import py
mypath = py.path.local(__file__).new(ext=".py")
@pytest.mark.xfail
def test_forwarding_to_warnings_module():
pytest.deprecated_call(py.log._apiwarn, "1.3", "..")
def test_apiwarn_functional(recwarn):
capture = py.io.StdCapture()
py.log._apiwarn("x.y.z", "something", stacklevel=1)
out, err = capture.reset()
py.builtin.print_("out", out)
py.builtin.print_("err", err)
assert err.find("x.y.z") != -1
lno = py.code.getrawcode(test_apiwarn_functional).co_firstlineno + 2
exp = "%s:%s" % (mypath, lno)
assert err.find(exp) != -1
def test_stacklevel(recwarn):
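# With stacklevel=2 the warning should be attributed to the caller of f(), i.e. this test function.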
def f():
py.log._apiwarn("x", "some", stacklevel=2)
# 3
# 4
capture = py.io.StdCapture()
f()
out, err = capture.reset()
lno = py.code.getrawcode(test_stacklevel).co_firstlineno + 6
warning = str(err)
assert warning.find(":%s" % lno) != -1
def test_stacklevel_initpkg_with_resolve(testdir, recwarn):
testdir.makepyfile(modabc="""
import py
def f():
py.log._apiwarn("x", "some", stacklevel="apipkg123")
""")
testdir.makepyfile(apipkg123="""
def __getattr__():
import modabc
modabc.f()
""")
p = testdir.makepyfile("""
import apipkg123
apipkg123.__getattr__()
""")
capture = py.io.StdCapture()
p.pyimport()
out, err = capture.reset()
warning = str(err)
loc = 'test_stacklevel_initpkg_with_resolve.py:2'
assert warning.find(loc) != -1
def test_stacklevel_initpkg_no_resolve(recwarn):
def f():
py.log._apiwarn("x", "some", stacklevel="apipkg")
capture = py.io.StdCapture()
f()
out, err = capture.reset()
lno = py.code.getrawcode(test_stacklevel_initpkg_no_resolve).co_firstlineno + 2
warning = str(err)
assert warning.find(":%s" % lno) != -1
def test_function(recwarn):
capture = py.io.StdCapture()
py.log._apiwarn("x.y.z", "something", function=test_function)
out, err = capture.reset()
py.builtin.print_("out", out)
py.builtin.print_("err", err)
assert err.find("x.y.z") != -1
lno = py.code.getrawcode(test_function).co_firstlineno
exp = "%s:%s" % (mypath, lno)
assert err.find(exp) != -1
| mpl-2.0 |
quasiben/bokeh | examples/plotting/server/selection_histogram.py | 4 | 3752 | # You must first run "bokeh serve" to view this example
import numpy as np
from bokeh.client import push_session
from bokeh.models import BoxSelectTool, LassoSelectTool, Paragraph
from bokeh.plotting import curdoc, figure, hplot, vplot
# create three normal population samples with different parameters
x1 = np.random.normal(loc=5.0, size=400) * 100
y1 = np.random.normal(loc=10.0, size=400) * 10
x2 = np.random.normal(loc=5.0, size=800) * 50
y2 = np.random.normal(loc=5.0, size=800) * 10
x3 = np.random.normal(loc=55.0, size=200) * 10
y3 = np.random.normal(loc=4.0, size=200) * 10
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
TOOLS="pan,wheel_zoom,box_select,lasso_select"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=600, plot_height=600, title=None, min_border=10, min_border_left=50)
r = p.scatter(x, y, size=3, color="#3A5785", alpha=0.6)
p.select(BoxSelectTool).select_every_mousemove = False
p.select(LassoSelectTool).select_every_mousemove = False
# create the horizontal histogram
hhist, hedges = np.histogram(x, bins=20)
hzeros = np.zeros(len(hedges)-1)
hmax = max(hhist)*1.1
LINE_ARGS = dict(color="#3A5785", line_color=None)
ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
y_range=(-hmax, hmax), title=None, min_border=10, min_border_left=50)
ph.xgrid.grid_line_color = None
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color="white", line_color="#3A5785")
hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)
hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)
# create the vertical histogram
vhist, vedges = np.histogram(y, bins=20)
vzeros = np.zeros(len(vedges)-1)
vmax = max(vhist)*1.1
th = 42 # need to adjust for toolbar height, unfortunately
pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height+th-10, x_range=(-vmax, vmax),
y_range=p.y_range, title=None, min_border=10, min_border_top=th)
pv.ygrid.grid_line_color = None
pv.xaxis.major_label_orientation = -3.14/2
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color="white", line_color="#3A5785")
vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)
vh2 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)
# NOTE: Version 0.11 has introduced auto spacing by default on VBox/HBox/vplot/hplot
# so for now we must tweak some spacing and borders to have it closely
# aligned.
pv.min_border_top = 80
pv.min_border_left = 0
ph.min_border_top = 10
ph.min_border_right = 10
p.min_border_right = 10
layout = vplot(hplot(p, pv), hplot(ph, Paragraph(width=200)), width=800, height=800)
# open a session to keep our local document in sync with server
session = push_session(curdoc())
def update(attr, old, new):
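# Selection callback: recompute the highlighted (selected) and muted (unselected) histograms for both axes.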
inds = np.array(new['1d']['indices'])
if len(inds) == 0 or len(inds) == len(x):
hhist1, hhist2 = hzeros, hzeros
vhist1, vhist2 = vzeros, vzeros
else:
neg_inds = np.ones_like(x, dtype=np.bool)
neg_inds[inds] = False
hhist1, _ = np.histogram(x[inds], bins=hedges)
vhist1, _ = np.histogram(y[inds], bins=vedges)
hhist2, _ = np.histogram(x[neg_inds], bins=hedges)
vhist2, _ = np.histogram(y[neg_inds], bins=vedges)
hh1.data_source.data["top"] = hhist1
hh2.data_source.data["top"] = -hhist2
vh1.data_source.data["right"] = vhist1
vh2.data_source.data["right"] = -vhist2
r.data_source.on_change('selected', update)
session.show(layout) # open the document in a browser
session.loop_until_closed() # run forever
| bsd-3-clause |
shownomercy/django | django/conf/locale/lt/formats.py | 504 | 1830 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Y \m. E j \d.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'Y \m. E j \d., H:i'
YEAR_MONTH_FORMAT = r'Y \m. F'
MONTH_DAY_FORMAT = r'E j \d.'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
]
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200'
'%d.%m.%y %H.%M', # '25.10.06 14.30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
uwcirg/true_nth_usa_portal | tests/test_portal.py | 1 | 11562 | """Unit test module for portal views"""
from datetime import datetime
import tempfile
import urllib
from flask_swagger import swagger
from flask_webtest import SessionScope
from swagger_spec_validator import validate_spec_url
from portal.config.config import TestConfig
from portal.extensions import db
from portal.factories.app import create_app
from portal.models.intervention import INTERVENTION, UserIntervention
from portal.models.message import EmailMessage
from portal.models.organization import Organization
from portal.models.role import ROLE
from portal.models.user import User, get_user
from tests import OAUTH_INFO_PROVIDER_LOGIN, TEST_USER_ID, TestCase
class TestPortal(TestCase):
"""Portal view tests"""
def test_card_html(self):
"""Interventions can customize the button text """
client = self.add_client()
intervention = INTERVENTION.DECISION_SUPPORT_P3P
intervention.public_access = True # make the card avail for the test
client.intervention = intervention
intervention.card_html = "Custom Label"
self.login()
self.add_required_clinical_data()
self.bless_with_basics(make_patient=False)
response = self.client.get('/home')
assert response.status_code == 200
assert 'Custom Label' in response.get_data(as_text=True)
intervention = db.session.merge(intervention)
assert intervention.card_html in response.get_data(as_text=True)
def test_user_card_html(self):
"""Interventions can further customize per user"""
client = self.add_client()
intervention = INTERVENTION.DECISION_SUPPORT_P3P
intervention.public_access = True # make the card avail for the test
client.intervention = intervention
ui = UserIntervention(
user_id=TEST_USER_ID, intervention_id=intervention.id)
ui.card_html = "<b>Bold Card Text</b>"
ui.link_label = "Custom User Label"
ui.link_url = 'http://example.com/?test=param1'
with SessionScope(db):
db.session.add(ui)
db.session.commit()
self.login()
self.add_required_clinical_data()
self.bless_with_basics(make_patient=False)
user = db.session.merge(self.test_user)
response = self.client.get('/home')
assert response.status_code == 200
ui = db.session.merge(ui)
assert ui.card_html in response.get_data(as_text=True)
assert ui.link_label in response.get_data(as_text=True)
assert ui.link_url in response.get_data(as_text=True)
intervention = db.session.merge(intervention)
assert (
intervention.display_for_user(user).link_label
in response.get_data(as_text=True))
def test_staff_html(self):
"""Interventions can customize the staff text """
client = self.add_client()
intervention = INTERVENTION.sexual_recovery
client.intervention = intervention
ui = UserIntervention(
user_id=TEST_USER_ID,
intervention_id=intervention.id)
ui.staff_html = "Custom text for <i>staff</i>"
with SessionScope(db):
db.session.add(ui)
db.session.commit()
self.bless_with_basics()
self.login()
self.promote_user(role_name=ROLE.INTERVENTION_STAFF.value)
# This test requires PATIENT_LIST_ADDL_FIELDS includes the
# 'reports' field
self.app.config['PATIENT_LIST_ADDL_FIELDS'] = ['reports']
response = self.client.get('/patients/')
ui = db.session.merge(ui)
results = response.get_data(as_text=True)
assert ui.staff_html in results
def test_public_access(self):
"""Interventions w/o public access should be hidden"""
client = self.add_client()
intervention = INTERVENTION.sexual_recovery
client.intervention = intervention
intervention.public_access = False
self.login()
self.add_required_clinical_data()
self.bless_with_basics()
response = self.client.get('/home')
assert 'Sexual Recovery' not in response.get_data(as_text=True)
# now give just the test user access
intervention = db.session.merge(intervention)
ui = UserIntervention(
user_id=TEST_USER_ID,
intervention_id=intervention.id,
access="granted")
with SessionScope(db):
db.session.add(ui)
db.session.commit()
response = self.client.get('/home')
assert 'Sexual Recovery' in response.get_data(as_text=True)
def test_admin_list(self):
"""Test admin view lists all users"""
# Generate a few users with a smattering of roles
u1 = self.add_user(username='[email protected]')
u2 = self.add_user(username='[email protected]')
self.promote_user(u1, role_name=ROLE.ADMIN.value)
self.promote_user(u2, role_name=ROLE.APPLICATION_DEVELOPER.value)
# Test user needs admin role to view list
self.promote_user(role_name=ROLE.ADMIN.value)
self.login()
response = self.client.get('/admin')
# Should at least see an entry per user in system
assert (response.get_data(as_text=True).count('/profile')
>= User.query.count())
def test_invite(self):
"""Test email invite form"""
test_user = User.query.get(TEST_USER_ID)
test_user.email = '[email protected]'
db.session.add(test_user)
db.session.commit()
self.login()
postdata = {
'subject': 'unittest subject',
'recipients': '[email protected] [email protected]',
'body': "Ode to joy"}
response = self.client.post('/invite', data=postdata,
follow_redirects=True)
assert "Email Invite Sent" in response.get_data(as_text=True)
def test_message_sent(self):
"""Email invites - test view for sent messages"""
sent_at = datetime.strptime(
"2000/01/01 12:31:00", "%Y/%m/%d %H:%M:%S")
message = EmailMessage(
subject='a subject', user_id=TEST_USER_ID,
sender="[email protected]",
body='Welcome to testing \u2713',
sent_at=sent_at,
recipients="[email protected] [email protected]")
db.session.add(message)
db.session.commit()
# confirm styling unicode functions
body = message.style_message(message.body)
assert 'DOCTYPE' in body
assert 'style' in body
assert isinstance(body, str)
self.login()
response = self.client.get('/invite/{0}'.format(message.id))
assert (response.get_data(as_text=True).find(
sent_at.strftime('%m/%d/%Y %H:%M:%S')) > 0)
assert (response.get_data(as_text=True).find('[email protected] [email protected]')
> 0)
def test_missing_message(self):
"""Request to view non existant message should 404"""
self.login()
response = self.client.get('/invite/404')
assert response.status_code == 404
def test_swagger_docgen(self):
"""Build swagger docs for entire project"""
expected_keys = (
'info',
'paths',
'swagger',
'definitions',
)
swag = swagger(self.client.application)
for key in expected_keys:
assert key in swag
def test_swagger_validation(self):
"""Ensure our swagger spec matches swagger schema"""
with tempfile.NamedTemporaryFile(
prefix='swagger_test_',
suffix='.json',
delete=True,
) as temp_spec:
temp_spec.write(self.client.get('/spec').data)
temp_spec.seek(0)
validate_spec_url("file:%s" % temp_spec.name)
def test_report_error(self):
self.login()
params = {
'subject_id': 112,
'page_url': '/not/real',
'message': 'creative test string'
}
response = self.client.get('/report-error?{}'.format(
urllib.parse.urlencode(params)))
assert response.status_code == 200
def test_configuration_settings(self):
self.login()
lr_group = self.app.config['LR_GROUP']
response = self.client.get('/api/settings/lr_group')
assert response.status_code == 200
assert response.json.get('LR_GROUP') == lr_group
response2 = self.client.get('/api/settings/bad_value')
assert response2.status_code == 400
def test_configuration_secrets(self):
"""Ensure config keys containing secrets are not exposed"""
blacklist = (
'SECRET',
'URI',
'SQL',
)
response = self.client.get('/api/settings')
assert response.status_code == 200
assert not any(
any(k in config_key for k in blacklist)
for config_key in response.json
)
class TestPortalEproms(TestCase):
"""Portal views depending on eproms blueprint"""
def create_app(self):
"""
Overload base version to hide the GIL (allows registration of ePROMs)
"""
tc = TestConfig()
setattr(tc, 'HIDE_GIL', True)
self._app = create_app(tc)
return self._app
def test_redirect_validation(self):
self.promote_user(role_name=ROLE.ADMIN.value)
self.promote_user(role_name=ROLE.STAFF.value)
org = Organization(name='test org')
user = get_user(TEST_USER_ID)
with SessionScope(db):
db.session.add(org)
user.organizations.append(org)
db.session.commit()
self.login()
client = self.add_client()
client_url = client._redirect_uris
local_url = "http://{}/home?test".format(
self.app.config.get('SERVER_NAME'))
invalid_url = 'http://invalid.org'
# validate redirect of /website-consent-script GET
response = self.client.get(
'/website-consent-script/{}'.format(TEST_USER_ID),
query_string={'redirect_url': local_url}
)
assert response.status_code == 200
response2 = self.client.get(
'/website-consent-script/{}'.format(TEST_USER_ID),
query_string={'redirect_url': invalid_url}
)
assert response2.status_code == 401
# validate session login redirect with valid url
oauth_info = {
'user_id': TEST_USER_ID,
'next': client_url,
}
response3 = self.login(oauth_info=oauth_info)
assert response3.status_code == 200
# validate session login redirect with invalid url
oauth_info['next'] = invalid_url
response4 = self.login(oauth_info=oauth_info)
assert response4.status_code == 401
# validate provider login redirect with invalid url
oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
oauth_info['next'] = invalid_url
response5 = self.login(oauth_info=oauth_info)
assert response5.status_code == 401
# validate redirect of /challenge POST
formdata = {'user_id': TEST_USER_ID, 'next_url': local_url}
response6 = self.client.post('/challenge', data=formdata)
assert response6.status_code == 200
formdata['next_url'] = invalid_url
response7 = self.client.post('/challenge', data=formdata)
assert response7.status_code == 401
| bsd-3-clause |
andersinno/foosball | config/settings/production.py | 1 | 4603 | # -*- coding: utf-8 -*-
"""
Production Configurations
"""
from __future__ import absolute_import, unicode_literals
from django.utils import six
from .common import * # noqa
# Enable social integration plugins
SOCIAL_ACCOUNTS = (
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.facebook',
)
INSTALLED_APPS += SOCIAL_ACCOUNTS
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURITY_MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
)
# Make sure SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = True
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['foosball.anders.fi'])
# END SITE CONFIGURATION
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='foosball <[email protected]>')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[foosball] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[1]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
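# Deployment sketch (added note, not in the original settings file): given the
# env() lookups above, a production process needs at least the variables below;
# the values shown are placeholders, not project conventions.
#
#   DJANGO_SECRET_KEY=<random-string>           # required, no default
#   DATABASE_URL=postgres://user:pass@host/db   # required, no default
#   DJANGO_ADMIN_URL=secret-admin/              # required, no default
#   DJANGO_ALLOWED_HOSTS=foosball.anders.fi     # optional, defaults as above
#   REDIS_URL=redis://127.0.0.1:6379            # optional, defaults as above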
| mit |
ducthien1490/youtube-dl | youtube_dl/extractor/instagram.py | 93 | 4498 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
limit_length,
)
class InstagramIE(InfoExtractor):
_VALID_URL = r'https://instagram\.com/p/(?P<id>[\da-zA-Z]+)'
_TEST = {
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
'id': 'aye83DjauH',
'ext': 'mp4',
'uploader_id': 'naomipq',
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
webpage, 'uploader id', fatal=False)
desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
fatal=False)
return {
'id': video_id,
'url': self._og_search_video_url(webpage, secure=False),
'ext': 'mp4',
'title': 'Video by %s' % uploader_id,
'thumbnail': self._og_search_thumbnail(webpage),
'uploader_id': uploader_id,
'description': desc,
}
class InstagramUserIE(InfoExtractor):
_VALID_URL = r'https://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'https://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_mincount': 2,
'playlist': [{
'info_dict': {
'id': '614605558512799803_462752227',
'ext': 'mp4',
'title': '#Porsche Intelligent Performance.',
'thumbnail': 're:^https?://.*\.jpg',
'uploader': 'Porsche',
'uploader_id': 'porsche',
'timestamp': 1387486713,
'upload_date': '20131219',
},
}],
'params': {
'extract_flat': True,
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader_id = mobj.group('username')
entries = []
page_count = 0
media_url = 'http://instagram.com/%s/media' % uploader_id
while True:
page = self._download_json(
media_url, uploader_id,
note='Downloading page %d ' % (page_count + 1),
)
page_count += 1
for it in page['items']:
if it.get('type') != 'video':
continue
like_count = int_or_none(it.get('likes', {}).get('count'))
user = it.get('user', {})
formats = [{
'format_id': k,
'height': v.get('height'),
'width': v.get('width'),
'url': v['url'],
} for k, v in it['videos'].items()]
self._sort_formats(formats)
thumbnails_el = it.get('images', {})
thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
# In some cases caption is null, which corresponds to None
# in python. As a result, it.get('caption', {}) gives None
title = (it.get('caption') or {}).get('text', it['id'])
entries.append({
'id': it['id'],
'title': limit_length(title, 80),
'formats': formats,
'thumbnail': thumbnail,
'webpage_url': it.get('link'),
'uploader': user.get('full_name'),
'uploader_id': user.get('username'),
'like_count': like_count,
'timestamp': int_or_none(it.get('created_time')),
})
if not page['items']:
break
max_id = page['items'][-1]['id']
media_url = (
'http://instagram.com/%s/media?max_id=%s' % (
uploader_id, max_id))
return {
'_type': 'playlist',
'entries': entries,
'id': uploader_id,
'title': uploader_id,
}
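# Illustrative use (added; not part of the extractor): these classes are
# normally driven through the youtube-dl command line, which matches the URL
# against _VALID_URL and dispatches to the extractor, rather than being
# imported directly. The URLs below are the ones from the _TEST fixtures:
#
#   youtube-dl 'https://instagram.com/p/aye83DjauH/'
#   youtube-dl 'https://instagram.com/porsche'    # user profile playlist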
| unlicense |
bloopletech/Comix | src/preferences.py | 6 | 22727 | """preferences.py - Preference handler."""
import os
import cPickle
import gtk
import pango
import constants
import labels
ZOOM_MODE_BEST = 0
ZOOM_MODE_WIDTH = 1
ZOOM_MODE_HEIGHT = 2
ZOOM_MODE_MANUAL = 3
# All the preferences are stored here.
prefs = {
'comment extensions': ['txt', 'nfo'],
'auto load last file': False,
'page of last file': 1,
'path to last file': '',
'auto open next archive': True,
'bg colour': (5000, 5000, 5000),
'checkered bg for transparent images': True,
'cache': True,
'stretch': False,
'default double page': False,
'default fullscreen': False,
'default zoom mode': ZOOM_MODE_BEST,
'default manga mode': False,
'lens magnification': 2,
'lens size': 200,
'no double page for wide images': False,
'double step in double page mode': True,
'show page numbers on thumbnails': True,
'thumbnail size': 80,
'create thumbnails': True,
'slideshow delay': 3000,
'smart space scroll': True,
'flip with wheel': False,
'smart bg': False,
'store recent file info': True,
'hide all': False,
'hide all in fullscreen': True,
'stored hide all values': (True, True, True, True, True),
'path of last browsed in filechooser': constants.HOME_DIR,
'last filter in main filechooser': 0,
'last filter in library filechooser': 1,
'show menubar': True,
'show scrollbar': True,
'show statusbar': True,
'show toolbar': True,
'show thumbnails': True,
'rotation': 0,
'auto rotate from exif': True,
'vertical flip': False,
'horizontal flip': False,
'keep transformation': False,
'window height': gtk.gdk.screen_get_default().get_height() * 3 // 4,
'window width': min(gtk.gdk.screen_get_default().get_width() * 3 // 4,
gtk.gdk.screen_get_default().get_height() * 5 // 8),
'library cover size': 128,
'auto add books into collections': True,
'last library collection': None,
'lib window height': gtk.gdk.screen_get_default().get_height() * 3 // 4,
'lib window width': gtk.gdk.screen_get_default().get_width() * 3 // 4
}
_config_path = os.path.join(constants.CONFIG_DIR, 'preferences.pickle')
_dialog = None
class _PreferencesDialog(gtk.Dialog):
"""The preferences dialog where most (but not all) settings that are
saved between sessions are presented to the user.
"""
def __init__(self, window):
self._window = window
gtk.Dialog.__init__(self, _('Preferences'), window, 0,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.connect('response', self._response)
self.set_has_separator(False)
self.set_resizable(True)
self.set_default_response(gtk.RESPONSE_CLOSE)
notebook = gtk.Notebook()
self.vbox.pack_start(notebook)
self.set_border_width(4)
notebook.set_border_width(6)
# ----------------------------------------------------------------
# The "Appearance" tab.
# ----------------------------------------------------------------
page = _PreferencePage(80)
page.new_section(_('Background'))
fixed_bg_button = gtk.RadioButton(None, '%s:' %
_('Use this colour as background'))
fixed_bg_button.set_tooltip_text(
_('Always use this selected colour as the background colour.'))
color_button = gtk.ColorButton(gtk.gdk.Color(*prefs['bg colour']))
color_button.connect('color_set', self._color_button_cb)
page.add_row(fixed_bg_button, color_button)
dynamic_bg_button = gtk.RadioButton(fixed_bg_button,
_('Use dynamic background colour.'))
dynamic_bg_button.set_active(prefs['smart bg'])
dynamic_bg_button.connect('toggled', self._check_button_cb, 'smart bg')
dynamic_bg_button.set_tooltip_text(
_('Automatically pick a background colour that fits the viewed image.'))
page.add_row(dynamic_bg_button)
page.new_section(_('Thumbnails'))
label = gtk.Label('%s:' % _('Thumbnail size (in pixels)'))
adjustment = gtk.Adjustment(prefs['thumbnail size'], 20, 128, 1, 10)
thumb_size_spinner = gtk.SpinButton(adjustment)
thumb_size_spinner.connect('value_changed', self._spinner_cb,
'thumbnail size')
page.add_row(label, thumb_size_spinner)
thumb_number_button = gtk.CheckButton(
_('Show page numbers on thumbnails.'))
thumb_number_button.set_active(
prefs['show page numbers on thumbnails'])
thumb_number_button.connect('toggled', self._check_button_cb,
'show page numbers on thumbnails')
page.add_row(thumb_number_button)
page.new_section(_('Magnifying Glass'))
label = gtk.Label('%s:' % _('Magnifying glass size (in pixels)'))
adjustment = gtk.Adjustment(prefs['lens size'], 50, 400, 1, 10)
glass_size_spinner = gtk.SpinButton(adjustment)
glass_size_spinner.connect('value_changed', self._spinner_cb,
'lens size')
glass_size_spinner.set_tooltip_text(
_('Set the size of the magnifying glass. It is a square with a side of this many pixels.'))
page.add_row(label, glass_size_spinner)
label = gtk.Label('%s:' % _('Magnification factor'))
adjustment = gtk.Adjustment(prefs['lens magnification'], 1.1, 10.0,
0.1, 1.0)
glass_magnification_spinner = gtk.SpinButton(adjustment, digits=1)
glass_magnification_spinner.connect('value_changed', self._spinner_cb,
'lens magnification')
glass_magnification_spinner.set_tooltip_text(
_('Set the magnification factor of the magnifying glass.'))
page.add_row(label, glass_magnification_spinner)
page.new_section(_('Image scaling'))
stretch_button = gtk.CheckButton(_('Stretch small images.'))
stretch_button.set_active(prefs['stretch'])
stretch_button.connect('toggled', self._check_button_cb, 'stretch')
stretch_button.set_tooltip_text(
_('Stretch images to a size that is larger than their original size if the current zoom mode requests it. If this preference is unset, images are never scaled to be larger than their original size.'))
page.add_row(stretch_button)
page.new_section(_('Transparency'))
checkered_bg_button = gtk.CheckButton(
_('Use checkered background for transparent images.'))
checkered_bg_button.set_active(
prefs['checkered bg for transparent images'])
checkered_bg_button.connect('toggled', self._check_button_cb,
'checkered bg for transparent images')
checkered_bg_button.set_tooltip_text(
_('Use a grey checkered background for transparent images. If this preference is unset, the background is plain white instead.'))
page.add_row(checkered_bg_button)
notebook.append_page(page, gtk.Label(_('Appearance')))
# ----------------------------------------------------------------
# The "Behaviour" tab.
# ----------------------------------------------------------------
page = _PreferencePage(150)
page.new_section(_('Scroll'))
smart_space_button = gtk.CheckButton(
_('Use smart space key scrolling.'))
smart_space_button.set_active(prefs['smart space scroll'])
smart_space_button.connect('toggled', self._check_button_cb,
'smart space scroll')
smart_space_button.set_tooltip_text(
_('Use smart scrolling with the space key. Normally the space key scrolls only right down (or up when shift is pressed), but with this preference set it also scrolls sideways and so tries to follow the natural reading order of the comic book.'))
page.add_row(smart_space_button)
flip_with_wheel_button = gtk.CheckButton(
_('Flip pages when scrolling off the edges of the page.'))
flip_with_wheel_button.set_active(prefs['flip with wheel'])
flip_with_wheel_button.connect('toggled', self._check_button_cb,
'flip with wheel')
flip_with_wheel_button.set_tooltip_text(
_('Flip pages when scrolling "off the page" with the scroll wheel or with the arrow keys. It takes three consecutive "steps" with the scroll wheel or the arrow keys for the pages to be flipped.'))
page.add_row(flip_with_wheel_button)
page.new_section(_('Double page mode'))
step_length_button = gtk.CheckButton(
_('Flip two pages in double page mode.'))
step_length_button.set_active(prefs['double step in double page mode'])
step_length_button.connect('toggled', self._check_button_cb,
'double step in double page mode')
step_length_button.set_tooltip_text(
_('Flip two pages, instead of one, each time we flip pages in double page mode.'))
page.add_row(step_length_button)
virtual_double_button = gtk.CheckButton(
_('Show only one wide image in double page mode.'))
virtual_double_button.set_active(
prefs['no double page for wide images'])
virtual_double_button.connect('toggled', self._check_button_cb,
'no double page for wide images')
virtual_double_button.set_tooltip_text(
_("Display only one image in double page mode, if the image's width exceeds its height. The result of this is that scans that span two pages are displayed properly (i.e. alone) also in double page mode."))
page.add_row(virtual_double_button)
page.new_section(_('Files'))
auto_open_next_button = gtk.CheckButton(
_('Automatically open the next archive.'))
auto_open_next_button.set_active(prefs['auto open next archive'])
auto_open_next_button.connect('toggled', self._check_button_cb,
'auto open next archive')
auto_open_next_button.set_tooltip_text(
_('Automatically open the next archive in the directory when flipping past the last page, or the previous archive when flipping past the first page.'))
page.add_row(auto_open_next_button)
auto_open_last_button = gtk.CheckButton(
_('Automatically open the last viewed file on startup.'))
auto_open_last_button.set_active(prefs['auto load last file'])
auto_open_last_button.connect('toggled', self._check_button_cb,
'auto load last file')
auto_open_last_button.set_tooltip_text(
_('Automatically open, on startup, the file that was open when Comix was last closed.'))
page.add_row(auto_open_last_button)
store_recent_button = gtk.CheckButton(
_('Store information about recently opened files.'))
store_recent_button.set_active(prefs['store recent file info'])
store_recent_button.connect('toggled', self._check_button_cb,
'store recent file info')
store_recent_button.set_tooltip_text(
_('Add information about all files opened from within Comix to the shared recent files list.'))
page.add_row(store_recent_button)
create_thumbs_button = gtk.CheckButton(
_('Store thumbnails for opened files.'))
create_thumbs_button.set_active(prefs['create thumbnails'])
create_thumbs_button.connect('toggled', self._check_button_cb,
'create thumbnails')
create_thumbs_button.set_tooltip_text(
_('Store thumbnails for opened files according to the freedesktop.org specification. These thumbnails are shared by many other applications, such as most file managers.'))
page.add_row(create_thumbs_button)
page.new_section(_('Cache'))
cache_button = gtk.CheckButton(_('Use a cache to speed up browsing.'))
cache_button.set_active(prefs['cache'])
cache_button.connect('toggled', self._check_button_cb, 'cache')
cache_button.set_tooltip_text(
_('Cache the images that are next to the currently viewed image in order to speed up browsing. Since the speed improvements are quite big, it is recommended that you have this preference set, unless you are running short on free RAM.'))
page.add_row(cache_button)
notebook.append_page(page, gtk.Label(_('Behaviour')))
# ----------------------------------------------------------------
# The "Display" tab.
# ----------------------------------------------------------------
page = _PreferencePage(180)
page.new_section(_('Default modes'))
double_page_button = gtk.CheckButton(
_('Use double page mode by default.'))
double_page_button.set_active(prefs['default double page'])
double_page_button.connect('toggled', self._check_button_cb,
'default double page')
page.add_row(double_page_button)
fullscreen_button = gtk.CheckButton(_('Use fullscreen by default.'))
fullscreen_button.set_active(prefs['default fullscreen'])
fullscreen_button.connect('toggled', self._check_button_cb,
'default fullscreen')
page.add_row(fullscreen_button)
manga_button = gtk.CheckButton(_('Use manga mode by default.'))
manga_button.set_active(prefs['default manga mode'])
manga_button.connect('toggled', self._check_button_cb,
'default manga mode')
page.add_row(manga_button)
label = gtk.Label('%s:' % _('Default zoom mode'))
zoom_combo = gtk.combo_box_new_text()
zoom_combo.append_text(_('Best fit mode'))
zoom_combo.append_text(_('Fit width mode'))
zoom_combo.append_text(_('Fit height mode'))
zoom_combo.append_text(_('Manual zoom mode'))
# Change this if the combobox entries are reordered.
zoom_combo.set_active(prefs['default zoom mode'])
zoom_combo.connect('changed', self._combo_box_cb)
page.add_row(label, zoom_combo)
page.new_section(_('Fullscreen'))
hide_in_fullscreen_button = gtk.CheckButton(
_('Automatically hide all toolbars in fullscreen.'))
hide_in_fullscreen_button.set_active(prefs['hide all in fullscreen'])
hide_in_fullscreen_button.connect('toggled', self._check_button_cb,
'hide all in fullscreen')
page.add_row(hide_in_fullscreen_button)
page.new_section(_('Slideshow'))
label = gtk.Label('%s:' % _('Slideshow delay (in seconds)'))
adjustment = gtk.Adjustment(prefs['slideshow delay'] / 1000.0,
0.5, 3600.0, 0.1, 1)
delay_spinner = gtk.SpinButton(adjustment, digits=1)
delay_spinner.connect('value_changed', self._spinner_cb,
'slideshow delay')
page.add_row(label, delay_spinner)
page.new_section(_('Comments'))
label = gtk.Label('%s:' % _('Comment extensions'))
extensions_entry = gtk.Entry()
extensions_entry.set_text(', '.join(prefs['comment extensions']))
extensions_entry.connect('activate', self._entry_cb)
extensions_entry.connect('focus_out_event', self._entry_cb)
extensions_entry.set_tooltip_text(
_('Treat all files found within archives, that have one of these file endings, as comments.'))
page.add_row(label, extensions_entry)
page.new_section(_('Rotation'))
auto_rotate_button = gtk.CheckButton(
_('Automatically rotate images according to their metadata.'))
auto_rotate_button.set_active(prefs['auto rotate from exif'])
auto_rotate_button.connect('toggled', self._check_button_cb,
'auto rotate from exif')
auto_rotate_button.set_tooltip_text(
_('Automatically rotate images when an orientation is specified in the image metadata, such as in an Exif tag.'))
page.add_row(auto_rotate_button)
notebook.append_page(page, gtk.Label(_('Display')))
self.show_all()
def _check_button_cb(self, button, preference):
"""Callback for all checkbutton-type preferences."""
prefs[preference] = button.get_active()
if preference == 'smart bg':
if not prefs[preference]:
self._window.set_bg_colour(prefs['bg colour'])
else:
self._window.draw_image(scroll=False)
elif preference in ('stretch', 'checkered bg for transparent images',
'no double page for wide images', 'auto rotate from exif'):
self._window.draw_image(scroll=False)
elif (preference == 'hide all in fullscreen' and
self._window.is_fullscreen):
self._window.draw_image(scroll=False)
elif preference == 'show page numbers on thumbnails':
self._window.thumbnailsidebar.clear()
self._window.thumbnailsidebar.load_thumbnails()
def _color_button_cb(self, colorbutton):
"""Callback for the background colour selection button."""
colour = colorbutton.get_color()
prefs['bg colour'] = colour.red, colour.green, colour.blue
if not prefs['smart bg'] or not self._window.file_handler.file_loaded:
self._window.set_bg_colour(prefs['bg colour'])
def _spinner_cb(self, spinbutton, preference):
"""Callback for spinner-type preferences."""
value = spinbutton.get_value()
if preference == 'lens size':
prefs[preference] = int(value)
elif preference == 'lens magnification':
prefs[preference] = value
elif preference == 'slideshow delay':
prefs[preference] = int(value * 1000)
self._window.slideshow.update_delay()
elif preference == 'thumbnail size':
prefs[preference] = int(value)
self._window.thumbnailsidebar.resize()
self._window.draw_image(scroll=False)
def _combo_box_cb(self, combobox):
"""Callback for combobox-type preferences."""
zoom_mode = combobox.get_active()
prefs['default zoom mode'] = zoom_mode
def _entry_cb(self, entry, event=None):
"""Callback for entry-type preferences."""
text = entry.get_text()
extensions = [e.strip() for e in text.split(',')]
prefs['comment extensions'] = [e for e in extensions if e]
self._window.file_handler.update_comment_extensions()
def _response(self, dialog, response):
_close_dialog()
class _PreferencePage(gtk.VBox):
"""The _PreferencePage is a conveniece class for making one "page"
in a preferences-style dialog that contains one or more
_PreferenceSections.
"""
def __init__(self, right_column_width):
"""Create a new page where any possible right columns have the
width request <right_column_width>.
"""
gtk.VBox.__init__(self, False, 12)
self.set_border_width(12)
self._right_column_width = right_column_width
self._section = None
def new_section(self, header):
"""Start a new section in the page, with the header text from
<header>.
"""
self._section = _PreferenceSection(header, self._right_column_width)
self.pack_start(self._section, False, False)
def add_row(self, left_item, right_item=None):
"""Add a row to the page (in the latest section), containing one
or two items. If the left item is a label it is automatically
aligned properly.
"""
if isinstance(left_item, gtk.Label):
left_item.set_alignment(0, 0.5)
if right_item is None:
self._section.contentbox.pack_start(left_item)
else:
left_box, right_box = self._section.new_split_vboxes()
left_box.pack_start(left_item)
right_box.pack_start(right_item)
class _PreferenceSection(gtk.VBox):
"""The _PreferenceSection is a convenience class for making one
"section" of a preference-style dialog, e.g. it has a bold header
and a number of rows which are indented with respect to that header.
"""
def __init__(self, header, right_column_width=150):
"""Contruct a new section with the header set to the text in
<header>, and the width request of the (possible) right columns
set to that of <right_column_width>.
"""
gtk.VBox.__init__(self, False, 0)
self._right_column_width = right_column_width
self.contentbox = gtk.VBox(False, 6)
label = labels.BoldLabel(header)
label.set_alignment(0, 0.5)
hbox = gtk.HBox(False, 0)
hbox.pack_start(gtk.HBox(), False, False, 6)
hbox.pack_start(self.contentbox)
self.pack_start(label, False, False)
self.pack_start(hbox, False, False, 6)
def new_split_vboxes(self):
"""Return two new VBoxes that are automatically put in the section
after the previously added items. The right one has a width request
        equal to the right_column_width value passed to the class constructor,
in order to make it easy for all "right column items" in a page to
line up nicely.
"""
left_box = gtk.VBox(False, 6)
right_box = gtk.VBox(False, 6)
right_box.set_size_request(self._right_column_width, -1)
hbox = gtk.HBox(False, 12)
hbox.pack_start(left_box)
hbox.pack_start(right_box, False, False)
self.contentbox.pack_start(hbox)
return left_box, right_box
def open_dialog(action, window):
global _dialog
if _dialog is None:
_dialog = _PreferencesDialog(window)
else:
_dialog.present()
def _close_dialog(*args):
global _dialog
if _dialog is not None:
_dialog.destroy()
_dialog = None
def read_preferences_file():
"""Read preferences data from disk."""
if os.path.isfile(_config_path):
config = None
try:
config = open(_config_path, 'rb')
version = cPickle.load(config)
old_prefs = cPickle.load(config)
config.close()
except Exception:
print '! Corrupt preferences file "%s", deleting...' % _config_path
if config is not None:
config.close()
os.remove(_config_path)
else:
for key in old_prefs:
if key in prefs:
prefs[key] = old_prefs[key]
def write_preferences_file():
"""Write preference data to disk."""
config = open(_config_path, 'wb')
cPickle.dump(constants.VERSION, config, cPickle.HIGHEST_PROTOCOL)
cPickle.dump(prefs, config, cPickle.HIGHEST_PROTOCOL)
config.close()
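# Minimal usage sketch (added; not part of the original module). The rest of
# Comix is expected to read stored values once at startup and persist them on
# shutdown, mutating the module-level prefs dict in between:
#
#   import preferences
#   preferences.read_preferences_file()
#   preferences.prefs['default fullscreen'] = True
#   preferences.write_preferences_file()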
| gpl-2.0 |
ghxandsky/zstack-utility | kvmagent/kvmagent/plugins/network_plugin.py | 1 | 7289 | '''
@author: frank
'''
from kvmagent import kvmagent
from zstacklib.utils import jsonobject
from zstacklib.utils import http
from zstacklib.utils import log
from zstacklib.utils import lock
from zstacklib.utils import shell
from zstacklib.utils import linux
import os
import traceback
CHECK_PHYSICAL_NETWORK_INTERFACE_PATH = '/network/checkphysicalnetworkinterface'
KVM_REALIZE_L2NOVLAN_NETWORK_PATH = "/network/l2novlan/createbridge"
KVM_REALIZE_L2VLAN_NETWORK_PATH = "/network/l2vlan/createbridge"
KVM_CHECK_L2NOVLAN_NETWORK_PATH = "/network/l2novlan/checkbridge"
KVM_CHECK_L2VLAN_NETWORK_PATH = "/network/l2vlan/checkbridge"
logger = log.get_logger(__name__)
class CheckPhysicalNetworkInterfaceCmd(kvmagent.AgentCommand):
def __init__(self):
super(CheckPhysicalNetworkInterfaceCmd, self).__init__()
self.interfaceNames = None
class CheckPhysicalNetworkInterfaceResponse(kvmagent.AgentResponse):
def __init__(self):
super(CheckPhysicalNetworkInterfaceResponse, self).__init__()
self.failedInterfaceNames = None
class CreateBridgeCmd(kvmagent.AgentCommand):
def __init__(self):
super(CreateBridgeCmd, self).__init__()
self.physicalInterfaceName = None
self.bridgeName = None
class CreateBridgeResponse(kvmagent.AgentResponse):
def __init__(self):
super(CreateBridgeResponse, self).__init__()
class CreateVlanBridgeCmd(kvmagent.AgentCommand):
def __init__(self):
super(CreateVlanBridgeCmd, self).__init__()
self.vlan = None
class CreateVlanBridgeResponse(kvmagent.AgentResponse):
def __init__(self):
super(CreateVlanBridgeResponse, self).__init__()
class CheckBridgeResponse(kvmagent.AgentResponse):
def __init__(self):
super(CheckBridgeResponse, self).__init__()
class CheckVlanBridgeResponse(kvmagent.AgentResponse):
def __init__(self):
super(CheckVlanBridgeResponse, self).__init__()
class NetworkPlugin(kvmagent.KvmAgent):
'''
    Kvmagent plugin that checks physical network interfaces and creates or
    verifies Linux bridges (plain and VLAN) for L2 networks.
'''
def _ifup_device_if_down(self, device_name):
state_path = '/sys/class/net/%s/operstate' % device_name
if not os.path.exists(state_path):
raise Exception('cannot find %s' % state_path)
with open(state_path, 'r') as fd:
state = fd.read()
if 'up' in state:
return
shell.call('ip link set %s up' % device_name)
def _configure_bridge(self):
shell.call('echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables')
shell.call('echo 1 > /proc/sys/net/ipv4/conf/default/forwarding')
@kvmagent.replyerror
def check_physical_network_interface(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckPhysicalNetworkInterfaceResponse()
for i in cmd.interfaceNames:
shell_cmd = shell.ShellCmd("ip link | grep '%s'" % i)
shell_cmd(False)
if shell_cmd.return_code != 0:
rsp.failedInterfaceNames = [i]
rsp.success = False
return jsonobject.dumps(rsp)
for i in cmd.interfaceNames:
self._ifup_device_if_down(i)
logger.debug(http.path_msg(CHECK_PHYSICAL_NETWORK_INTERFACE_PATH, 'checked physical interfaces: %s' % cmd.interfaceNames))
return jsonobject.dumps(rsp)
@lock.lock('create_bridge')
@kvmagent.replyerror
def create_bridge(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CreateBridgeResponse()
self._ifup_device_if_down(cmd.physicalInterfaceName)
if linux.is_vif_on_bridge(cmd.bridgeName, cmd.physicalInterfaceName):
logger.debug('%s is a bridge device. Interface %s is attached to bridge. No need to create bridge or attach device interface' % (cmd.bridgeName, cmd.physicalInterfaceName))
self._configure_bridge()
return jsonobject.dumps(rsp)
try:
linux.create_bridge(cmd.bridgeName, cmd.physicalInterfaceName)
self._configure_bridge()
logger.debug('successfully realize bridge[%s] from device[%s]' % (cmd.bridgeName, cmd.physicalInterfaceName))
except Exception as e:
logger.warning(traceback.format_exc())
rsp.error = 'unable to create bridge[%s] from device[%s], because %s' % (cmd.bridgeName, cmd.physicalInterfaceName, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
@lock.lock('create_bridge')
@kvmagent.replyerror
def create_vlan_bridge(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CreateVlanBridgeResponse()
if linux.is_bridge(cmd.bridgeName):
logger.debug('%s is a bridge device, no need to create bridge' % cmd.bridgeName)
self._ifup_device_if_down('%s.%s' % (cmd.physicalInterfaceName, cmd.vlan))
self._configure_bridge()
return jsonobject.dumps(rsp)
try:
linux.create_vlan_bridge(cmd.bridgeName, cmd.physicalInterfaceName, cmd.vlan)
self._configure_bridge()
logger.debug('successfully realize vlan bridge[name:%s, vlan:%s] from device[%s]' % (cmd.bridgeName, cmd.vlan, cmd.physicalInterfaceName))
except Exception as e:
logger.warning(traceback.format_exc())
rsp.error = 'unable to create vlan bridge[name:%s, vlan:%s] from device[%s], because %s' % (cmd.bridgeName, cmd.vlan, cmd.physicalInterfaceName, str(e))
rsp.success = False
return jsonobject.dumps(rsp)
@lock.lock('create_bridge')
@kvmagent.replyerror
def check_bridge(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckBridgeResponse()
if not linux.is_bridge(cmd.bridgeName):
rsp.error = "can not find bridge[%s]" % cmd.bridgeName
rsp.success = False
else:
self._ifup_device_if_down(cmd.physicalInterfaceName)
return jsonobject.dumps(rsp)
@lock.lock('create_bridge')
@kvmagent.replyerror
def check_vlan_bridge(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckVlanBridgeResponse()
if not linux.is_bridge(cmd.bridgeName):
rsp.error = "can not find vlan bridge[%s]" % cmd.bridgeName
rsp.success = False
else:
self._ifup_device_if_down(cmd.physicalInterfaceName)
return jsonobject.dumps(rsp)
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_sync_uri(CHECK_PHYSICAL_NETWORK_INTERFACE_PATH, self.check_physical_network_interface)
http_server.register_async_uri(KVM_REALIZE_L2NOVLAN_NETWORK_PATH, self.create_bridge)
http_server.register_async_uri(KVM_REALIZE_L2VLAN_NETWORK_PATH, self.create_vlan_bridge)
http_server.register_async_uri(KVM_CHECK_L2NOVLAN_NETWORK_PATH, self.check_bridge)
http_server.register_async_uri(KVM_CHECK_L2VLAN_NETWORK_PATH, self.check_vlan_bridge)
def stop(self):
pass
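# Illustrative request (added note, not in the original plugin): the handlers
# above read their parameters from the JSON request body, so an async POST to
# KVM_REALIZE_L2VLAN_NETWORK_PATH would carry a payload along these lines
# (field names taken from create_vlan_bridge above; values are made up):
#
#   {"bridgeName": "br0", "physicalInterfaceName": "eth0", "vlan": 100}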
| apache-2.0 |
KanoComputing/nush | cherrypy/test/test_auth_basic.py | 54 | 2853 | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
import cherrypy
from cherrypy._cpcompat import md5, ntob
from cherrypy.lib import auth_basic
from cherrypy.test import helper
class BasicAuthTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "This is public."
index.exposed = True
class BasicProtected:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
class BasicProtected2:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
userpassdict = {'xuser' : 'xpassword'}
userhashdict = {'xuser' : md5(ntob('xpassword')).hexdigest()}
def checkpasshash(realm, user, password):
p = userhashdict.get(user)
return p and p == md5(ntob(password)).hexdigest() or False
conf = {'/basic': {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(userpassdict)},
'/basic2': {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': checkpasshash},
}
root = Root()
root.basic = BasicProtected()
root.basic2 = BasicProtected2()
cherrypy.tree.mount(root, config=conf)
setup_server = staticmethod(setup_server)
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testBasic(self):
self.getPage("/basic/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
def testBasic2(self):
self.getPage("/basic2/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
| gpl-3.0 |
ghickman/tvrenamr | tvrenamr/logs.py | 2 | 2377 | import logging
import logging.handlers
import os
def convert_log_level(level=26):
"""
Get a numeric log level from a string. The default 26 is for SHORT logs.
    :param level: log level given as a name (e.g. 'debug', 'short') or a number
    :return: the numeric log level
"""
# annoying but the level can be passed in as None
if not level:
level = 26
levels = {'notset': 0, 'debug': 10, 'info': 20, 'minimal': 22,
'short': 26, 'warning': 30, 'error': 40, 'critical': 50}
if isinstance(level, str):
level = levels.get(level)
return level
def get_log_file(filename=None):
# make sure the log directory exists and place the log file there
if filename is None:
filename = os.path.join(
os.path.expanduser('~'),
'.tvrenamr',
'tvrenamr.log'
)
filename = filename.replace('~', os.path.expanduser('~'))
try:
os.makedirs(os.path.split(filename)[0])
except OSError:
pass
return filename
def start_logging(filename, log_level, quiet=False):
"""
Setup the file logging and start the root logger
"""
filename = get_log_file(filename)
log_level = convert_log_level(log_level)
# add the custom levels
logging.addLevelName(22, 'MINIMAL')
logging.addLevelName(26, 'SHORT')
# setup log file
file_format = '%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s'
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1048576, backupCount=10)
handler.setFormatter(logging.Formatter(file_format, '%Y-%m-%dT%H:%M'))
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.DEBUG)
if not quiet:
# setup the console logs to debug
# debug
        if log_level == 10:
console_format = '%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s'
console_datefmt = '%Y-%m-%d %H:%M'
else:
console_format = '%(message)s'
console_datefmt = ''
console_formatter = logging.Formatter(console_format, console_datefmt)
# define a Handler with the given level and outputs to the console
console = logging.StreamHandler()
console.setLevel(log_level)
# set the console format & attach the handler to the root logger with it.
console.setFormatter(console_formatter)
logging.getLogger().addHandler(console)
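# Minimal self-test sketch (added for illustration; not part of tvrenamr):
# wires up file and console logging with the default log file location and
# exercises the custom SHORT level registered in start_logging().
if __name__ == '__main__':
    start_logging(None, 'short')
    log = logging.getLogger('tvrenamr.example')
    log.log(26, 'a SHORT-level message')  # custom level added above
    log.debug('written to the log file, but hidden from the console '
              'unless the console level is debug')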
| mit |
simleo/openmicroscopy | components/tools/OmeroPy/test/integration/test_chgrp.py | 2 | 46241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 University of Dundee & Open Microscopy Environment.
# All rights reserved. Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Integration test for moving objects between groups.
"""
import omero
import omero.gateway
from omero.testlib import ITest
import pytest
from omero.cmd import Chgrp2
from omero.cmd.graphs import ChildOption
from omero.model import DatasetI, DatasetImageLinkI, ExperimenterGroupI, ImageI
from omero.model import TagAnnotationI
from omero.model import ProjectDatasetLinkI, ProjectI, PlateI, ScreenI
from omero.model import ExperimenterI
from omero.rtypes import rstring, unwrap
from omero.api import Save
PRIVATE = 'rw----'
READONLY = 'rwr---'
READANNOTATE = 'rwra--'
COLLAB = 'rwrw--'
class TestChgrp(ITest):
def testChgrpImportedImage(self):
"""
Tests chgrp for an imported image, moving to a collaborative group
"""
# One user in two groups
client, exp = self.new_client_and_user()
grp = self.new_group(experimenters=[exp], perms=COLLAB)
gid = grp.id.val
client.sf.getAdminService().getEventContext() # Reset session
# Import an image into the client context
images = self.import_fake_file(name="testChgrpImportedImage",
client=client)
image = images[0]
# Chgrp
chgrp = Chgrp2(targetObjects={'Image': [image.id.val]}, groupId=gid)
self.do_submit(chgrp, client)
# Change our context to new group...
admin = client.sf.getAdminService()
admin.setDefaultGroup(exp, ExperimenterGroupI(gid, False))
self.set_context(client, gid)
# ...check image
img = client.sf.getQueryService().get("Image", image.id.val)
assert img.details.group.id.val == gid
def testChgrpImage(self):
"""
Tests chgrp for a dummny image object (no Pixels)
"""
# One user in two groups
client, exp = self.new_client_and_user()
grp = self.new_group([exp])
gid = grp.id.val
client.sf.getAdminService().getEventContext() # Reset session
update = client.sf.getUpdateService()
query = client.sf.getQueryService()
admin = client.sf.getAdminService()
first_gid = admin.getEventContext().groupId
# Create a dataset in the 'first group'
ds = self.make_dataset(name="testChgrpImage_target", client=client)
ds_id = ds.id.val
# Change our context to new group and create image
admin.setDefaultGroup(exp, ExperimenterGroupI(gid, False))
self.set_context(client, gid)
update = client.sf.getUpdateService() # do we need to get this again?
img = self.new_image()
img = update.saveAndReturnObject(img)
# Move image to new group
chgrp = Chgrp2(
targetObjects={'Image': [img.id.val]}, groupId=first_gid)
# Link to Save
link = DatasetImageLinkI()
link.child = ImageI(img.id.val, False)
link.parent = DatasetI(ds_id, False)
save = Save()
save.obj = link
requests = [chgrp, save] # we're going to chgrp THEN save DIlink
# Change our context to original group...
admin.setDefaultGroup(exp, ExperimenterGroupI(first_gid, False))
self.set_context(client, first_gid)
# We have to be in destination group for link Save to work
self.do_submit(requests, client)
# ...check image
img = client.sf.getQueryService().get("Image", img.id.val)
assert img.details.group.id.val == first_gid
# check Dataset
query = "select link from DatasetImageLink link\
where link.child.id=%s" % img.id.val
l = client.sf.getQueryService().findByQuery(query, None)
assert l is not None, "New DatasetImageLink on image not found"
assert l.details.group.id.val == first_gid,\
"Link Created in same group as Image target"
def testChgrpPDI(self):
"""
Tests chgrp for a Project, Dataset, Image hierarchy
"""
# One user in two groups
client, exp = self.new_client_and_user()
grp = self.new_group([exp])
gid = grp.id.val
client.sf.getAdminService().getEventContext() # Reset session
# Data Setup (image in the P/D hierarchy)
img = self.make_image(client=client)
project = self.make_project(name="chgrp-test", client=client)
dataset = self.make_dataset(name="chgrp-test", client=client)
self.link(dataset, img, client=client)
self.link(project, dataset, client=client)
# Move Project to new group
chgrp = Chgrp2(
targetObjects={'Project': [project.id.val]}, groupId=gid)
self.do_submit(chgrp, client)
# Change our context to new group...
admin = client.sf.getAdminService()
admin.setDefaultGroup(exp, ExperimenterGroupI(gid, False))
self.set_context(client, gid)
# ...check image
img = client.sf.getQueryService().get("Image", img.id.val)
assert img.details.group.id.val == gid
# check Project
prj = client.sf.getQueryService().get("Project", project.id.val)
assert prj.details.group.id.val == gid
def testChgrpRdef7825(self):
# One user in two groups
owner, owner_obj = self.new_client_and_user(perms="rwrw--")
admin = owner.sf.getAdminService()
ec = admin.getEventContext()
source_grp = admin.getGroup(ec.groupId)
target_grp = self.new_group([owner])
target_gid = target_grp.id.val
ec = admin.getEventContext() # Refresh
# Add another user to the source group
member = self.new_client(group=source_grp)
# Create an image as the owner
images = self.import_fake_file(name="testChgrpRdef7825",
client=owner)
image = images[0]
# Render as both users
owner_g = omero.gateway.BlitzGateway(client_obj=owner)
member_g = omero.gateway.BlitzGateway(client_obj=member)
def render(g):
g.getObject("Image", image.id.val).getThumbnail()
render(owner_g)
render(member_g)
# Now chgrp and try to delete
chgrp = Chgrp2(
targetObjects={'Image': [image.id.val]}, groupId=target_gid)
self.do_submit(chgrp, owner)
# Shouldn't be necessary to change group, but we're gonna
owner_g.SERVICE_OPTS.setOmeroGroup("-1")
handle = owner_g.deleteObjects("Image", [image.id.val])
self.wait_on_cmd(owner_g.c, handle)
def testChgrpOneImageFilesetErr(self):
"""
Simple example of the MIF chgrp bad case:
A single fileset containing 2 images - we try to chgrp ONE image.
Each sibling CANNOT be moved independently of the other.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
# 2 images sharing a fileset
images = self.import_fake_file(2, client=client)
# Now chgrp
chgrp = Chgrp2(
targetObjects={'Image': [images[0].id.val]}, groupId=target_gid)
self.do_submit(chgrp, client, test_should_pass=False)
def testChgrpAllImagesFilesetOK(self):
"""
Simple example of the MIF chgrp bad case:
A single fileset containing 2 images
can be moved to the same group together.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
images = self.import_fake_file(2, client=client)
# chgrp should succeed
ids = [images[0].id.val, images[1].id.val]
chgrp = Chgrp2(targetObjects={'Image': ids}, groupId=target_gid)
self.do_submit(chgrp, client)
# Check both Images moved
query_service = client.sf.getQueryService()
ctx = {'omero.group': '-1'} # query across groups
for i in images:
image = query_service.get('Image', i.id.val, ctx)
img_gid = image.details.group.id.val
assert target_gid == img_gid,\
"Image should be in group: %s, NOT %s" % (target_gid, img_gid)
def testChgrpAllImagesFilesetTwoCommandsErr(self):
"""
Simple example of the MIF chgrp bad case with Chgrp2:
A single fileset containing 2 images cannot be moved
to the same group together using two commands
See testChgrpAllImagesFilesetOK for the good.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
images = self.import_fake_file(2, client=client)
# chgrp should succeed
chgrp1 = Chgrp2(
targetObjects={'Image': [images[0].id.val]}, groupId=target_gid)
chgrp2 = Chgrp2(
targetObjects={'Image': [images[1].id.val]}, groupId=target_gid)
self.do_submit([chgrp1, chgrp2], client, test_should_pass=False)
def testChgrpOneDatasetFilesetErr(self):
"""
Simple example of the MIF chgrp bad case:
A single fileset containing 2 images is split among 2 datasets.
We try to chgrp ONE Dataset.
Each dataset CANNOT be moved independently of the other.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
datasets = self.create_datasets(
2, "testChgrpOneDatasetFilesetErr", client=client)
images = self.import_fake_file(2, client=client)
for i in range(2):
self.link(datasets[i], images[i], client=client)
# chgrp should succeed with the first Dataset only
chgrp = Chgrp2(
targetObjects={"Dataset": [datasets[0].id.val]},
groupId=target_gid)
self.do_submit(chgrp, client)
query_service = client.sf.getQueryService()
# Check Images not moved
for i in range(2):
image = query_service.get('Image', images[i].id.val)
assert target_gid != image.details.group.id.val,\
"Image should not be in group: %s" % target_gid
# Check second Dataset not moved
dataset = query_service.get('Dataset', datasets[1].id.val)
assert target_gid != dataset.details.group.id.val,\
"Dataset should not be in group: %s" % target_gid
ctx = {'omero.group': str(target_gid)} # query in the target group
# Check first Dataset moved
dataset = query_service.get('Dataset', datasets[0].id.val, ctx)
assert target_gid == dataset.details.group.id.val,\
"Dataset should be in group: %s" % target_gid
def testChgrpAllDatasetsFilesetOK(self):
"""
        Simple example of the MIF chgrp good case:
a single fileset containing 2 images is split among 2 datasets.
Datasets can be moved to the same group together.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
datasets = self.create_datasets(
2, "testChgrpAllDatasetsFilesetOK", client=client)
images = self.import_fake_file(2, client=client)
for i in range(2):
self.link(datasets[i], images[i], client=client)
# Now chgrp, should succeed
ids = [datasets[0].id.val, datasets[1].id.val]
chgrp = Chgrp2(targetObjects={"Dataset": ids}, groupId=target_gid)
self.do_submit(chgrp, client)
# Check both Datasets and Images moved
query_service = client.sf.getQueryService()
ctx = {'omero.group': str(target_gid)} # query in the target group
for i in range(2):
dataset = query_service.get('Dataset', datasets[i].id.val, ctx)
image = query_service.get('Image', images[i].id.val, ctx)
assert target_gid == dataset.details.group.id.val,\
"Dataset should be in group: %s" % target_gid
assert target_gid == image.details.group.id.val,\
"Image should be in group: %s" % target_gid
def testChgrpOneDatasetFilesetOK(self):
"""
Simple example of the MIF chgrp good case:
a single fileset containing 2 images in one dataset.
The dataset can be moved.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
ds = self.make_dataset(name="testChgrpOneDatasetFilesetOK",
client=client)
images = self.import_fake_file(2, client=client)
for i in range(2):
self.link(ds, images[i], client=client)
# Now chgrp, should succeed
chgrp = Chgrp2(
targetObjects={"Dataset": [ds.id.val]}, groupId=target_gid)
self.do_submit(chgrp, client)
# Check Dataset and both Images moved
query_service = client.sf.getQueryService()
ctx = {'omero.group': '-1'} # query across groups
dataset = query_service.get('Dataset', ds.id.val, ctx)
assert target_gid == dataset.details.group.id.val,\
"Dataset should be in group: %s" % target_gid
for i in range(2):
image = query_service.get('Image', images[i].id.val, ctx)
img_gid = image.details.group.id.val
assert target_gid == img_gid,\
"Image should be in group: %s, NOT %s" % (target_gid, img_gid)
def testChgrpImagesTwoFilesetsErr(self):
"""
If we try to 'split' 2 Filesets, both should be returned
by the chgrp error
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
images_fs_one = self.import_fake_file(2, client=client)
images_fs_two = self.import_fake_file(2, client=client)
# chgrp should fail...
ids = [images_fs_one[0].id.val, images_fs_two[0].id.val]
chgrp = Chgrp2(targetObjects={"Image": ids}, groupId=target_gid)
self.do_submit(chgrp, client, test_should_pass=False)
def testChgrpDatasetTwoFilesetsErr(self):
"""
If we try to 'split' 2 Filesets, both should be returned
by the chgrp error
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
images_fs_one = self.import_fake_file(2, client=client)
images_fs_two = self.import_fake_file(2, client=client)
ds = self.make_dataset(name="testChgrpDatasetTwoFilesetsErr",
client=client)
self.import_fake_file(2, client=client)
for i in (images_fs_one, images_fs_two):
self.link(ds, i[0], client=client)
# chgrp should succeed with the Dataset only
chgrp = Chgrp2(
targetObjects={"Dataset": [ds.id.val]}, groupId=target_gid)
self.do_submit(chgrp, client)
query_service = client.sf.getQueryService()
# Check Images not moved
for i in (images_fs_one[0], images_fs_two[0]):
image = query_service.get('Image', i.id.val)
assert target_gid != image.details.group.id.val,\
"Image should not be in group: %s" % target_gid
ctx = {'omero.group': str(target_gid)} # query in the target group
# Check Dataset moved
dataset = query_service.get('Dataset', ds.id.val, ctx)
assert target_gid == dataset.details.group.id.val,\
"Dataset should be in group: %s" % target_gid
def testChgrpDatasetCheckFsGroup(self):
"""
Move a Dataset of MIF images into a new group,
then check that the Fileset group is the same as the target group.
From 'Security Violation'
Bug https://github.com/openmicroscopy/openmicroscopy/pull/1139
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
ds = self.make_dataset(name="testChgrpDatasetCheckFsGroup",
client=client)
images = self.import_fake_file(2, client=client)
for i in range(2):
self.link(ds, images[i], client=client)
# Now chgrp, should succeed
chgrp = Chgrp2(
targetObjects={"Dataset": [ds.id.val]}, groupId=target_gid)
self.do_submit(chgrp, client)
# Check the group of the fileset is in sync with image.
ctx = {'omero.group': '-1'}
qs = client.sf.getQueryService()
image1 = qs.get("Image", images[0].id.val, ctx)
fs_id = image1.fileset.id.val
image_gid = image1.details.group.id.val
fileset_gid = qs.get("Fileset", fs_id, ctx).details.group.id.val
assert image_gid == fileset_gid,\
"Image group: %s and Fileset group: %s don't match" %\
(image_gid, fileset_gid)
def testChgrpFilesetOK(self):
"""
Move a Fileset of MIF images into a new group,
then check that the Fileset group is the same as the target group.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
query = client.sf.getQueryService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
images = self.import_fake_file(2, client=client)
fs_id = query.get("Image", images[0].id.val).fileset.id.val
# Now chgrp, should succeed
chgrp = Chgrp2(targetObjects={"Fileset": [fs_id]}, groupId=target_gid)
self.do_submit(chgrp, client)
# Check Fileset and both Images moved and
# thus the Fileset is in sync with Images.
ctx = {'omero.group': '-1'} # query across groups
fileset = query.get('Fileset', fs_id, ctx)
assert target_gid == fileset.details.group.id.val,\
"Fileset should be in group: %s" % target_gid
for i in range(2):
image = query.get('Image', images[i].id.val, ctx)
img_gid = image.details.group.id.val
assert target_gid == img_gid,\
"Image should be in group: %s, NOT %s" % (target_gid, img_gid)
def testChgrp11000(self):
"""
Move a Dataset of MIF images *with a companion file* into a new group.
Note: once FakeReader supports companion files this logic can be
simplified.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
ds = self.make_dataset(name="testChgrp11000", client=client)
images = self.import_fake_file(2, client=client)
for i in range(2):
self.link(ds, images[i], client=client)
# Perform the extra companion file logic
fs = client.sf.getQueryService().findByQuery("""
select fs from Image i
join i.fileset fs
join fetch fs.usedFiles as uf
join fetch uf.originalFile
where i.id = %s
""" % images[0].id.val, None)
entry1 = fs.getFilesetEntry(0)
ofile = entry1.getOriginalFile()
for i in range(2):
ann = omero.model.FileAnnotationI()
ann.file = ofile.proxy()
self.link(images[i], ann, client=client)
def testChgrp11109(self):
"""
Place a plate in a single screen and attempt to move it.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
update = client.sf.getUpdateService()
plate = PlateI()
plate.name = rstring("testChgrp11109")
screen = ScreenI()
screen.name = rstring("testChgrp11109")
link = screen.linkPlate(plate)
link = update.saveAndReturnObject(link)
# Now chgrp, should succeed
chgrp = Chgrp2(
targetObjects={"Plate": [link.child.id.val]}, groupId=target_gid)
self.do_submit(chgrp, client)
# Check that the links have been destroyed
query = client.sf.getQueryService()
with pytest.raises(omero.ValidationException):
query.get("ScreenPlateLink", link.id.val, {"omero.group": "-1"})
def testChgrpDatasetWithImage(self):
"""
D->I
ChGrp D
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
d = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(d, i, client=client)
self.change_group([d], target_gid, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d.id.val, ctx).details.group.id.val
def testChgrpPDIReverseLinkOrder(self):
"""
P->D->I
ChGrp P
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p = self.make_project(client=client)
d = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(p, d, client=client)
self.link(d, i, client=client)
self.change_group([p], target_gid, client=client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d.id.val, ctx).details.group.id.val
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpTwoDatasetsLinkedToSingleImageDefault(self):
"""
D1->I
D2->I
ChGrp D1
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
d1 = self.make_dataset(client=client)
d2 = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(d1, i, client=client)
self.link(d2, i, client=client)
self.change_group([d1], target_gid, client=client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Dataset",
d1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Dataset",
d2.id.val, ctx).details.group.id.val
assert target_gid != query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpTwoDatasetsLinkedToSingleImageHard(self):
"""
D1->I
D2->I
ChGrp D1
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
d1 = self.make_dataset(client=client)
d2 = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(d1, i, client=client)
self.link(d2, i, client=client)
hard = ChildOption(includeType=["Image"])
chgrp = Chgrp2(
targetObjects={"Dataset": [d1.id.val]}, childOptions=[hard],
groupId=target_gid)
self.do_submit(chgrp, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Dataset",
d1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Dataset",
d2.id.val, ctx).details.group.id.val
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpProjectWithDatasetLinkedToImageWithOtherDatasetDefault(self):
"""
P->D1->I
D2->I
ChGrp P
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p = self.make_project(client=client)
d1 = self.make_dataset(client=client)
d2 = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(d1, i, client=client)
self.link(d2, i, client=client)
self.link(p, d1, client=client)
self.change_group([p], target_gid, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpProjectWithDatasetLinkedToImageWithOtherDatasetHard(self):
"""
P->D1->I
D2->I
ChGrp P
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p = self.make_project(client=client)
d1 = self.make_dataset(client=client)
d2 = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(d1, i, client=client)
self.link(d2, i, client=client)
self.link(p, d1, client=client)
hard = ChildOption(includeType=["Image"])
chgrp = Chgrp2(
targetObjects={"Project": [p.id.val]}, childOptions=[hard],
groupId=target_gid)
self.do_submit(chgrp, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Dataset",
d2.id.val, ctx).details.group.id.val
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpDatasetWithImageLinkedToTwoProjects(self):
"""
P1->D->I
P2->D->I
ChGrp D
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p1 = self.make_project(client=client)
p2 = self.make_project(client=client)
d = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(p1, d, client=client)
self.link(p2, d, client=client)
self.link(d, i, client=client)
self.change_group([d], target_gid, client)
ctx = {'omero.group': '-1'}
        assert target_gid != query.get("Project",
                                       p1.id.val, ctx).details.group.id.val
        assert target_gid != query.get("Project",
                                       p2.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d.id.val, ctx).details.group.id.val
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpProjectLinkedToDatasetAndImageDefault(self):
"""
P1->D->I
P2->D->I
ChGrp P1
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p1 = self.make_project(client=client)
p2 = self.make_project(client=client)
d = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(p1, d, client=client)
self.link(p2, d, client=client)
self.link(d, i, client=client)
self.change_group([p1], target_gid, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Dataset",
d.id.val, ctx).details.group.id.val
assert target_gid != query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpProjectLinkedToDatasetAndImageHard(self):
"""
P1->D->I
P2->D->I
ChGrp P1
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p1 = self.make_project(client=client)
p2 = self.make_project(client=client)
d = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(p1, d, client=client)
self.link(p2, d, client=client)
self.link(d, i, client=client)
hard = ChildOption(includeType=["Dataset"])
chgrp = Chgrp2(
targetObjects={"Project": [p1.id.val]}, childOptions=[hard],
groupId=target_gid)
self.do_submit(chgrp, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Project",
p2.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d.id.val, ctx).details.group.id.val
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
def testChgrpProjectLinkedToDatasetDefault(self):
"""
P1->D
P2->D
ChGrp P1
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p1 = self.make_project(client=client)
p2 = self.make_project(client=client)
d = self.make_dataset(client=client)
self.link(p1, d, client=client)
self.link(p2, d, client=client)
self.change_group([p1], target_gid, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Dataset",
d.id.val, ctx).details.group.id.val
def testChgrpProjectLinkedToDatasetHard(self):
"""
P1->D
P2->D
ChGrp P1
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p1 = self.make_project(client=client)
p2 = self.make_project(client=client)
d = self.make_dataset(client=client)
self.link(p1, d, client=client)
self.link(p2, d, client=client)
hard = ChildOption(includeType=["Dataset"])
chgrp = Chgrp2(
targetObjects={"Project": [p1.id.val]}, childOptions=[hard],
groupId=target_gid)
self.do_submit(chgrp, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p1.id.val, ctx).details.group.id.val
assert target_gid != query.get("Project",
p2.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d.id.val, ctx).details.group.id.val
def testChgrpProjectLinkedToTwoDatasetsAndImage(self):
"""
P->D1->I
P->D2->I
ChGrp P
See https://trac.openmicroscopy.org.uk/ome/ticket/12452
"""
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
admin.getEventContext() # Refresh
query = client.sf.getQueryService()
p = self.make_project(client=client)
d1 = self.make_dataset(client=client)
d2 = self.make_dataset(client=client)
i = self.make_image(client=client)
self.link(p, d1, client=client)
self.link(p, d2, client=client)
self.link(d1, i, client=client)
self.link(d2, i, client=client)
self.change_group([p], target_gid, client)
ctx = {'omero.group': '-1'}
assert target_gid == query.get("Project",
p.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d1.id.val, ctx).details.group.id.val
assert target_gid == query.get("Dataset",
d2.id.val, ctx).details.group.id.val
assert target_gid == query.get("Image",
i.id.val, ctx).details.group.id.val
def testIntergroupLinks(self):
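        # Reproduces a reported workflow: images tagged in a read-annotate
        # group are moved to a private group, then we check that no
        # annotation link ends up with parent and child in different groups.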
# create read-annotate group 'read-annotate' with implicit owner
ra_group = self.new_group(perms=READANNOTATE)
self.new_user(group=ra_group, owner=True)
# create private group 'private' with implicit owner
p_group = self.new_group(perms=PRIVATE)
self.new_user(group=p_group, owner=True)
# create new user 'image-owner' who is a member of both 'read-annotate'
# and 'private'
io_client, image_owner = self.new_client_and_user(group=ra_group)
self.add_groups(image_owner, [p_group])
# create new user 'tag-owner' who is a member of both 'read-annotate'
# and 'private'
to_client, tag_owner = self.new_client_and_user(group=ra_group)
self.add_groups(tag_owner, [p_group])
# switch user to 'image-owner'
# import two images into 'read-annotate'
images = []
for x in range(0, 2):
values = self.import_fake_file(client=io_client)
images.append(values[0])
image = io_client.sf.getQueryService().get("Image",
images[x].id.val)
assert ra_group.id.val == image.details.group.id.val
# switch user to tag-owner
# tag both image-owner's images with the same new tag
tag = self.new_object(
TagAnnotationI, name="tag from user %s" % tag_owner.omeName.val)
tag = to_client.sf.getUpdateService().saveAndReturnObject(tag)
assert tag_owner.id.val == tag.details.owner.id.val
links = []
for image in images:
links.append(self.link(image, tag, client=to_client))
# (shell) as root
# run bin/omero hql --all 'select parent.details.group.id,
        # child.details.group.id from ImageAnnotationLink'
# and observe that for each row
# the group ID in Col1 matches that in Col2
for link in links:
assert link.parent.details.group.id == link.child.details.group.id
# switch user to image-owner
# right-click one of the images and move it to private
self.change_group([images[0]], p_group.id.val, io_client)
# (shell) as root
# run bin/omero hql --all 'select parent.details.group.id,
# child.details.group.id from ImageAnnotationLink' and recoil in horror
params = omero.sys.ParametersI()
params.addId(tag.id.val)
ctx = {"omero.group": "-1"}
query = "select parent.details.group.id,"
query += " child.details.group.id from ImageAnnotationLink"
query += " where child.id = :id"
links = unwrap(self.root.sf.getQueryService().projection(query, params,
ctx))
assert links is not None
for link in links:
assert link[0] == link[1]
class TestChgrpTarget(ITest):
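    # Chgrp variants where a target container is supplied: the Chgrp2 request
    # is submitted together with Save requests that link the moved objects to
    # a Dataset/Project in the destination group.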
def createDSInGroup(self, gid, name=None, client=None):
if name is None:
name = self.uuid()
if client is None:
client = self.client
ctx = {'omero.group': str(gid)}
update = client.sf.getUpdateService()
ds = self.new_dataset(name)
return update.saveAndReturnObject(ds, ctx)
def chgrpImagesToTargetDataset(self, img_count):
"""
Helper method to test chgrp of image(s) to target Dataset
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
admin = client.sf.getAdminService()
target_grp = self.new_group([user], perms=PRIVATE)
target_gid = target_grp.id.val
images = self.import_fake_file(img_count, client=client)
ds = self.createDSInGroup(target_gid, client=client)
# each chgrp includes a 'save' link to target dataset
saves = []
ids = []
for i in images:
ids.append(i.id.val)
link = DatasetImageLinkI()
link.child = ImageI(i.id.val, False)
link.parent = DatasetI(ds.id.val, False)
save = Save()
save.obj = link
saves.append(save)
chgrp = Chgrp2(
targetObjects={"Image": ids}, groupId=target_gid)
requests = [chgrp]
requests.extend(saves)
self.do_submit(requests, client, omero_group=target_gid)
# Check Images moved to correct group
query_service = client.sf.getQueryService()
ctx = {'omero.group': '-1'} # query across groups
for i in images:
image = query_service.get('Image', i.id.val, ctx)
img_gid = image.details.group.id.val
assert target_gid == img_gid,\
"Image should be in group: %s, NOT %s" % (target_gid, img_gid)
# Check Dataset has images linked
ds_imgs = client.sf.getContainerService().getImages(
'Dataset', [ds.id.val], None, ctx)
assert len(ds_imgs) == len(images),\
"All Images should be in target Dataset"
previous_gid = admin.getEventContext().groupId
return (ds, images, client, user, previous_gid, target_gid)
def testChgrpImageToTargetDataset(self):
""" Chgrp a single Image to target Dataset """
self.chgrpImagesToTargetDataset(1)
def testChgrpMifImagesToTargetDataset(self):
""" Chgrp 2 images in a MIF to target Dataset """
self.chgrpImagesToTargetDataset(2)
def testChgrpImageToTargetDatasetAndBackNoDS(self):
"""
Chgrp a single Image to target Dataset and then back
No target is provided on the way back.
see ticket:11118
"""
ds, images, client, user, old_gid, new_gid =\
self.chgrpImagesToTargetDataset(1)
chgrp = Chgrp2(
targetObjects={"Image": [images[0].id.val]}, groupId=old_gid)
self.do_submit(chgrp, client, omero_group=old_gid)
def testChgrpImageToTargetDatasetAndBackDS(self):
"""
Chgrp a single Image to target Dataset and then back
see ticket:11118
"""
new_ds, images, client, user, old_gid, new_gid =\
self.chgrpImagesToTargetDataset(1)
# create Dataset in original group
old_ds = self.createDSInGroup(old_gid, client=client)
link = DatasetImageLinkI()
link.parent = old_ds.proxy()
link.child = images[0].proxy()
chgrp = Chgrp2(
targetObjects={"Image": [images[0].id.val]}, groupId=old_gid)
save = Save(link)
self.do_submit([chgrp, save], client, omero_group=old_gid)
dils = client.sf.getQueryService().findAllByQuery(
"select dil from DatasetImageLink dil where dil.child.id = :id",
omero.sys.ParametersI().addId(images[0].id.val),
{"omero.group": "-1"})
assert 1 == len(dils)
@pytest.mark.parametrize("credentials", ["user", "admin"])
def testChgrpDatasetToTargetProject(self, credentials):
"""
Tests that an Admin can move a user's Dataset to a private
group and link it to an existing user's Project there.
Also tests that the user can do the same chgrp themselves.
"""
# One user in two groups
client, user = self.new_client_and_user(perms=PRIVATE)
target_grp = self.new_group([user], perms=PRIVATE)
e_ctx = client.sf.getAdminService().getEventContext() # Reset session
user_id = e_ctx.userId
target_gid = target_grp.id.val
# User creates Dataset in current group...
update = client.sf.getUpdateService()
ds = self.make_dataset(client=client)
# ...and Project in target group
ctx = {'omero.group': str(target_gid)}
pr = self.new_project()
pr = update.saveAndReturnObject(pr, ctx)
requests = []
saves = []
chgrp = Chgrp2(
targetObjects={"Dataset": [ds.id.val]}, groupId=target_gid)
requests.append(chgrp)
link = ProjectDatasetLinkI()
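        # Set the link owner explicitly so the ProjectDatasetLink belongs to
        # the user even when the request is submitted as root
        # (see the "credentials" parametrization above).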
link.details.owner = ExperimenterI(user_id, False)
link.child = DatasetI(ds.id.val, False)
link.parent = ProjectI(pr.id.val, False)
save = Save()
save.obj = link
saves.append(save)
requests.extend(saves)
if credentials == "user":
c = client
else:
c = self.root
self.do_submit(requests, c, omero_group=target_gid)
query_service = client.sf.getQueryService()
ctx = {'omero.group': '-1'} # query across groups
dataset = query_service.get('Dataset', ds.id.val, ctx)
ds_gid = dataset.details.group.id.val
assert target_gid == ds_gid,\
"Dataset should be in group: %s, NOT %s" % (target_gid, ds_gid)
| gpl-2.0 |
MarcosCommunity/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Repeatln.py | 293 | 13228 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from ServerParameter import *
from lib.logreport import *
from lib.rpc import *
from LoginTest import *
database="test_db1"
uid = 3
#class RepeatIn:
class RepeatIn( unohelper.Base, XJobExecutor ):
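    # Dialog used by the report designer to build a "[[ repeatIn(...) ]]"
    # expression (for example "[[ repeatIn(objects,'o') ]]", with 'o' as an
    # illustrative variable name) and insert it into the document as a
    # DropDown text field.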
def __init__(self, sObject="", sVariable="", sFields="", sDisplayName="", bFromModify=False):
# Interface Design
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 250, "RepeatIn Builder")
self.win.addFixedText("lblVariable", 2, 12, 60, 15, "Objects to loop on :")
self.win.addComboBox("cmbVariable", 180-120-2, 10, 120, 15,True, itemListenerProc=self.cmbVariable_selected)
self.insVariable = self.win.getControl( "cmbVariable" )
self.win.addFixedText("lblFields", 10, 32, 60, 15, "Field to loop on :")
self.win.addComboListBox("lstFields", 180-120-2, 30, 120, 150, False,itemListenerProc=self.lstbox_selected)
self.insField = self.win.getControl( "lstFields" )
self.win.addFixedText("lblName", 12, 187, 60, 15, "Variable name :")
self.win.addEdit("txtName", 180-120-2, 185, 120, 15,)
self.win.addFixedText("lblUName", 8, 207, 60, 15, "Displayed name :")
self.win.addEdit("txtUName", 180-120-2, 205, 120, 15,)
self.win.addButton('btnOK',-2 ,-10,45,15,'Ok', actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-2 - 45 - 5 ,-10,45,15,'Cancel', actionListenerProc = self.btnCancel_clicked )
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
# Variable Declaration
self.sValue=None
self.sObj=None
self.aSectionList=[]
self.sGVariable=sVariable
self.sGDisplayName=sDisplayName
self.aItemList=[]
self.aComponentAdd=[]
self.aObjectList=[]
self.aListRepeatIn=[]
self.aVariableList=[]
        # Call method to perform Enumeration on the Report Document
EnumDocument(self.aItemList,self.aComponentAdd)
        # Check that Field-1 and Field-4 are available; if so, also get the
        # Combobox filled
desktop = getDesktop()
doc = desktop.getCurrentComponent()
docinfo = doc.getDocumentInfo()
        # Check whether Field-1 is available; if not, exit the application
self.sMyHost= ""
if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
self.sMyHost= docinfo.getUserFieldValue(0)
self.count=0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
self.count += 1
getList(self.aObjectList, self.sMyHost,self.count)
cursor = doc.getCurrentController().getViewCursor()
text = cursor.getText()
tcur = text.createTextCursorByRange(cursor)
self.aVariableList.extend( filter( lambda obj: obj[:obj.find(" ")] == "List", self.aObjectList ) )
for i in range(len(self.aItemList)):
try:
anItem = self.aItemList[i][1]
component = self.aComponentAdd[i]
if component == "Document":
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextSection:
getRecersiveSection(tcur.TextSection,self.aSectionList)
if component in self.aSectionList:
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextTable:
if not component == "Document" and component[component.rfind(".") + 1:] == tcur.TextTable.Name:
VariableScope( tcur, self.aVariableList, self.aObjectList, self.aComponentAdd, self.aItemList, component )
except :
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('RepeatIn', LOG_ERROR, info)
self.bModify=bFromModify
if self.bModify==True:
if sObject=="":
self.insVariable.setText("List of "+docinfo.getUserFieldValue(3))
self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields"))
self.win.setEditText("txtName", sVariable)
self.win.setEditText("txtUName",sDisplayName)
self.sValue= "objects"
else:
sItem=""
for anObject in self.aObjectList:
if anObject[:anObject.find("(")] == sObject:
sItem = anObject
self.insVariable.setText( sItem )
genTree(
sItem[sItem.find("(")+1:sItem.find(")")],
self.aListRepeatIn,
self.insField,
self.sMyHost,
2,
ending=['one2many','many2many'],
recur=['one2many','many2many']
)
self.sValue= self.win.getListBoxItem("lstFields",self.aListRepeatIn.index(sFields))
for var in self.aVariableList:
if var[:8] <> 'List of ':
self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
else:
self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[8:])])
fields=['name','model']
self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
if self.model_res <> []:
if var[:8]<>'List of ':
self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
else:
self.insVariable.addItem('List of ' + self.model_res[0]['name'] ,self.insVariable.getItemCount())
else:
self.insVariable.addItem(var ,self.insVariable.getItemCount())
self.win.doModalDialog("lstFields",self.sValue)
else:
ErrorDialog("Please Select Appropriate module" ,"Create new report from: \nOdoo -> Open a New Report")
self.win.endExecute()
def lstbox_selected(self, oItemEvent):
sItem=self.win.getListBoxSelectedItem("lstFields")
sMain=self.aListRepeatIn[self.win.getListBoxSelectedItemPos("lstFields")]
if self.bModify==True:
self.win.setEditText("txtName", self.sGVariable)
self.win.setEditText("txtUName",self.sGDisplayName)
else:
self.win.setEditText("txtName",sMain[sMain.rfind("/")+1:])
self.win.setEditText("txtUName","|-."+sItem[sItem.rfind("/")+1:]+".-|")
def cmbVariable_selected(self, oItemEvent):
if self.count > 0 :
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
sItem=self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:8]=='List of ':
if var[:8]==sItem[:8]:
sItem = var
elif var[:var.find("(")+1] == sItem[:sItem.find("(")+1]:
sItem = var
self.aListRepeatIn=[]
data = ( sItem[sItem.rfind(" ") + 1:] == docinfo.getUserFieldValue(3) ) and docinfo.getUserFieldValue(3) or sItem[sItem.find("(")+1:sItem.find(")")]
genTree( data, self.aListRepeatIn, self.insField, self.sMyHost, 2, ending=['one2many','many2many'], recur=['one2many','many2many'] )
self.win.selectListBoxItemPos("lstFields", 0, True )
else:
sItem=self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:8]=='List of ' and var[:8] == sItem[:8]:
sItem = var
if sItem.find(".")==-1:
temp=sItem[sItem.rfind("x_"):]
else:
temp=sItem[sItem.rfind(".")+1:]
self.win.setEditText("txtName",temp)
self.win.setEditText("txtUName","|-."+temp+".-|")
self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields"))
self.win.selectListBoxItemPos("lstFields", 0, True )
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
cursor = doc.getCurrentController().getViewCursor()
selectedItem = self.win.getListBoxSelectedItem( "lstFields" )
selectedItemPos = self.win.getListBoxSelectedItemPos( "lstFields" )
txtName = self.win.getEditText( "txtName" )
txtUName = self.win.getEditText( "txtUName" )
if selectedItem != "" and txtName != "" and txtUName != "":
sKey=u""+ txtUName
if selectedItem == "objects":
sValue=u"[[ repeatIn(" + selectedItem + ",'" + txtName + "') ]]"
else:
sObjName=self.win.getComboBoxText("cmbVariable")
sObjName=sObjName[:sObjName.find("(")]
sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]"
if self.bModify == True:
oCurObj = cursor.TextField
oCurObj.Items = (sKey,sValue)
oCurObj.update()
else:
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if self.win.getListBoxSelectedItem("lstFields") == "objects":
oInputList.Items = (sKey,sValue)
doc.Text.insertTextContent(cursor,oInputList,False)
else:
sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]"
if cursor.TextTable==None:
oInputList.Items = (sKey,sValue)
doc.Text.insertTextContent(cursor,oInputList,False)
else:
oInputList.Items = (sKey,sValue)
widget = ( cursor.TextTable or selectedItem <> 'objects' ) and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text
widget.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in Object Field or Name field \nor select particular value from the list of fields.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
RepeatIn()
elif __name__=="package":
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation( RepeatIn, "org.openoffice.openerp.report.repeatln", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NitroKK/kernel_lge_iproj | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
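	# Immutable snapshot of one CPU runqueue: the set of runnable tasks plus
	# the event (sleep, wakeup, fork, migration) that produced this state.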
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
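	# One slice of time between two scheduler events: it records, for every
	# CPU, the runqueue snapshot in effect during [start, end] plus the
	# total load across all runqueues.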
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
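		# Binary search over the time-ordered slices for the one whose
		# [start, end] interval contains ts; returns its index, or -1.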
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
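	# Consumes the raw sched_* events: tracks which task is on each CPU and
	# appends a new TimeSlice whenever a switch, migration or wakeup changes
	# a runqueue.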
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
CLVsol/oehealth | oehealth_pharmacy/oehealth_annotation.py | 1 | 1638 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp.osv import orm, fields
class oehealth_annotation(orm.Model):
_inherit = 'oehealth.annotation'
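    # Extend the existing oehealth.annotation model (via _inherit) with a
    # link to the pharmacy the annotation belongs to.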
_columns = {
'pharmacy_id' : fields.many2one ('oehealth.pharmacy', 'Pharmacy'),
}
oehealth_annotation()
| agpl-3.0 |
SrNetoChan/Quantum-GIS | python/plugins/processing/algs/qgis/Heatmap.py | 15 | 11238 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Heatmap.py
---------------------
Date : November 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'November 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import os
from collections import OrderedDict
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsApplication,
QgsFeatureRequest,
QgsRasterFileWriter,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber,
QgsProcessingParameterDistance,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterDestination)
from qgis.analysis import QgsKernelDensityEstimation
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class Heatmap(QgisAlgorithm):
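    # Processing algorithm that rasterises point density with
    # QgsKernelDensityEstimation; the kernel radius and weight can optionally
    # be read from per-feature fields.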
INPUT = 'INPUT'
RADIUS = 'RADIUS'
RADIUS_FIELD = 'RADIUS_FIELD'
WEIGHT_FIELD = 'WEIGHT_FIELD'
PIXEL_SIZE = 'PIXEL_SIZE'
KERNEL = 'KERNEL'
DECAY = 'DECAY'
OUTPUT_VALUE = 'OUTPUT_VALUE'
OUTPUT = 'OUTPUT'
def icon(self):
return QgsApplication.getThemeIcon("/heatmap.svg")
def tags(self):
return self.tr('heatmap,kde,hotspot').split(',')
def group(self):
return self.tr('Interpolation')
def groupId(self):
return 'interpolation'
def name(self):
return 'heatmapkerneldensityestimation'
def displayName(self):
return self.tr('Heatmap (Kernel Density Estimation)')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.KERNELS = OrderedDict([(self.tr('Quartic'), QgsKernelDensityEstimation.KernelQuartic),
(self.tr('Triangular'), QgsKernelDensityEstimation.KernelTriangular),
(self.tr('Uniform'), QgsKernelDensityEstimation.KernelUniform),
(self.tr('Triweight'), QgsKernelDensityEstimation.KernelTriweight),
(self.tr('Epanechnikov'), QgsKernelDensityEstimation.KernelEpanechnikov)])
self.OUTPUT_VALUES = OrderedDict([(self.tr('Raw'), QgsKernelDensityEstimation.OutputRaw),
(self.tr('Scaled'), QgsKernelDensityEstimation.OutputScaled)])
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Point layer'),
[QgsProcessing.TypeVectorPoint]))
self.addParameter(QgsProcessingParameterDistance(self.RADIUS,
self.tr('Radius'),
100.0, self.INPUT, False, 0.0))
radius_field_param = QgsProcessingParameterField(self.RADIUS_FIELD,
self.tr('Radius from field'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True
)
radius_field_param.setFlags(radius_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(radius_field_param)
class ParameterHeatmapPixelSize(QgsProcessingParameterNumber):
def __init__(self, name='', description='', parent_layer=None, radius_param=None, radius_field_param=None, minValue=None,
default=None, optional=False):
QgsProcessingParameterNumber.__init__(self, name, description, QgsProcessingParameterNumber.Double, default, optional, minValue)
self.parent_layer = parent_layer
self.radius_param = radius_param
self.radius_field_param = radius_field_param
def clone(self):
                copy = ParameterHeatmapPixelSize(self.name(), self.description(), self.parent_layer, self.radius_param, self.radius_field_param, self.minimum(), self.defaultValue(), self.flags() & QgsProcessingParameterDefinition.FlagOptional)
return copy
pixel_size_param = ParameterHeatmapPixelSize(self.PIXEL_SIZE,
self.tr('Output raster size'),
parent_layer=self.INPUT,
radius_param=self.RADIUS,
radius_field_param=self.RADIUS_FIELD,
minValue=0.0,
default=0.1)
pixel_size_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.qgis.ui.HeatmapWidgets.HeatmapPixelSizeWidgetWrapper'}})
self.addParameter(pixel_size_param)
weight_field_param = QgsProcessingParameterField(self.WEIGHT_FIELD,
self.tr('Weight from field'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True
)
weight_field_param.setFlags(weight_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(weight_field_param)
keys = list(self.KERNELS.keys())
kernel_shape_param = QgsProcessingParameterEnum(self.KERNEL,
self.tr('Kernel shape'),
keys,
allowMultiple=False,
defaultValue=0)
kernel_shape_param.setFlags(kernel_shape_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(kernel_shape_param)
decay_ratio = QgsProcessingParameterNumber(self.DECAY,
self.tr('Decay ratio (Triangular kernels only)'),
QgsProcessingParameterNumber.Double,
0.0, True, -100.0, 100.0)
decay_ratio.setFlags(decay_ratio.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(decay_ratio)
keys = list(self.OUTPUT_VALUES.keys())
output_scaling = QgsProcessingParameterEnum(self.OUTPUT_VALUE,
self.tr('Output value scaling'),
keys,
allowMultiple=False,
defaultValue=0)
output_scaling.setFlags(output_scaling.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(output_scaling)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Heatmap')))
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
radius = self.parameterAsDouble(parameters, self.RADIUS, context)
kernel_shape = self.parameterAsEnum(parameters, self.KERNEL, context)
pixel_size = self.parameterAsDouble(parameters, self.PIXEL_SIZE, context)
decay = self.parameterAsDouble(parameters, self.DECAY, context)
output_values = self.parameterAsEnum(parameters, self.OUTPUT_VALUE, context)
outputFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
output_format = QgsRasterFileWriter.driverForExtension(os.path.splitext(outputFile)[1])
weight_field = self.parameterAsString(parameters, self.WEIGHT_FIELD, context)
radius_field = self.parameterAsString(parameters, self.RADIUS_FIELD, context)
attrs = []
kde_params = QgsKernelDensityEstimation.Parameters()
kde_params.source = source
kde_params.radius = radius
kde_params.pixelSize = pixel_size
# radius field
if radius_field:
kde_params.radiusField = radius_field
attrs.append(source.fields().lookupField(radius_field))
# weight field
if weight_field:
kde_params.weightField = weight_field
attrs.append(source.fields().lookupField(weight_field))
kde_params.shape = kernel_shape
kde_params.decayRatio = decay
kde_params.outputValues = output_values
kde = QgsKernelDensityEstimation(kde_params, outputFile, output_format)
if kde.prepare() != QgsKernelDensityEstimation.Success:
raise QgsProcessingException(
self.tr('Could not create destination layer'))
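        # Only request the attributes that are actually needed (the optional radius and
        # weight fields collected in attrs) to keep feature fetching light.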
request = QgsFeatureRequest()
request.setSubsetOfAttributes(attrs)
features = source.getFeatures(request)
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, f in enumerate(features):
if feedback.isCanceled():
break
if kde.addFeature(f) != QgsKernelDensityEstimation.Success:
feedback.reportError(self.tr('Error adding feature with ID {} to heatmap').format(f.id()))
feedback.setProgress(int(current * total))
if kde.finalise() != QgsKernelDensityEstimation.Success:
raise QgsProcessingException(
self.tr('Could not save destination layer'))
return {self.OUTPUT: outputFile}
| gpl-2.0 |
SnabbCo/neutron | neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py | 3 | 16283 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
# @author: Sridar Kandaswamy, [email protected], Cisco Systems, Inc.
# @author: Dan Florea, [email protected], Cisco Systems, Inc.
import contextlib
import uuid
import mock
from oslo.config import cfg
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import ip_lib
from neutron.common import config as base_config
from neutron import context
from neutron.plugins.common import constants
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
from neutron.tests import base
from neutron.tests.unit.services.firewall.agents import test_firewall_agent_api
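# FWaasHelper is a stand-in for the L3 agent base class so that FWaasAgent can
# exercise the FWaaSL3AgentRpcCallback mixin in isolation.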
class FWaasHelper(object):
def __init__(self, host):
pass
class FWaasAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, FWaasHelper):
def __init__(self, conf=None):
super(FWaasAgent, self).__init__(conf)
class TestFwaasL3AgentRpcCallback(base.BaseTestCase):
def setUp(self):
super(TestFwaasL3AgentRpcCallback, self).setUp()
self.conf = cfg.ConfigOpts()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(l3_agent.L3NATAgent.OPTS)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_root_helper(self.conf)
self.conf.root_helper = 'sudo'
self.api = FWaasAgent(self.conf)
self.api.fwaas_driver = test_firewall_agent_api.NoopFwaasDriver()
def test_create_firewall(self):
fake_firewall = {'id': 0}
with mock.patch.object(
self.api,
'_invoke_driver_for_plugin_api'
) as mock_driver:
self.assertEqual(
self.api.create_firewall(
mock.sentinel.context,
fake_firewall,
'host'),
mock_driver.return_value)
def test_update_firewall(self):
fake_firewall = {'id': 0}
with mock.patch.object(
self.api,
'_invoke_driver_for_plugin_api'
) as mock_driver:
self.assertEqual(
self.api.update_firewall(
mock.sentinel.context,
fake_firewall,
'host'),
mock_driver.return_value)
def test_delete_firewall(self):
fake_firewall = {'id': 0}
with mock.patch.object(
self.api,
'_invoke_driver_for_plugin_api'
) as mock_driver:
self.assertEqual(
self.api.delete_firewall(
mock.sentinel.context,
fake_firewall,
'host'),
mock_driver.return_value)
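    # The tests below patch the plugin RPC and the FWaaS driver to check that
    # create/update/delete calls reach the driver and that the resulting firewall
    # status is reported back to the plugin.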
def test_invoke_driver_for_plugin_api(self):
fake_firewall = {'id': 0, 'tenant_id': 1,
'admin_state_up': True}
self.api.plugin_rpc = mock.Mock()
with contextlib.nested(
mock.patch.object(self.api.plugin_rpc, 'get_routers'),
mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
mock.patch.object(self.api.fwaas_driver, 'create_firewall'),
mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status')
) as (
mock_get_routers,
mock_get_router_info_list_for_tenant,
mock_driver_create_firewall,
mock_set_firewall_status):
mock_driver_create_firewall.return_value = True
self.api.create_firewall(
context=mock.sentinel.context,
firewall=fake_firewall, host='host')
mock_get_routers.assert_called_once_with(
mock.sentinel.context)
mock_get_router_info_list_for_tenant.assert_called_once_with(
mock_get_routers.return_value, fake_firewall['tenant_id'])
mock_set_firewall_status.assert_called_once_with(
mock.sentinel.context,
fake_firewall['id'],
'ACTIVE')
def test_invoke_driver_for_plugin_api_admin_state_down(self):
fake_firewall = {'id': 0, 'tenant_id': 1,
'admin_state_up': False}
self.api.plugin_rpc = mock.Mock()
with contextlib.nested(
mock.patch.object(self.api.plugin_rpc, 'get_routers'),
mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
mock.patch.object(self.api.fwaas_driver, 'update_firewall'),
mock.patch.object(self.api.fwplugin_rpc,
'get_firewalls_for_tenant'),
mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status')
) as (
mock_get_routers,
mock_get_router_info_list_for_tenant,
mock_driver_update_firewall,
mock_get_firewalls_for_tenant,
mock_set_firewall_status):
mock_driver_update_firewall.return_value = True
self.api.update_firewall(
context=mock.sentinel.context,
firewall=fake_firewall, host='host')
mock_get_routers.assert_called_once_with(
mock.sentinel.context)
mock_get_router_info_list_for_tenant.assert_called_once_with(
mock_get_routers.return_value, fake_firewall['tenant_id'])
mock_set_firewall_status.assert_called_once_with(
mock.sentinel.context,
fake_firewall['id'],
'DOWN')
def test_invoke_driver_for_plugin_api_delete(self):
fake_firewall = {'id': 0, 'tenant_id': 1,
'admin_state_up': True}
self.api.plugin_rpc = mock.Mock()
with contextlib.nested(
mock.patch.object(self.api.plugin_rpc, 'get_routers'),
mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
mock.patch.object(self.api.fwaas_driver, 'delete_firewall'),
mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted')
) as (
mock_get_routers,
mock_get_router_info_list_for_tenant,
mock_driver_delete_firewall,
mock_firewall_deleted):
mock_driver_delete_firewall.return_value = True
self.api.delete_firewall(
context=mock.sentinel.context,
firewall=fake_firewall, host='host')
mock_get_routers.assert_called_once_with(
mock.sentinel.context)
mock_get_router_info_list_for_tenant.assert_called_once_with(
mock_get_routers.return_value, fake_firewall['tenant_id'])
mock_firewall_deleted.assert_called_once_with(
mock.sentinel.context,
fake_firewall['id'])
def test_delete_firewall_no_router(self):
fake_firewall = {'id': 0, 'tenant_id': 1}
self.api.plugin_rpc = mock.Mock()
with contextlib.nested(
mock.patch.object(self.api.plugin_rpc, 'get_routers'),
mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted')
) as (
mock_get_routers,
mock_get_router_info_list_for_tenant,
mock_firewall_deleted):
mock_get_router_info_list_for_tenant.return_value = []
self.api.delete_firewall(
context=mock.sentinel.context,
firewall=fake_firewall, host='host')
mock_get_routers.assert_called_once_with(
mock.sentinel.context)
mock_get_router_info_list_for_tenant.assert_called_once_with(
mock_get_routers.return_value, fake_firewall['tenant_id'])
mock_firewall_deleted.assert_called_once_with(
mock.sentinel.context,
fake_firewall['id'])
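    # The _process_router_add tests verify that firewalls left in a pending state are
    # applied or removed when a new router for the tenant is processed.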
def test_process_router_add_fw_update(self):
fake_firewall_list = [{'id': 0, 'tenant_id': 1,
'status': constants.PENDING_UPDATE,
'admin_state_up': True}]
fake_router = {'id': 1111, 'tenant_id': 2}
self.api.plugin_rpc = mock.Mock()
ri = mock.Mock()
ri.router = fake_router
routers = [ri.router]
with contextlib.nested(
mock.patch.object(self.api.plugin_rpc, 'get_routers'),
mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
mock.patch.object(self.api.fwaas_driver, 'update_firewall'),
mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status'),
mock.patch.object(self.api.fwplugin_rpc,
'get_firewalls_for_tenant'),
mock.patch.object(context, 'Context')
) as (
mock_get_routers,
mock_get_router_info_list_for_tenant,
mock_driver_update_firewall,
mock_set_firewall_status,
mock_get_firewalls_for_tenant,
mock_Context):
mock_driver_update_firewall.return_value = True
ctx = mock.sentinel.context
mock_Context.return_value = ctx
mock_get_router_info_list_for_tenant.return_value = routers
mock_get_firewalls_for_tenant.return_value = fake_firewall_list
self.api._process_router_add(ri)
mock_get_router_info_list_for_tenant.assert_called_with(
routers,
ri.router['tenant_id'])
mock_get_firewalls_for_tenant.assert_called_once_with(ctx)
mock_driver_update_firewall.assert_called_once_with(
routers,
fake_firewall_list[0])
mock_set_firewall_status.assert_called_once_with(
ctx,
fake_firewall_list[0]['id'],
constants.ACTIVE)
def test_process_router_add_fw_delete(self):
fake_firewall_list = [{'id': 0, 'tenant_id': 1,
'status': constants.PENDING_DELETE}]
fake_router = {'id': 1111, 'tenant_id': 2}
self.api.plugin_rpc = mock.Mock()
ri = mock.Mock()
ri.router = fake_router
routers = [ri.router]
with contextlib.nested(
mock.patch.object(self.api.plugin_rpc, 'get_routers'),
mock.patch.object(self.api, '_get_router_info_list_for_tenant'),
mock.patch.object(self.api.fwaas_driver, 'delete_firewall'),
mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted'),
mock.patch.object(self.api.fwplugin_rpc,
'get_firewalls_for_tenant'),
mock.patch.object(context, 'Context')
) as (
mock_get_routers,
mock_get_router_info_list_for_tenant,
mock_driver_delete_firewall,
mock_firewall_deleted,
mock_get_firewalls_for_tenant,
mock_Context):
mock_driver_delete_firewall.return_value = True
ctx = mock.sentinel.context
mock_Context.return_value = ctx
mock_get_router_info_list_for_tenant.return_value = routers
mock_get_firewalls_for_tenant.return_value = fake_firewall_list
self.api._process_router_add(ri)
mock_get_router_info_list_for_tenant.assert_called_with(
routers,
ri.router['tenant_id'])
mock_get_firewalls_for_tenant.assert_called_once_with(ctx)
mock_driver_delete_firewall.assert_called_once_with(
routers,
fake_firewall_list[0])
mock_firewall_deleted.assert_called_once_with(
ctx,
fake_firewall_list[0]['id'])
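    # The helpers below build RouterInfo objects and check how
    # _get_router_info_list_for_tenant filters routers depending on the agent's and
    # the router's use of namespaces.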
def _prepare_router_data(self, use_namespaces):
router = {'id': str(uuid.uuid4()), 'tenant_id': str(uuid.uuid4())}
return l3_agent.RouterInfo(router['id'], self.conf.root_helper,
use_namespaces, router=router)
def _get_router_info_list_with_namespace_helper(self,
router_use_namespaces):
self.conf.set_override('use_namespaces', True)
ri = self._prepare_router_data(
use_namespaces=router_use_namespaces)
routers = [ri.router]
self.api.router_info = {ri.router_id: ri}
with mock.patch.object(ip_lib.IPWrapper,
'get_namespaces') as mock_get_namespaces:
mock_get_namespaces.return_value = ri.ns_name
router_info_list = self.api._get_router_info_list_for_tenant(
routers,
ri.router['tenant_id'])
self.assertEqual([ri], router_info_list)
mock_get_namespaces.assert_called_once_with(
self.conf.root_helper)
def _get_router_info_list_without_namespace_helper(self,
router_use_namespaces):
self.conf.set_override('use_namespaces', False)
ri = self._prepare_router_data(
use_namespaces=router_use_namespaces)
routers = [ri.router]
self.api.router_info = {ri.router_id: ri}
router_info_list = self.api._get_router_info_list_for_tenant(
routers,
ri.router['tenant_id'])
if router_use_namespaces:
self.assertFalse(router_info_list)
else:
self.assertEqual([ri], router_info_list)
def test_get_router_info_list_for_tenant_for_namespaces_enabled(self):
self._get_router_info_list_with_namespace_helper(
router_use_namespaces=True)
def test_get_router_info_list_for_tenant_for_namespaces_disabled(self):
self._get_router_info_list_without_namespace_helper(
router_use_namespaces=False)
def test_get_router_info_list_tenant_with_namespace_router_without(self):
self._get_router_info_list_with_namespace_helper(
router_use_namespaces=False)
def test_get_router_info_list_tenant_without_namespace_router_with(self):
self._get_router_info_list_without_namespace_helper(
router_use_namespaces=True)
def _get_router_info_list_router_without_router_info_helper(self,
rtr_with_ri):
self.conf.set_override('use_namespaces', True)
# ri.router with associated router_info (ri)
# rtr2 has no router_info
ri = self._prepare_router_data(use_namespaces=True)
rtr2 = {'id': str(uuid.uuid4()), 'tenant_id': ri.router['tenant_id']}
routers = [rtr2]
self.api.router_info = {}
ri_expected = []
if rtr_with_ri:
self.api.router_info[ri.router_id] = ri
routers.append(ri.router)
ri_expected.append(ri)
with mock.patch.object(ip_lib.IPWrapper,
'get_namespaces') as mock_get_namespaces:
mock_get_namespaces.return_value = ri.ns_name
router_info_list = self.api._get_router_info_list_for_tenant(
routers,
ri.router['tenant_id'])
self.assertEqual(ri_expected, router_info_list)
def test_get_router_info_list_router_without_router_info(self):
self._get_router_info_list_router_without_router_info_helper(
rtr_with_ri=False)
def test_get_router_info_list_two_routers_one_without_router_info(self):
self._get_router_info_list_router_without_router_info_helper(
rtr_with_ri=True)
| apache-2.0 |