repo_name | ref | path | copies | content
---|---|---|---|---
stringlengths 5-100 | stringlengths 12-67 | stringlengths 4-244 | stringlengths 1-8 | stringlengths 0-1.05M ⌀
BambooL/jeeves | refs/heads/master | test/web/calendar/test_Jcal.py | 3 | # -*- coding: iso-8859-15 -*-
"""Conf FunkLoad test
$Id$
"""
import unittest
from random import random
from funkload.FunkLoadTestCase import FunkLoadTestCase
class Jcal(FunkLoadTestCase):
    """This test uses a configuration file Conf.conf."""

    def setUp(self):
        """Setting up test."""
        self.server_url = self.conf_get('main', 'url')
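
    # Conf.conf is expected to provide these options; the values shown here
    # are hypothetical:
    #   [main]
    #   url = http://localhost:8080
    #   [test_simple]
    #   nb_time = 20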
    def test_simple(self):
        # The description should be set in the configuration file
        server_url = self.server_url
        # begin of test ---------------------------------------------
        nb_time = self.conf_getInt('test_simple', 'nb_time')
        for i in range(nb_time):
            self.get(server_url, description='Get url')
        # end of test -----------------------------------------------
if __name__ in ('main', '__main__'):
    unittest.main()
|
ofir123/CouchPotatoServer | refs/heads/master | libs/chardet/jisfreq.py | 3130 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M of text material, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP.
# Characters are sorted by frequency; each row below gives the cumulative
# coverage of the N most frequent characters:
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
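# (0.25 * 12.58 = 3.145, rounded to 3.0)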
# Char to FreqOrder table
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
|
rogalski/pylint | refs/heads/master | pylint/reporters/__init__.py | 4 | # Copyright (c) 2006, 2010, 2012-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014-2016 Claudiu Popa <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""utilities methods and classes for reporters"""
from __future__ import print_function
import sys
import locale
import os
import warnings
import six
CMPS = ['=', '-', '+']
# py3k has no more cmp builtin
if sys.version_info >= (3, 0):
    def cmp(a, b):  # pylint: disable=redefined-builtin
        return (a > b) - (a < b)
def diff_string(old, new):
    """given an old and new int value, return a string representing the
    difference
    """
    diff = abs(old - new)
    diff_str = "%s%s" % (CMPS[cmp(old, new)], diff and ('%.2f' % diff) or '')
    return diff_str
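
# e.g. diff_string(10, 12) -> '+2.00', diff_string(12, 10) -> '-2.00',
# diff_string(5, 5) -> '=' (a zero diff renders as the bare comparator).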
class BaseReporter(object):
"""base class for reporters
symbols: show short symbolic names for messages.
"""
extension = ''
    def __init__(self, output=None):
        self.linter = None
        self.section = 0
        self.out = None
        self.out_encoding = None
        self.set_output(output)
        # Build the path prefix to strip to get relative paths
        self.path_strip_prefix = os.getcwd() + os.sep
    def handle_message(self, msg):
        """Handle a new message triggered on the current file."""

    def set_output(self, output=None):
        """set output stream"""
        self.out = output or sys.stdout
    if six.PY3:
        encode = lambda self, string: string
    else:
        def encode(self, string):
            if not isinstance(string, six.text_type):
                return string
            encoding = (getattr(self.out, 'encoding', None) or
                        locale.getpreferredencoding(do_setlocale=False) or
                        sys.getdefaultencoding())
            # errors=replace, we don't want to crash when attempting to show
            # source code line that can't be encoded with the current locale
            # settings
            return string.encode(encoding, 'replace')
    def writeln(self, string=''):
        """write a line in the output buffer"""
        print(self.encode(string), file=self.out)

    def display_reports(self, layout):
        """display results encapsulated in the layout tree"""
        self.section = 0
        if hasattr(layout, 'report_id'):
            layout.children[0].children[0].data += ' (%s)' % layout.report_id
        self._display(layout)

    def _display(self, layout):
        """display the layout"""
        raise NotImplementedError()
    def display_messages(self, layout):
        """Hook for displaying the messages of the reporter

        This will be called whenever the underlying messages
        need to be displayed. For some reporters, it probably
        doesn't make sense to display messages as soon as they
        are available, so some mechanism of storing them could be used.
        This method can be implemented to display them after they've
        been aggregated.
        """

    # Event callbacks
    def on_set_current_module(self, module, filepath):
        """Hook called when a module starts to be analysed."""

    def on_close(self, stats, previous_stats):
        """Hook called when a module finished analyzing."""
class CollectingReporter(BaseReporter):
"""collects messages"""
name = 'collector'
def __init__(self):
BaseReporter.__init__(self)
self.messages = []
def handle_message(self, msg):
self.messages.append(msg)
_display = None
def initialize(linter):
"""initialize linter with reporters in this package """
from pylint import utils
utils.register_plugins(linter, __path__[0])
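
# A minimal custom reporter is a sketch like the following (hypothetical
# names), wired in via linter.set_reporter(...):
#
#     class PrintReporter(BaseReporter):
#         name = 'print'
#
#         def handle_message(self, msg):
#             self.writeln('%s:%s: %s' % (msg.path, msg.line, msg.msg))
#
#         def _display(self, layout):
#             pass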
|
PaytonShaw/shadowsocks1 | refs/heads/master | shadowsocks/encrypt_rc4_md5.py | 31 | #!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
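
# rc4-md5 derives the per-session RC4 key as MD5(key || iv) below, so reusing
# the same master key with a fresh random iv still yields a distinct RC4
# keystream for every connection.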
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
                  i=1, padding=1):
    md5 = hashlib.md5()
    md5.update(key)
    md5.update(iv)
    rc4_key = md5.digest()

    import M2Crypto.EVP
    return M2Crypto.EVP.Cipher('rc4', rc4_key, '', op, key_as_bytes=0,
                               d='md5', salt=None, i=1, padding=1)
|
wakatime/wakatime | refs/heads/master | wakatime/packages/py26/pygments/lexers/eiffel.py | 31 | # -*- coding: utf-8 -*-
"""
    pygments.lexers.eiffel
    ~~~~~~~~~~~~~~~~~~~~~~

    Lexer for the Eiffel language.

    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation
__all__ = ['EiffelLexer']
class EiffelLexer(RegexLexer):
"""
For `Eiffel <http://www.eiffel.com>`_ source code.
.. versionadded:: 2.0
"""
name = 'Eiffel'
aliases = ['eiffel']
filenames = ['*.e']
mimetypes = ['text/x-eiffel']
    tokens = {
        'root': [
            (r'[^\S\n]+', Text),
            (r'--.*?\n', Comment.Single),
            (r'[^\S\n]+', Text),
            # Please note that keyword and operator are case insensitive.
            (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
            (r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word),
            (words((
                'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached',
                'attribute', 'check', 'class', 'convert', 'create', 'debug',
                'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure',
                'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if',
                'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none',
                'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename',
                'require', 'rescue', 'retry', 'select', 'separate', 'then',
                'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'),
             Keyword.Reserved),
            (r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String),
            (r'"([^"%\n]|%.)*?"', String),
            include('numbers'),
            (r"'([^'%]|%'|%%)'", String.Char),
            (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator),
            (r"([{}():;,.])", Punctuation),
            (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
            (r'([A-Z][A-Z0-9_]*)', Name.Class),
            (r'\n+', Text),
        ],
        'numbers': [
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'0[bB][01]+', Number.Bin),
            (r'0[cC][0-7]+', Number.Oct),
            (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
            (r'[0-9]+', Number.Integer),
        ],
    }
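
# Usage sketch (standard Pygments API; the Eiffel snippet is hypothetical):
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('io.put_string ("Hello")', EiffelLexer(), TerminalFormatter()))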
|
puckipedia/youtube-dl | refs/heads/master | youtube_dl/extractor/ehow.py | 195 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class EHowIE(InfoExtractor):
    IE_NAME = 'eHow'
    _VALID_URL = r'https?://(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
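    # e.g. http://www.ehow.com/video_12245069_hardwood-flooring-basics.html
    # matches, with the (?P<id>...) group capturing '12245069'.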
    _TEST = {
        'url': 'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
        'md5': '9809b4e3f115ae2088440bcb4efbf371',
        'info_dict': {
            'id': '12245069',
            'ext': 'flv',
            'title': 'Hardwood Flooring Basics',
            'description': 'Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...',
            'uploader': 'Erick Nathan',
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
        final_url = compat_urllib_parse_unquote(video_url)
        uploader = self._html_search_meta('uploader', webpage)
        title = self._og_search_title(webpage).replace(' | eHow', '')

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'uploader': uploader,
        }
|
jayceyxc/hue | refs/heads/master | desktop/core/ext-py/django-nose-1.3/django_nose/runner.py | 26 | """Django test runner that invokes nose.
You can use... ::

    NOSE_ARGS = ['list', 'of', 'args']

in settings.py for arguments that you want always passed to nose.
"""
from __future__ import print_function
import os
import sys
from optparse import make_option
from types import MethodType
import django
from django.conf import settings
from django.core import exceptions
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.core.management.commands.loaddata import Command
from django.db import connections, transaction, DEFAULT_DB_ALIAS
from django.db.backends.creation import BaseDatabaseCreation
from django.utils.importlib import import_module
try:
from django.apps import apps
except ImportError:
# Django < 1.7
from django.db.models.loading import cache as apps
import nose.core
from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin, TestReorderer
from django_nose.utils import uses_mysql
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from django.test.runner import DiscoverRunner
except ImportError:
# Django < 1.8
from django.test.simple import DjangoTestSuiteRunner as DiscoverRunner
__all__ = ['BasicNoseRunner', 'NoseTestSuiteRunner']
# This is a table of Django's "manage.py test" options which
# correspond to nosetests options with a different name:
OPTION_TRANSLATION = {'--failfast': '-x',
'--nose-verbosity': '--verbosity'}
def translate_option(opt):
if '=' in opt:
long_opt, value = opt.split('=', 1)
return '%s=%s' % (translate_option(long_opt), value)
return OPTION_TRANSLATION.get(opt, opt)
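# Illustrative translations (derived from the table above; not part of the
# original module):
#   translate_option('--failfast')          -> '-x'
#   translate_option('--nose-verbosity=2')  -> '--verbosity=2'
#   translate_option('--noinput')           -> '--noinput'  (no mapping, passed through)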
# Django v1.2 does not have a _get_test_db_name() function.
if not hasattr(BaseDatabaseCreation, '_get_test_db_name'):
def _get_test_db_name(self):
TEST_DATABASE_PREFIX = 'test_'
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
BaseDatabaseCreation._get_test_db_name = _get_test_db_name
def _get_plugins_from_settings():
plugins = (list(getattr(settings, 'NOSE_PLUGINS', [])) +
['django_nose.plugin.TestReorderer'])
for plug_path in plugins:
try:
dot = plug_path.rindex('.')
except ValueError:
raise exceptions.ImproperlyConfigured(
"%s isn't a Nose plugin module" % plug_path)
p_mod, p_classname = plug_path[:dot], plug_path[dot + 1:]
try:
mod = import_module(p_mod)
except ImportError as e:
raise exceptions.ImproperlyConfigured(
'Error importing Nose plugin module %s: "%s"' % (p_mod, e))
try:
p_class = getattr(mod, p_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(
'Nose plugin module "%s" does not define a "%s"' %
(p_mod, p_classname))
yield p_class()
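# A minimal sketch of the corresponding settings.py entry (the dotted path
# below is hypothetical):
#   NOSE_PLUGINS = ['myproject.testing.plugins.TimingPlugin']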
def _get_options():
"""Return all nose options that don't conflict with django options."""
cfg_files = nose.core.all_config_files()
manager = nose.core.DefaultPluginManager()
config = nose.core.Config(env=os.environ, files=cfg_files, plugins=manager)
config.plugins.addPlugins(list(_get_plugins_from_settings()))
options = config.getParser()._get_all_options()
# copy nose's --verbosity option and rename to --nose-verbosity
verbosity = [o for o in options if o.get_opt_string() == '--verbosity'][0]
verbosity_attrs = dict((attr, getattr(verbosity, attr))
for attr in verbosity.ATTRS
if attr not in ('dest', 'metavar'))
options.append(make_option('--nose-verbosity',
dest='nose_verbosity',
metavar='NOSE_VERBOSITY',
**verbosity_attrs))
# Django 1.6 introduces a "--pattern" option, which is shortened into "-p"
# do not allow "-p" to collide with nose's "--plugins" option.
plugins_option = [o for o in options if o.get_opt_string() == '--plugins'][0]
plugins_option._short_opts.remove('-p')
django_opts = [opt.dest for opt in BaseCommand.option_list] + ['version']
return tuple(o for o in options if o.dest not in django_opts and
o.action != 'help')
class BasicNoseRunner(DiscoverRunner):
"""Facade that implements a nose runner in the guise of a Django runner
You shouldn't have to use this directly unless the additions made by
``NoseTestSuiteRunner`` really bother you. They shouldn't, because they're
all off by default.
"""
__test__ = False
# Replace the builtin command options with the merged django/nose options:
options = _get_options()
def run_suite(self, nose_argv):
result_plugin = ResultPlugin()
plugins_to_add = [DjangoSetUpPlugin(self),
result_plugin,
TestReorderer()]
for plugin in _get_plugins_from_settings():
plugins_to_add.append(plugin)
try:
django.setup()
except AttributeError:
# Setup isn't necessary in Django < 1.7
pass
nose.core.TestProgram(argv=nose_argv, exit=False,
addplugins=plugins_to_add)
return result_plugin.result
def run_tests(self, test_labels, extra_tests=None):
"""Run the unit tests for all the test names in the provided list.
Test names specified may be file or module names, and may optionally
indicate the test case to run by separating the module or file name
from the test case name with a colon. Filenames may be relative or
absolute.
N.B.: The test_labels argument *MUST* be a sequence of
strings, *NOT* just a string object. (Or you will be
        specifying tests for each character in your string, and
        not the whole string.)
Examples:
runner.run_tests( ('test.module',) )
runner.run_tests(['another.test:TestCase.test_method'])
runner.run_tests(['a.test:TestCase'])
runner.run_tests(['/path/to/test/file.py:test_function'])
runner.run_tests( ('test.module', 'a.test:TestCase') )
Note: the extra_tests argument is currently ignored. You can
run old non-nose code that uses it without totally breaking,
but the extra tests will not be run. Maybe later.
Returns the number of tests that failed.
"""
nose_argv = (['nosetests'] + list(test_labels))
if hasattr(settings, 'NOSE_ARGS'):
nose_argv.extend(settings.NOSE_ARGS)
# Skip over 'manage.py test' and any arguments handled by django.
django_opts = ['--noinput', '--liveserver', '-p', '--pattern']
for opt in BaseCommand.option_list:
django_opts.extend(opt._long_opts)
django_opts.extend(opt._short_opts)
nose_argv.extend(translate_option(opt) for opt in sys.argv[1:]
if opt.startswith('-')
and not any(opt.startswith(d) for d in django_opts))
# if --nose-verbosity was omitted, pass Django verbosity to nose
if ('--verbosity' not in nose_argv and
not any(opt.startswith('--verbosity=') for opt in nose_argv)):
nose_argv.append('--verbosity=%s' % str(self.verbosity))
if self.verbosity >= 1:
print(' '.join(nose_argv))
result = self.run_suite(nose_argv)
# suite_result expects the suite as the first argument. Fake it.
return self.suite_result({}, result)
_old_handle = Command.handle
def _foreign_key_ignoring_handle(self, *fixture_labels, **options):
"""Wrap the the stock loaddata to ignore foreign key
checks so we can load circular references from fixtures.
This is monkeypatched into place in setup_databases().
"""
using = options.get('database', DEFAULT_DB_ALIAS)
commit = options.get('commit', True)
connection = connections[using]
# MySQL stinks at loading circular references:
if uses_mysql(connection):
cursor = connection.cursor()
cursor.execute('SET foreign_key_checks = 0')
_old_handle(self, *fixture_labels, **options)
if uses_mysql(connection):
cursor = connection.cursor()
cursor.execute('SET foreign_key_checks = 1')
# NOTE(erickt): This breaks installing Hue examples because we use
# loaddata to install the examples, then run Document.objects.sync() to
# clean up the database, so we need our connection to be left open.
#if commit:
# connection.close()
def _skip_create_test_db(self, verbosity=1, autoclobber=False, serialize=True):
"""``create_test_db`` implementation that skips both creation and flushing
The idea is to re-use the perfectly good test DB already created by an
earlier test run, cutting the time spent before any tests run from 5-13s
(depending on your I/O luck) down to 3.
"""
# Notice that the DB supports transactions. Originally, this was done in
# the method this overrides. The confirm method was added in Django v1.3
# (https://code.djangoproject.com/ticket/12991) but removed in Django v1.5
# (https://code.djangoproject.com/ticket/17760). In Django v1.5
# supports_transactions is a cached property evaluated on access.
if callable(getattr(self.connection.features, 'confirm', None)):
# Django v1.3-4
self.connection.features.confirm()
elif hasattr(self, "_rollback_works"):
# Django v1.2 and lower
can_rollback = self._rollback_works()
self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = can_rollback
return self._get_test_db_name()
def _reusing_db():
"""Return whether the ``REUSE_DB`` flag was passed"""
return os.getenv('REUSE_DB', 'false').lower() in ('true', '1', '')
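# Examples of how the environment variable is interpreted (shell usage is
# illustrative):
#   REUSE_DB=1 ./manage.py test   -> reuse the existing test database
#   REUSE_DB=0 ./manage.py test   -> recreate it (same as leaving it unset)
# Note that an empty value (REUSE_DB=) also counts as "reuse".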
def _can_support_reuse_db(connection):
"""Return whether it makes any sense to
use REUSE_DB with the backend of a connection."""
# Perhaps this is a SQLite in-memory DB. Those are created implicitly when
# you try to connect to them, so our usual test doesn't work.
return not connection.creation._get_test_db_name() == ':memory:'
def _should_create_database(connection):
"""Return whether we should recreate the given DB.
This is true if the DB doesn't exist or the REUSE_DB env var isn't truthy.
"""
# TODO: Notice when the Model classes change and return True. Worst case,
# we can generate sqlall and hash it, though it's a bit slow (2 secs) and
# hits the DB for no good reason. Until we find a faster way, I'm inclined
# to keep making people explicitly saying REUSE_DB if they want to reuse
# the DB.
if not _can_support_reuse_db(connection):
return True
# Notice whether the DB exists, and create it if it doesn't:
try:
connection.cursor()
except Exception: # TODO: Be more discerning but still DB agnostic.
return True
return not _reusing_db()
def _mysql_reset_sequences(style, connection):
"""Return a list of SQL statements needed to
reset all sequences for Django tables."""
tables = connection.introspection.django_table_names(only_existing=True)
flush_statements = connection.ops.sql_flush(
style, tables, connection.introspection.sequence_list())
# connection.ops.sequence_reset_sql() is not implemented for MySQL,
# and the base class just returns []. TODO: Implement it by pulling
# the relevant bits out of sql_flush().
return [s for s in flush_statements if s.startswith('ALTER')]
# Being overzealous and resetting the sequences on non-empty tables
# like django_content_type seems to be fine in MySQL: adding a row
# afterward does find the correct sequence number rather than
# crashing into an existing row.
class NoseTestSuiteRunner(BasicNoseRunner):
"""A runner that optionally skips DB creation
Monkeypatches connection.creation to let you skip creating databases if
they already exist. Your tests will start up much faster.
To opt into this behavior, set the environment variable ``REUSE_DB`` to
something that isn't "0" or "false" (case insensitive).
"""
def _get_models_for_connection(self, connection):
"""Return a list of models for a connection."""
tables = connection.introspection.get_table_list(connection.cursor())
return [m for m in apps.get_models() if
m._meta.db_table in tables]
def setup_databases(self):
for alias in connections:
connection = connections[alias]
creation = connection.creation
test_db_name = creation._get_test_db_name()
# Mess with the DB name so other things operate on a test DB
# rather than the real one. This is done in create_test_db when
# we don't monkeypatch it away with _skip_create_test_db.
orig_db_name = connection.settings_dict['NAME']
connection.settings_dict['NAME'] = test_db_name
if _should_create_database(connection):
# We're not using _skip_create_test_db, so put the DB name
# back:
connection.settings_dict['NAME'] = orig_db_name
# Since we replaced the connection with the test DB, closing
# the connection will avoid pooling issues with SQLAlchemy. The
# issue is trying to CREATE/DROP the test database using a
# connection to a DB that was established with that test DB.
# MySQLdb doesn't allow it, and SQLAlchemy attempts to reuse
# the existing connection from its pool.
connection.close()
else:
# Reset auto-increment sequences. Apparently, SUMO's tests are
# horrid and coupled to certain numbers.
cursor = connection.cursor()
style = no_style()
if uses_mysql(connection):
reset_statements = _mysql_reset_sequences(
style, connection)
else:
reset_statements = connection.ops.sequence_reset_sql(
style, self._get_models_for_connection(connection))
for reset_statement in reset_statements:
cursor.execute(reset_statement)
# Django v1.3 (https://code.djangoproject.com/ticket/9964)
# starts using commit_unless_managed() for individual
# connections. Backwards compatibility for Django 1.2 is to use
# the generic transaction function.
transaction.commit_unless_managed(using=connection.alias)
# Each connection has its own creation object, so this affects
# only a single connection:
creation.create_test_db = MethodType(
_skip_create_test_db, creation, creation.__class__)
Command.handle = _foreign_key_ignoring_handle
# With our class patch, does nothing but return some connection
# objects:
return super(NoseTestSuiteRunner, self).setup_databases()
def teardown_databases(self, *args, **kwargs):
"""Leave those poor, reusable databases alone if REUSE_DB is true."""
if not _reusing_db():
return super(NoseTestSuiteRunner, self).teardown_databases(
*args, **kwargs)
# else skip tearing down the DB so we can reuse it next time
|
rosmo/aurora | refs/heads/master | src/main/python/apache/aurora/client/api/restarter.py | 5 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from twitter.common import log
from apache.aurora.client.base import combine_messages
from .instance_watcher import InstanceWatcher
from .updater_util import FailureThreshold
from gen.apache.aurora.api.constants import ACTIVE_STATES
from gen.apache.aurora.api.ttypes import ResponseCode
class Restarter(object):
def __init__(self,
job_key,
update_config,
health_check_interval_seconds,
scheduler,
instance_watcher=None,
lock=None):
self._job_key = job_key
self._update_config = update_config
self.health_check_interval_seconds = health_check_interval_seconds
self._scheduler = scheduler
self._lock = lock
self._instance_watcher = instance_watcher or InstanceWatcher(
scheduler,
job_key.to_thrift(),
update_config.restart_threshold,
update_config.watch_secs,
health_check_interval_seconds)
def restart(self, instances):
# Verify that this operates on a valid job.
query = self._job_key.to_thrift_query()
query.statuses = ACTIVE_STATES
status = self._scheduler.getTasksWithoutConfigs(query)
if status.responseCode != ResponseCode.OK:
return status
failure_threshold = FailureThreshold(
self._update_config.max_per_instance_failures,
self._update_config.max_total_failures)
if not instances:
tasks = status.result.scheduleStatusResult.tasks
instances = sorted(task.assignedTask.instanceId for task in tasks)
if not instances:
log.info("No instances specified, and no active instances found in job %s" % self._job_key)
log.info("Nothing to do.")
return status
log.info("Performing rolling restart of job %s (instances: %s)" % (self._job_key, instances))
while instances and not failure_threshold.is_failed_update():
batch = instances[:self._update_config.batch_size]
instances = instances[self._update_config.batch_size:]
log.info("Restarting instances: %s", batch)
resp = self._scheduler.restartShards(self._job_key.to_thrift(), batch, self._lock)
if resp.responseCode != ResponseCode.OK:
log.error('Error restarting instances: %s', combine_messages(resp))
return resp
failed_instances = self._instance_watcher.watch(batch)
instances += failed_instances
failure_threshold.update_failure_counts(failed_instances)
if failure_threshold.is_failed_update():
log.info("Restart failures threshold reached. Aborting")
else:
log.info("All instances were restarted successfully")
return resp
|
iw3hxn/LibrERP | refs/heads/master | office_automation/gap_analysis/__init__.py | 6 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import gap_analysis
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
go-lab/appcomposer | refs/heads/master | appcomposer/cdata.py | 3 | import xml.etree.ElementTree as ET
# Don't sort things (so the order is preserved)
ET.sorted = lambda x, *args, **kwargs: x
# Code from:
# http://stackoverflow.com/questions/1091945/what-characters-do-i-need-to-escape-in-xml-documents
def CDATA(text=None):
element = ET.Element('![CDATA[')
element.text = text
return element
ET._original_serialize_xml = ET._serialize_xml
# ``__code__`` exists on both Python 2.6+ and Python 3; the Python 2-only
# ``func_code`` attribute would raise AttributeError under Python 3 before
# the Python 3 branch below could ever run.
if ET._serialize_xml.__code__.co_argcount == 5:
# Python 2
def _serialize_xml_py2(write, elem, encoding, qnames, namespaces):
if elem.tag == '![CDATA[':
write((u"<%s%s]]>" % (elem.tag, elem.text)).encode(encoding))
return
return ET._original_serialize_xml(
write, elem, encoding, qnames, namespaces)
ET._serialize_xml = ET._serialize['xml'] = _serialize_xml_py2
else:
# Python 3
def _serialize_xml_py3(write, elem, qnames, namespaces):
if elem.tag == '![CDATA[':
write("<%s%s]]>" % (
elem.tag, elem.text))
return
return ET._original_serialize_xml(
write, elem, qnames, namespaces)
ET._serialize_xml = ET._serialize['xml'] = _serialize_xml_py3
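# A short usage sketch (assuming the monkeypatch above is in effect):
#
#   root = ET.Element('data')
#   root.append(CDATA('raw <markup> & text'))
#   ET.tostring(root)
#
# The child is then serialized literally as <![CDATA[raw <markup> & text]]>
# instead of being entity-escaped.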
|
hsiaoyi0504/scikit-learn | refs/heads/master | sklearn/feature_extraction/setup.py | 314 | import os
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('feature_extraction', parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension('_hashing',
sources=['_hashing.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
|
base2Services/alfajor | refs/heads/master | scripts/stale_snapshot_cleanup.py | 1 | import sys
import os
pwd = os.environ['PWD']
alfajor_path = "{0}".format(pwd) # if running from alfajor root
alfajor_path2 = "{0}/..".format(pwd) # if running from scripts folder
alfajor_path3 = "{0}/alfajor".format(pwd) # if running from folder above alfajor
for path in [alfajor_path, alfajor_path2, alfajor_path3]:
sys.path.append(path)
from alfajor import stale_snapshot_cleanup
# requires explicit delete command, otherwise will only list the EBS Snapshots
if 'DO_DELETE' in os.environ:
    dry_run = os.environ['DO_DELETE'] != '1'
else:
dry_run = True
account = sys.argv[1]
sd = stale_snapshot_cleanup.SnapShotCleanup(debug=True, verbose=True, account=account)
sd.cleanup_stale_snapshots(dry_run=dry_run)
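# Illustrative invocations (the account name is a placeholder):
#   python scripts/stale_snapshot_cleanup.py my-account              # dry run, list only
#   DO_DELETE=1 python scripts/stale_snapshot_cleanup.py my-account  # actually delete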
|
ondra-novak/chromium.src | refs/heads/nw | tools/valgrind/unused_suppressions.py | 187 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import urllib2
import suppressions
def main():
supp = suppressions.GetSuppressions()
all_supps = []
for supps in supp.values():
all_supps += [s.description for s in supps]
sys.stdout.write(urllib2.urlopen(
'http://chromium-build-logs.appspot.com/unused_suppressions',
'\n'.join(all_supps)).read())
return 0
if __name__ == "__main__":
sys.exit(main())
|
Ssawa/Diamond | refs/heads/master | src/collectors/ksm/test/testksm.py | 31 | #!/usr/bin/python
# coding=utf-8
##########################################################################
import os
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ksm import KSMCollector
##########################################################################
class TestKSMCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('KSMCollector', {
'interval': 10,
'ksm_path': os.path.dirname(__file__) + '/fixtures/'
})
self.collector = KSMCollector(config, None)
def test_import(self):
self.assertTrue(KSMCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock):
self.collector.collect()
metrics = {
'full_scans': 123.0,
'pages_shared': 124.0,
'pages_sharing': 125.0,
'pages_to_scan': 100.0,
'pages_unshared': 126.0,
'pages_volatile': 127.0,
'run': 1.0,
'sleep_millisecs': 20.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
elena/django | refs/heads/master | tests/view_tests/tests/test_specials.py | 133 | from django.test import SimpleTestCase, override_settings
@override_settings(ROOT_URLCONF='view_tests.generic_urls')
class URLHandling(SimpleTestCase):
"""
Tests for URL handling in views and responses.
"""
redirect_target = "/%E4%B8%AD%E6%96%87/target/"
def test_nonascii_redirect(self):
"""
A non-ASCII argument to HttpRedirect is handled properly.
"""
response = self.client.get('/nonascii_redirect/')
self.assertRedirects(response, self.redirect_target)
def test_permanent_nonascii_redirect(self):
"""
A non-ASCII argument to HttpPermanentRedirect is handled properly.
"""
response = self.client.get('/permanent_nonascii_redirect/')
self.assertRedirects(response, self.redirect_target, status_code=301)
|
abhishek-ch/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/__init__.py | 38 | VERSION = (1, 6, 10, 'final', 0)
def get_version(*args, **kwargs):
# Don't litter django/__init__.py with all the get_version stuff.
# Only import if it's actually called.
from django.utils.version import get_version
return get_version(*args, **kwargs)
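# For this release the lazily imported helper yields, e.g.:
#   get_version()  # -> '1.6.10'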
|
wuhengzhi/chromium-crosswalk | refs/heads/master | build/android/pylib/utils/run_tests_helper.py | 24 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
from devil.utils.run_tests_helper import *
|
w1ll1am23/home-assistant | refs/heads/dev | homeassistant/components/cloud/__init__.py | 5 | """Component to integrate the Home Assistant cloud."""
from hass_nabucasa import Cloud
import voluptuous as vol
from homeassistant.components.alexa import const as alexa_const
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_DESCRIPTION,
CONF_MODE,
CONF_NAME,
CONF_REGION,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import account_link, http_api
from .client import CloudClient
from .const import (
CONF_ACCOUNT_LINK_URL,
CONF_ACME_DIRECTORY_SERVER,
CONF_ALEXA,
CONF_ALEXA_ACCESS_TOKEN_URL,
CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL,
CONF_COGNITO_CLIENT_ID,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_GOOGLE_ACTIONS,
CONF_GOOGLE_ACTIONS_REPORT_STATE_URL,
CONF_RELAYER,
CONF_REMOTE_API_URL,
CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID,
CONF_VOICE_API_URL,
DOMAIN,
MODE_DEV,
MODE_PROD,
)
from .prefs import CloudPreferences
DEFAULT_MODE = MODE_PROD
SERVICE_REMOTE_CONNECT = "remote_connect"
SERVICE_REMOTE_DISCONNECT = "remote_disconnect"
ALEXA_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DESCRIPTION): cv.string,
vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
GOOGLE_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
}
)
ASSISTANT_SCHEMA = vol.Schema(
{vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA}
)
ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend(
{vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}}
)
GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend(
{vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}}
)
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In(
[MODE_DEV, MODE_PROD]
),
vol.Optional(CONF_COGNITO_CLIENT_ID): str,
vol.Optional(CONF_USER_POOL_ID): str,
vol.Optional(CONF_REGION): str,
vol.Optional(CONF_RELAYER): str,
vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(),
vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(),
vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(),
vol.Optional(CONF_VOICE_API_URL): vol.Url(),
}
)
},
extra=vol.ALLOW_EXTRA,
)
class CloudNotAvailable(HomeAssistantError):
"""Raised when an action requires the cloud but it's not available."""
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
"""Test if user is logged in."""
return DOMAIN in hass.data and hass.data[DOMAIN].is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
"""Test if user has an active subscription."""
return async_is_logged_in(hass) and not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
"""Create a cloudhook."""
if not async_is_logged_in(hass):
raise CloudNotAvailable
hook = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True)
return hook["cloudhook_url"]
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
"""Delete a cloudhook."""
if DOMAIN not in hass.data:
raise CloudNotAvailable
await hass.data[DOMAIN].cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
"""Get the remote UI URL."""
if not async_is_logged_in(hass):
raise CloudNotAvailable
if not hass.data[DOMAIN].client.prefs.remote_enabled:
raise CloudNotAvailable
if not hass.data[DOMAIN].remote.instance_domain:
raise CloudNotAvailable
return f"https://{hass.data[DOMAIN].remote.instance_domain}"
def is_cloudhook_request(request):
"""Test if a request came from a cloudhook.
Async friendly.
"""
return isinstance(request, MockRequest)
async def async_setup(hass, config):
"""Initialize the Home Assistant cloud."""
# Process configs
if DOMAIN in config:
kwargs = dict(config[DOMAIN])
else:
kwargs = {CONF_MODE: DEFAULT_MODE}
# Alexa/Google custom config
alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})
# Cloud settings
prefs = CloudPreferences(hass)
await prefs.async_initialize()
# Initialize Cloud
websession = hass.helpers.aiohttp_client.async_get_clientsession()
client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)
async def _shutdown(event):
"""Shutdown event."""
await cloud.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
async def _service_handler(service):
"""Handle service for cloud."""
if service.service == SERVICE_REMOTE_CONNECT:
await cloud.remote.connect()
await prefs.async_update(remote_enabled=True)
elif service.service == SERVICE_REMOTE_DISCONNECT:
await cloud.remote.disconnect()
await prefs.async_update(remote_enabled=False)
hass.helpers.service.async_register_admin_service(
DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler
)
hass.helpers.service.async_register_admin_service(
DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler
)
loaded = False
async def _on_connect():
"""Discover RemoteUI binary sensor."""
nonlocal loaded
# Prevent multiple discovery
if loaded:
return
loaded = True
await hass.helpers.discovery.async_load_platform(
"binary_sensor", DOMAIN, {}, config
)
await hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config)
await hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config)
cloud.iot.register_on_connect(_on_connect)
await cloud.start()
await http_api.async_setup(hass)
account_link.async_setup(hass)
return True
|
ewandor/home-assistant | refs/heads/dev | homeassistant/components/alarm_control_panel/demo.py | 8 | """
Demo platform that has a fake alarm control panel.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import datetime
import homeassistant.components.alarm_control_panel.manual as manual
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED, CONF_DELAY_TIME,
CONF_PENDING_TIME, CONF_TRIGGER_TIME)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Demo alarm control panel platform."""
add_devices([
manual.ManualAlarm(hass, 'Alarm', '1234', None, False, {
STATE_ALARM_ARMED_AWAY: {
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_PENDING_TIME: datetime.timedelta(seconds=5),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_HOME: {
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_PENDING_TIME: datetime.timedelta(seconds=5),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_NIGHT: {
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_PENDING_TIME: datetime.timedelta(seconds=5),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_DISARMED: {
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_CUSTOM_BYPASS: {
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_PENDING_TIME: datetime.timedelta(seconds=5),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_TRIGGERED: {
CONF_PENDING_TIME: datetime.timedelta(seconds=5),
},
}),
])
|
sander76/home-assistant | refs/heads/dev | homeassistant/helpers/typing.py | 6 | """Typing Helpers for Home Assistant."""
from enum import Enum
from typing import Any, Dict, Mapping, Optional, Tuple, Union
import homeassistant.core
GPSType = Tuple[float, float]
ConfigType = Dict[str, Any]
ContextType = homeassistant.core.Context
DiscoveryInfoType = Dict[str, Any]
EventType = homeassistant.core.Event
HomeAssistantType = homeassistant.core.HomeAssistant
ServiceCallType = homeassistant.core.ServiceCall
ServiceDataType = Dict[str, Any]
StateType = Union[None, str, int, float]
TemplateVarsType = Optional[Mapping[str, Any]]
# Custom type for recorder Queries
QueryType = Any
class UndefinedType(Enum):
"""Singleton type for use with not set sentinel values."""
_singleton = 0
UNDEFINED = UndefinedType._singleton # pylint: disable=protected-access
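# A typical usage sketch (illustrative, not part of this module):
#   def async_update(self, name: Union[str, UndefinedType] = UNDEFINED) -> None:
#       if name is not UNDEFINED:
#           ...  # "name" was explicitly passed, even if it was None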
|
alqfahad/odoo | refs/heads/8.0 | addons/website_forum/models/res_users.py | 281 | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib import urlencode
import hashlib
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
class Users(osv.Model):
_inherit = 'res.users'
def __init__(self, pool, cr):
init_res = super(Users, self).__init__(pool, cr)
self.SELF_WRITEABLE_FIELDS = list(
set(
self.SELF_WRITEABLE_FIELDS +
['country_id', 'city', 'website', 'website_description', 'website_published']))
return init_res
def _get_user_badge_level(self, cr, uid, ids, name, args, context=None):
"""Return total badge per level of users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool['gamification.badge.user']
for id in ids:
result[id] = {
'gold_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'gold'), ('user_id', '=', id)], context=context, count=True),
'silver_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'silver'), ('user_id', '=', id)], context=context, count=True),
'bronze_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'bronze'), ('user_id', '=', id)], context=context, count=True),
}
return result
_columns = {
'create_date': fields.datetime('Create Date', select=True, readonly=True),
'karma': fields.integer('Karma'),
'badge_ids': fields.one2many('gamification.badge.user', 'user_id', 'Badges'),
'gold_badge': fields.function(_get_user_badge_level, string="Number of gold badges", type='integer', multi='badge_level'),
'silver_badge': fields.function(_get_user_badge_level, string="Number of silver badges", type='integer', multi='badge_level'),
'bronze_badge': fields.function(_get_user_badge_level, string="Number of bronze badges", type='integer', multi='badge_level'),
}
_defaults = {
'karma': 0,
}
def _generate_forum_token(self, cr, uid, user_id, email):
"""Return a token for email validation. This token is valid for the day
and is a hash based on a (secret) uuid generated by the forum module,
the user_id, the email and currently the day (to be updated if necessary). """
forum_uuid = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'website_forum.uuid')
return hashlib.sha256('%s-%s-%s-%s' % (
datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
forum_uuid,
user_id,
email)).hexdigest()
def send_forum_validation_email(self, cr, uid, user_id, forum_id=None, context=None):
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
token = self._generate_forum_token(cr, uid, user_id, user.email)
activation_template_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.validation_email')
if activation_template_id:
params = {
'token': token,
'id': user_id,
'email': user.email}
if forum_id:
params['forum_id'] = forum_id
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
token_url = base_url + '/forum/validate_email?%s' % urlencode(params)
tpl_ctx = dict(context, token_url=token_url)
self.pool['email.template'].send_mail(cr, SUPERUSER_ID, activation_template_id, user_id, force_send=True, context=tpl_ctx)
return True
def process_forum_validation_token(self, cr, uid, token, user_id, email, forum_id=None, context=None):
validation_token = self.pool['res.users']._generate_forum_token(cr, uid, user_id, email)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_id, context=context)
if token == validation_token and user.karma == 0:
karma = 3
if not forum_id:
forum_ids = self.pool['forum.forum'].search(cr, uid, [], limit=1, context=context)
if forum_ids:
forum_id = forum_ids[0]
if forum_id:
forum = self.pool['forum.forum'].browse(cr, uid, forum_id, context=context)
# karma gained: karma to ask a question and have 2 downvotes
karma = forum.karma_ask + (-2 * forum.karma_gen_question_downvote)
return user.write({'karma': karma})
return False
def add_karma(self, cr, uid, ids, karma, context=None):
for user in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [user.id], {'karma': user.karma + karma}, context=context)
return True
def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
if isinstance(excluded_categories, list):
if 'forum' not in excluded_categories:
excluded_categories.append('forum')
else:
excluded_categories = ['forum']
return super(Users, self).get_serialised_gamification_summary(cr, uid, excluded_categories=excluded_categories, context=context)
|
lindsayad/sympy | refs/heads/master | sympy/crypto/__init__.py | 38 | from sympy.crypto.crypto import (cycle_list,
encipher_shift, encipher_affine, encipher_substitution,
check_and_join, encipher_vigenere, decipher_vigenere, bifid5_square,
bifid6_square, encipher_hill, decipher_hill,
encipher_bifid5, encipher_bifid6, decipher_bifid5,
decipher_bifid6, encipher_kid_rsa, decipher_kid_rsa,
kid_rsa_private_key, kid_rsa_public_key, decipher_rsa, rsa_private_key,
rsa_public_key, encipher_rsa, lfsr_connection_polynomial,
lfsr_autocorrelation, lfsr_sequence, encode_morse, decode_morse,
elgamal_private_key, elgamal_public_key, decipher_elgamal,
encipher_elgamal, dh_private_key, dh_public_key, dh_shared_key,
padded_key, encipher_bifid, decipher_bifid, bifid_square, bifid5,
bifid6, bifid10)
|
VladKha/CodeWars | refs/heads/master | 7 kyu/Insert dashes/solve.py | 1 | def insert_dash(num):
odd = '13579'
s = str(num)
result = s[0]
for i in range(1, len(s)):
if s[i-1] in odd and s[i] in odd:
result += '-'
result += s[i]
return result
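# Example: a dash is inserted between each pair of adjacent odd digits.
#   insert_dash(454793)  # -> '4547-9-3'
#   insert_dash(13579)   # -> '1-3-5-7-9'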
|
Drooids/odoo | refs/heads/8.0 | openerp/addons/base/tests/test_search.py | 290 | import unittest2
import openerp.tests.common as common
class test_search(common.TransactionCase):
def test_00_search_order(self):
registry, cr, uid = self.registry, self.cr, self.uid
# Create 6 partners with a given name, and a given creation order to
        # ensure the order of their ID. Some are set as inactive to verify they
# are by default excluded from the searches and to provide a second
# `order` argument.
partners = registry('res.partner')
c = partners.create(cr, uid, {'name': 'test_search_order_C'})
d = partners.create(cr, uid, {'name': 'test_search_order_D', 'active': False})
a = partners.create(cr, uid, {'name': 'test_search_order_A'})
b = partners.create(cr, uid, {'name': 'test_search_order_B'})
ab = partners.create(cr, uid, {'name': 'test_search_order_AB'})
e = partners.create(cr, uid, {'name': 'test_search_order_E', 'active': False})
# The tests.
# The basic searches should exclude records that have active = False.
# The order of the returned ids should be given by the `order`
# parameter of search().
name_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="name asc")
self.assertEqual([a, ab, b, c], name_asc, "Search with 'NAME ASC' order failed.")
name_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="name desc")
self.assertEqual([c, b, ab, a], name_desc, "Search with 'NAME DESC' order failed.")
id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="id asc")
self.assertEqual([c, a, b, ab], id_asc, "Search with 'ID ASC' order failed.")
id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="id desc")
self.assertEqual([ab, b, a, c], id_desc, "Search with 'ID DESC' order failed.")
# The inactive records shouldn't be excluded as soon as a condition on
# that field is present in the domain. The `order` parameter of
        # search() should support any legal comma-separated values.
active_asc_id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id asc")
self.assertEqual([d, e, c, a, b, ab], active_asc_id_asc, "Search with 'ACTIVE ASC, ID ASC' order failed.")
active_desc_id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id asc")
self.assertEqual([c, a, b, ab, d, e], active_desc_id_asc, "Search with 'ACTIVE DESC, ID ASC' order failed.")
active_asc_id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id desc")
self.assertEqual([e, d, ab, b, a, c], active_asc_id_desc, "Search with 'ACTIVE ASC, ID DESC' order failed.")
active_desc_id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id desc")
self.assertEqual([ab, b, a, c, e, d], active_desc_id_desc, "Search with 'ACTIVE DESC, ID DESC' order failed.")
id_asc_active_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active asc")
self.assertEqual([c, d, a, b, ab, e], id_asc_active_asc, "Search with 'ID ASC, ACTIVE ASC' order failed.")
id_asc_active_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active desc")
self.assertEqual([c, d, a, b, ab, e], id_asc_active_desc, "Search with 'ID ASC, ACTIVE DESC' order failed.")
id_desc_active_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active asc")
self.assertEqual([e, ab, b, a, d, c], id_desc_active_asc, "Search with 'ID DESC, ACTIVE ASC' order failed.")
id_desc_active_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active desc")
self.assertEqual([e, ab, b, a, d, c], id_desc_active_desc, "Search with 'ID DESC, ACTIVE DESC' order failed.")
def test_10_inherits_m2order(self):
registry, cr, uid = self.registry, self.cr, self.uid
users_obj = registry('res.users')
# Find Employee group
group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
group_employee_id = group_employee_ref and group_employee_ref[1] or False
# Get country/state data
country_us_id = registry('res.country').search(cr, uid, [('code', 'like', 'US')])[0]
state_ids = registry('res.country.state').search(cr, uid, [('country_id', '=', country_us_id)], limit=2)
country_be_id = registry('res.country').search(cr, uid, [('code', 'like', 'BE')])[0]
# Create test users
search_user = users_obj.create(cr, uid, {'name': '__search', 'login': '__search', 'groups_id': [(6, 0, [group_employee_id])]})
a = users_obj.create(cr, uid, {'name': '__test_A', 'login': '__test_A', 'country_id': country_be_id, 'state_id': country_be_id})
b = users_obj.create(cr, uid, {'name': '__test_B', 'login': '__a_test_B', 'country_id': country_us_id, 'state_id': state_ids[1]})
c = users_obj.create(cr, uid, {'name': '__test_B', 'login': '__z_test_B', 'country_id': country_us_id, 'state_id': state_ids[0]})
# Do: search on res.users, order on a field on res.partner to try inherits'd fields, then res.users
user_ids = users_obj.search(cr, search_user, [], order='name asc, login desc')
expected_ids = [search_user, a, c, b]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one and inherits'd fields
user_ids = users_obj.search(cr, search_user, [], order='state_id asc, country_id desc, name asc, login desc')
expected_ids = [c, b, a, search_user]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one and inherits'd fields
user_ids = users_obj.search(cr, search_user, [], order='country_id desc, state_id desc, name asc, login desc')
expected_ids = [search_user, b, c, a]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one, but not by specifying in order parameter of search, but by overriding _order of res_users
old_order = users_obj._order
users_obj._order = 'country_id desc, name asc, login desc'
user_ids = users_obj.search(cr, search_user, [])
expected_ids = [search_user, c, b, a]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
users_obj._order = old_order
if __name__ == '__main__':
unittest2.main()
|
superdesk/Live-Blog | refs/heads/master | documentor/libraries/docutils-0.9.1-py3.2/docutils/writers/xetex/__init__.py | 2 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# :Author: Günter Milde <[email protected]>
# :Revision: $Revision: 7389 $
# :Date: $Date: 2012-03-30 13:58:21 +0200 (Fre, 30 Mär 2012) $
# :Copyright: © 2010 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
XeLaTeX document tree Writer.
A variant of Docutils' standard 'latex2e' writer producing output
suited for processing with XeLaTeX (http://tug.org/xetex/).
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import re
import docutils
from docutils import frontend, nodes, utils, writers, languages
from docutils.writers import latex2e
class Writer(latex2e.Writer):
"""A writer for Unicode-based LaTeX variants (XeTeX, LuaTeX)"""
supported = ('xetex','xelatex','luatex')
"""Formats this writer supports."""
default_template = 'xelatex.tex'
default_preamble = '\n'.join([
r'% Linux Libertine (free, wide coverage, not only for Linux)',
r'\setmainfont{Linux Libertine O}',
r'\setsansfont{Linux Biolinum O}',
r'\setmonofont[HyphenChar=None]{DejaVu Sans Mono}',
])
config_section = 'xetex writer'
config_section_dependencies = ('writers', 'latex2e writer')
settings_spec = frontend.filter_settings_spec(
latex2e.Writer.settings_spec,
'font_encoding',
template=('Template file. Default: "%s".' % default_template,
['--template'], {'default': default_template, 'metavar': '<file>'}),
latex_preamble=('Customization by LaTeX code in the preamble. '
            'Default: select Linux Libertine/Biolinum and DejaVu Sans Mono fonts.',
['--latex-preamble'],
{'default': default_preamble}),
)
def __init__(self):
latex2e.Writer.__init__(self)
self.settings_defaults.update({'fontencoding': ''}) # use default (EU1 or EU2)
self.translator_class = XeLaTeXTranslator
class Babel(latex2e.Babel):
"""Language specifics for XeTeX.
Use `polyglossia` instead of `babel` and adapt settings.
"""
language_codes = latex2e.Babel.language_codes.copy()
# Additionally supported or differently named languages:
language_codes.update({
# code Polyglossia-name comment
'cop': 'coptic',
'de': 'german', # new spelling (de_1996)
'de_1901': 'ogerman', # old spelling
'dv': 'divehi', # Maldivian
'dsb': 'lsorbian',
'el_polyton': 'polygreek',
'fa': 'farsi',
'grc': 'ancientgreek',
'hsb': 'usorbian',
'sh-cyrl': 'serbian', # Serbo-Croatian, Cyrillic script
'sh-latn': 'croatian', # Serbo-Croatian, Latin script
'sq': 'albanian',
'sr': 'serbian', # Cyrillic script (sr-cyrl)
'th': 'thai',
'vi': 'vietnamese',
# zh-latn: ??? # Chinese Pinyin
})
# Languages without Polyglossia support:
for key in ('af', # 'afrikaans',
'de_at', # 'naustrian',
'de_at_1901', # 'austrian',
'fr_ca', # 'canadien',
'grc_ibycus', # 'ibycus', (Greek Ibycus encoding)
'sr-latn', # 'serbian script=latin'
):
del(language_codes[key])
def __init__(self, language_code, reporter):
self.language_code = language_code
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
self.warn_msg = 'Language "%s" not supported by XeTeX (polyglossia).'
self.quote_index = 0
self.quotes = ('"', '"')
# language dependent configuration:
# double quotes are "active" in some languages (e.g. German).
self.literal_double_quote = '"' # TODO: use \textquotedbl
def __call__(self):
setup = [r'\usepackage{polyglossia}',
r'\setdefaultlanguage{%s}' % self.language]
if self.otherlanguages:
setup.append(r'\setotherlanguages{%s}' %
','.join(list(self.otherlanguages.keys())))
return '\n'.join(setup)
class XeLaTeXTranslator(latex2e.LaTeXTranslator):
def __init__(self, document):
self.is_xetex = True # typeset with XeTeX or LuaTeX engine
latex2e.LaTeXTranslator.__init__(self, document, Babel)
if self.latex_encoding == 'utf8':
self.requirements.pop('_inputenc', None)
else:
self.requirements['_inputenc'] = (r'\XeTeXinputencoding %s '
% self.latex_encoding)
|
GdZ/scriptfile | refs/heads/master | software/googleAppEngine/lib/ipaddr/ipaddr/__init__.py | 92 | #!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = '2.1.10'
import struct
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def IPAddress(address, version=None):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, 4 or 6. If set, don't try to automatically
            determine what the IP address type is. Important for things
like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6,
'::1'.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
"""
if version:
if version == 4:
return IPv4Address(address)
elif version == 6:
return IPv6Address(address)
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
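# Illustrative results:
#   IPAddress('192.0.2.1')   -> IPv4Address('192.0.2.1')
#   IPAddress('2001:db8::1') -> IPv6Address('2001:db8::1')
#   IPAddress(1)             -> IPv4Address('0.0.0.1')   (ints < 2**32 default to v4)
#   IPAddress(1, version=6)  -> IPv6Address('::1')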
def IPNetwork(address, version=None, strict=False):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
        version: An Integer, 4 or 6. If set, don't try to automatically
            determine what the IP address type is. Important for things
like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
'::1/128'.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if a strict network was requested and a strict
network wasn't given.
"""
if version:
if version == 4:
return IPv4Network(address, strict)
elif version == 6:
return IPv6Network(address, strict)
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
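# Illustrative results:
#   IPNetwork('192.0.2.0/24')              -> IPv4Network('192.0.2.0/24')
#   IPNetwork('192.0.2.1/24')              -> accepted, since strict defaults to False
#   IPNetwork('192.0.2.1/24', strict=True) -> raises ValueError (host bits are set)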
def v4_int_to_packed(address):
"""The binary representation of this address.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The binary representation of this address.
Raises:
ValueError: If the integer is too large to be an IPv4 IP
address.
"""
if address > _BaseV4._ALL_ONES:
raise ValueError('Address too large for IPv4')
return Bytes(struct.pack('!I', address))
def v6_int_to_packed(address):
"""The binary representation of this address.
Args:
        address: An integer representation of an IPv6 IP address.
Returns:
The binary representation of this address.
"""
return Bytes(struct.pack('!QQ', address >> 64, address & (2**64 - 1)))
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
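# e.g. _get_prefix_length(0b1100, 0b1110, 4) == 2  (the two leading bits match)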
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
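# e.g. _count_righthand_zero_bits(0b10100, 8) == 2  (two trailing zero bits)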
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> summarize_address_range(IPv4Address('1.1.1.0'),
IPv4Address('1.1.1.130'))
[IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
IPv4Network('1.1.1.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
The address range collapsed to a list of IPv4Network's or
IPv6Network's.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version is not 4 or 6.
"""
if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
str(first), str(last)))
if first > last:
raise ValueError('last IP address must be greater than first')
networks = []
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = _count_righthand_zero_bits(first_int, ip_bits)
current = None
while nbits >= 0:
addend = 2**nbits - 1
current = first_int + addend
nbits -= 1
if current <= last_int:
break
prefix = _get_prefix_length(first_int, current, ip_bits)
net = ip('%s/%d' % (str(first), prefix))
networks.append(net)
if current == ip._ALL_ONES:
break
first_int = current + 1
first = IPAddress(first_int, version=first._version)
return networks
def _collapse_address_list_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('1.1.0.0/24')
ip2 = IPv4Network('1.1.1.0/24')
ip3 = IPv4Network('1.1.2.0/24')
ip4 = IPv4Network('1.1.3.0/24')
ip5 = IPv4Network('1.1.4.0/24')
ip6 = IPv4Network('1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if cur_addr in ret_array[-1]:
optimized = True
elif cur_addr == ret_array[-1].supernet().subnet()[1]:
ret_array.append(ret_array.pop().supernet())
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return _collapse_address_list_recursive(ret_array)
return ret_array
def collapse_address_list(addresses):
"""Collapse a list of IP objects.
Example:
collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
[IPv4('1.1.0.0/23')]
Args:
addresses: A list of IPv4Network or IPv6Network objects.
Returns:
A list of IPv4Network or IPv6Network objects depending on what we
were passed.
Raises:
TypeError: If passed a list of mixed version objects.
"""
i = 0
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseIP):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip.ip)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(nets[-1])))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
nets = sorted(set(nets))
while i < len(ips):
(first, last) = _find_address_range(ips[i:])
i = ips.index(last) + 1
addrs.extend(summarize_address_range(first, last))
return _collapse_address_list_recursive(sorted(
addrs + nets, key=_BaseNet._get_networks_key))
# backwards compatibility
CollapseAddrList = collapse_address_list
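# A usage sketch (illustrative only, using the classes defined below):
# addresses and networks may be mixed in a single call as long as they share
# an IP version, e.g.
#   collapse_address_list([IPv4Network('192.0.2.0/25'),
#                          IPv4Network('192.0.2.128/25'),
#                          IPv4Address('192.0.2.1')])
#   -> [IPv4Network('192.0.2.0/24')]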
# We need to distinguish between the string and packed-bytes representations
# of an IP address. For example, b'0::1' is the IPv4 address 48.58.58.49,
# while '0::1' is an IPv6 address.
#
# In Python 3, the native 'bytes' type already provides this functionality,
# so we use it directly. For earlier implementations where bytes is not a
# distinct type, we create a subclass of str to serve as a tag.
#
# Usage example (Python 2):
# ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
#
# Usage example (Python 3):
# ip = ipaddr.IPAddress(b'xxxx')
try:
if bytes is str:
raise TypeError("bytes is not a distinct type")
Bytes = bytes
except (NameError, TypeError):
class Bytes(str):
def __repr__(self):
return 'Bytes(%s)' % str.__repr__(self)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddr sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNet):
return obj._get_networks_key()
elif isinstance(obj, _BaseIP):
return obj._get_address_key()
return NotImplemented
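# A minimal sketch of the intended use (names as defined in this module):
#   sorted([IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')],
#          key=get_mixed_type_key)
# sorts networks and bare addresses together instead of raising TypeError.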
class _IPAddrBase(object):
"""The mother class."""
def __index__(self):
return self._ip
def __int__(self):
return self._ip
def __hex__(self):
return hex(self._ip)
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return str(self)
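# For example (a sketch using the IPv6 classes defined later in this file):
#   IPv6Address('2001:db8::1').exploded  ->
#       '2001:0db8:0000:0000:0000:0000:0000:0001'
#   IPv6Address('2001:0db8:0:0:0:0:0:1').compressed  ->  '2001:db8::1'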
class _BaseIP(_IPAddrBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
def __eq__(self, other):
try:
return (self._ip == other._ip
and self._version == other._version)
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __le__(self, other):
gt = self.__gt__(other)
if gt is NotImplemented:
return NotImplemented
return not gt
def __ge__(self, other):
lt = self.__lt__(other)
if lt is NotImplemented:
return NotImplemented
return not lt
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseIP):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self._ip != other._ip:
return self._ip < other._ip
return False
def __gt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseIP):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self._ip != other._ip:
return self._ip > other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, int):
return NotImplemented
return IPAddress(int(self) + other, version=self._version)
def __sub__(self, other):
if not isinstance(other, int):
return NotImplemented
return IPAddress(int(self) - other, version=self._version)
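    # Example sketch: IPAddress('192.0.2.10') + 5 == IPAddress('192.0.2.15')
    # and IPAddress('192.0.2.10') - 1 == IPAddress('192.0.2.9'); adding two
    # addresses to each other is deliberately unsupported.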
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __str__(self):
return '%s' % self._string_from_ip_int(self._ip)
def __hash__(self):
return hash(hex(long(self._ip)))
def _get_address_key(self):
return (self._version, self)
@property
def version(self):
raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def iterhosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
cur = int(self.network) + 1
bcast = int(self.broadcast) - 1
while cur <= bcast:
cur += 1
yield IPAddress(cur - 1, version=self._version)
def __iter__(self):
cur = int(self.network)
bcast = int(self.broadcast)
while cur <= bcast:
cur += 1
yield IPAddress(cur - 1, version=self._version)
def __getitem__(self, n):
network = int(self.network)
broadcast = int(self.broadcast)
if n >= 0:
if network + n > broadcast:
raise IndexError
return IPAddress(network + n, version=self._version)
else:
n += 1
if broadcast + n < network:
raise IndexError
return IPAddress(broadcast + n, version=self._version)
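    # Example sketch: with net = IPNetwork('192.0.2.0/24'), net[0] is
    # IPAddress('192.0.2.0'), net[1] is IPAddress('192.0.2.1') and net[-1]
    # is IPAddress('192.0.2.255').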
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self.network != other.network:
return self.network < other.network
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __gt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self.network != other.network:
return self.network > other.network
if self.netmask != other.netmask:
return self.netmask > other.netmask
return False
def __le__(self, other):
gt = self.__gt__(other)
if gt is NotImplemented:
return NotImplemented
return not gt
def __ge__(self, other):
lt = self.__lt__(other)
if lt is NotImplemented:
return NotImplemented
return not lt
def __eq__(self, other):
try:
return (self._version == other._version
and self.network == other.network
and int(self.netmask) == int(other.netmask))
except AttributeError:
if isinstance(other, _BaseIP):
return (self._version == other._version
                        and self._ip == other._ip)
        return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __str__(self):
return '%s/%s' % (str(self.ip),
str(self._prefixlen))
def __hash__(self):
return hash(int(self.network) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNet):
return (self.network <= other.network and
self.broadcast >= other.broadcast)
# dealing with another address
else:
return (int(self.network) <= int(other._ip) <=
int(self.broadcast))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network in other or self.broadcast in other or (
other.network in self or other.broadcast in self)
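    # Example sketch: IPNetwork('192.0.2.0/25').overlaps(
    # IPNetwork('192.0.2.64/26')) is True, while the disjoint halves
    # '192.0.2.0/25' and '192.0.2.128/25' do not overlap.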
@property
def network(self):
x = self._cache.get('network')
if x is None:
x = IPAddress(self._ip & int(self.netmask), version=self._version)
self._cache['network'] = x
return x
@property
def broadcast(self):
x = self._cache.get('broadcast')
if x is None:
x = IPAddress(self._ip | int(self.hostmask), version=self._version)
self._cache['broadcast'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
version=self._version)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (str(self.ip), self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (str(self.ip), str(self.netmask))
@property
def with_hostmask(self):
return '%s/%s' % (str(self.ip), str(self.hostmask))
@property
def numhosts(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast) - int(self.network) + 1
@property
def version(self):
raise NotImplementedError('BaseNet has no version')
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = IPNetwork('10.1.1.0/24')
addr2 = IPNetwork('10.1.1.0/26')
addr1.address_exclude(addr2) =
[IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]
or IPv6:
addr1 = IPNetwork('::1/32')
addr2 = IPNetwork('::1/128')
addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
IPNetwork('::2/127'),
IPNetwork('::4/126'),
IPNetwork('::8/125'),
...
IPNetwork('0:0:8000::/33')]
Args:
other: An IPvXNetwork object of the same type.
Returns:
A sorted list of IPvXNetwork objects addresses which is self
minus other.
Raises:
            TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError("%s is not a network object" % str(other))
if other not in self:
raise ValueError('%s not contained in %s' % (str(other),
str(self)))
if other == self:
return []
ret_addrs = []
# Make sure we're comparing the network of other.
other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
version=other._version)
s1, s2 = self.subnet()
while s1 != other and s2 != other:
if other in s1:
ret_addrs.append(s2)
s1, s2 = s1.subnet()
elif other in s2:
ret_addrs.append(s1)
s1, s2 = s2.subnet()
else:
# If we got here, there's a bug somewhere.
                assert False, ('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(str(s1), str(s2), str(other)))
if s1 == other:
ret_addrs.append(s2)
elif s2 == other:
ret_addrs.append(s1)
else:
# If we got here, there's a bug somewhere.
            assert False, ('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(str(s1), str(s2), str(other)))
return sorted(ret_addrs, key=_BaseNet._get_networks_key)
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
0 if self == other
eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
1 if self > other
eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
IPv6('1080::1:200C:417A/112') >
IPv6('1080::0:200C:417A/112')
If the IP versions of self and other are different, returns:
-1 if self._version < other._version
eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
1 if self._version > other._version
eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
"""
if self._version < other._version:
return -1
if self._version > other._version:
return 1
# self._version == other._version below here:
if self.network < other.network:
return -1
if self.network > other.network:
return 1
# self.network == other.network below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
# self.network == other.network and self.netmask == other.netmask
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network, self.netmask)
def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if not prefixlen and prefixlen != 0:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
def _prefix_from_ip_int(self, ip_int, mask=32):
"""Return prefix length from the decimal netmask.
Args:
            ip_int: An integer, the netmask in expanded integer form.
            mask: The maximum prefix length, in bits. Defaults to 32.
Returns:
An integer, the prefix length.
"""
while mask:
if ip_int & 1 == 1:
break
ip_int >>= 1
mask -= 1
return mask
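    # Example sketch: for the netmask 255.255.255.0 (0xFFFFFF00) the eight
    # trailing zero bits are shifted away, leaving 32 - 8 = 24.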
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), return a list with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
                larger number (smaller network) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if not self._is_valid_netmask(str(new_prefixlen)):
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, str(self)))
first = IPNetwork('%s/%s' % (str(self.network),
str(self._prefixlen + prefixlen_diff)),
version=self._version)
yield first
current = first
while True:
broadcast = current.broadcast
if broadcast == self.broadcast:
return
new_addr = IPAddress(int(broadcast) + 1, version=self._version)
current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
version=self._version)
yield current
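    # Example sketch: list(IPNetwork('192.0.2.0/24').iter_subnets(
    # prefixlen_diff=2)) yields the four /26 blocks 192.0.2.0/26,
    # 192.0.2.64/26, 192.0.2.128/26 and 192.0.2.192/26.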
def masked(self):
"""Return the network object with the host bits masked out."""
return IPNetwork('%s/%d' % (self.network, self._prefixlen),
version=self._version)
def subnet(self, prefixlen_diff=1, new_prefix=None):
"""Return a list of subnets, rather than an iterator."""
return list(self.iter_subnets(prefixlen_diff, new_prefix))
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
if self.prefixlen - prefixlen_diff < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return IPNetwork('%s/%s' % (str(self.network),
str(self.prefixlen - prefixlen_diff)),
version=self._version)
# backwards compatibility
Subnet = subnet
Supernet = supernet
AddressExclude = address_exclude
CompareNetworks = compare_networks
Contains = __contains__
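    # Example sketch: IPNetwork('192.0.2.0/25').supernet() covers the whole
    # /24, while IPNetwork('192.0.2.0/24').subnet() splits it back into
    # [IPNetwork('192.0.2.0/25'), IPNetwork('192.0.2.128/25')].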
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2**IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
def __init__(self, address):
self._version = 4
self._max_prefixlen = IPV4LENGTH
def _explode_shorthand_ip_string(self):
return str(self)
def _ip_int_from_string(self, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError(ip_str)
packed_ip = 0
for oc in octets:
try:
packed_ip = (packed_ip << 8) | self._parse_octet(oc)
except ValueError:
raise AddressValueError(ip_str)
return packed_ip
def _parse_octet(self, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._DECIMAL_DIGITS.issuperset(octet_str):
raise ValueError
octet_int = int(octet_str, 10)
# Disallow leading zeroes, because no clear standard exists on
# whether these should be interpreted as decimal or octal.
if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
raise ValueError
return octet_int
def _string_from_ip_int(self, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
octets = []
for _ in xrange(4):
octets.insert(0, str(ip_int & 0xFF))
ip_int >>= 8
return '.'.join(octets)
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def version(self):
return self._version
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in IPv4Network('240.0.0.0/4')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 1918.
"""
return (self in IPv4Network('10.0.0.0/8') or
self in IPv4Network('172.16.0.0/12') or
self in IPv4Network('192.168.0.0/16'))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in IPv4Network('224.0.0.0/4')
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self in IPv4Network('0.0.0.0')
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in IPv4Network('127.0.0.0/8')
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):
"""Represent and manipulate single IPv4 Addresses."""
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
'192.168.1.1'
Additionally, an integer can be passed, so
IPv4Address('192.168.1.1') == IPv4Address(3232235777).
or, more generally
IPv4Address(int(IPv4Address('192.168.1.1'))) ==
IPv4Address('192.168.1.1')
Raises:
            AddressValueError: If address isn't a valid IPv4 address.
"""
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if isinstance(address, Bytes):
try:
self._ip, = struct.unpack('!I', address)
except struct.error:
raise AddressValueError(address) # Wrong length.
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
self._ip = self._ip_int_from_string(addr_str)
class IPv4Network(_BaseV4, _BaseNet):
"""This class represents and manipulates 32-bit IPv4 networks.
Attributes: [examples for IPv4Network('1.2.3.4/27')]
._ip: 16909060
.ip: IPv4Address('1.2.3.4')
.network: IPv4Address('1.2.3.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast: IPv4Address('1.2.3.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))
def __init__(self, address, strict=False):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.168.1.1/24'
'192.168.1.1/255.255.255.0'
'192.168.1.1/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.168.1.1'
'192.168.1.1/255.255.255.255'
'192.168.1.1/32'
            are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.168.1.1') == IPv4Network(3232235777).
or, more generally
IPv4Network(int(IPv4Network('192.168.1.1'))) ==
IPv4Network('192.168.1.1')
strict: A boolean. If true, ensure that we have been passed
                a true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
            AddressValueError: If address isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV4.__init__(self, address)
# Constructing from an integer or packed bytes.
if isinstance(address, (int, long, Bytes)):
self.ip = IPv4Address(address)
self._ip = self.ip._ip
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv4Address(self._ip)
if len(addr) == 2:
mask = addr[1].split('.')
if len(mask) == 4:
# We have dotted decimal netmask.
if self._is_valid_netmask(addr[1]):
self.netmask = IPv4Address(self._ip_int_from_string(
addr[1]))
elif self._is_hostmask(addr[1]):
self.netmask = IPv4Address(
self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
else:
raise NetmaskValueError('%s is not a valid netmask'
% addr[1])
self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
else:
# We have a netmask in prefix length form.
if not self._is_valid_netmask(addr[1]):
raise NetmaskValueError(addr[1])
self._prefixlen = int(addr[1])
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
if self._prefixlen == (self._max_prefixlen - 1):
self.iterhosts = self.__iter__
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
if [x for x in mask if int(x) not in self._valid_mask_octets]:
return False
            # Octets must be non-increasing; compare numerically, not as
            # strings.
            if [y for idx, y in enumerate(mask) if idx > 0 and
                int(y) > int(mask[idx - 1])]:
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= self._max_prefixlen
# backwards compatibility
IsRFC1918 = lambda self: self.is_private
IsMulticast = lambda self: self.is_multicast
IsLoopback = lambda self: self.is_loopback
IsLinkLocal = lambda self: self.is_link_local
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
_ALL_ONES = (2**IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
def __init__(self, address):
self._version = 6
self._max_prefixlen = IPV6LENGTH
def _ip_int_from_string(self, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
A long, the IPv6 ip_str.
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
if len(parts) < 3:
raise AddressValueError(ip_str)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
ipv4_int = IPv4Address(parts.pop())._ip
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
if len(parts) > self._HEXTET_COUNT + 1:
raise AddressValueError(ip_str)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
try:
skip_index, = (
[i for i in xrange(1, len(parts) - 1) if not parts[i]] or
[None])
except ValueError:
# Can't have more than one '::'
raise AddressValueError(ip_str)
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
raise AddressValueError(ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
raise AddressValueError(ip_str) # :$ requires ::$
parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
raise AddressValueError(ip_str)
else:
# Otherwise, allocate the entire address to parts_hi. The endpoints
# could still be empty, but _parse_hextet() will check for that.
if len(parts) != self._HEXTET_COUNT:
raise AddressValueError(ip_str)
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0L
for i in xrange(parts_hi):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in xrange(-parts_lo, 0):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
return ip_int
except ValueError:
raise AddressValueError(ip_str)
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError
hextet_int = int(hextet_str, 16)
if hextet_int > 0xFFFF:
raise ValueError
return hextet_int
def _compress_hextets(self, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index in range(len(hextets)):
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
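    # Example sketch: ['2001', 'db8', '0', '0', '0', '0', '0', '1'] becomes
    # ['2001', 'db8', '', '1'], so ':'.join(...) yields '2001:db8::1'.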
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if not ip_int and ip_int != 0:
ip_int = int(self._ip)
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = []
for x in range(0, 32, 4):
hextets.append('%x' % int(hex_str[x:x+4], 16))
hextets = self._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, _BaseNet):
ip_str = str(self.ip)
else:
ip_str = str(self)
ip_int = self._ip_int_from_string(ip_str)
parts = []
for i in xrange(self._HEXTET_COUNT):
parts.append('%04x' % (ip_int & 0xFFFF))
ip_int >>= 16
parts.reverse()
if isinstance(self, _BaseNet):
return '%s/%d' % (':'.join(parts), self.prefixlen)
return ':'.join(parts)
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def version(self):
return self._version
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in IPv6Network('ff00::/8')
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self in IPv6Network('::/8') or
self in IPv6Network('100::/8') or
self in IPv6Network('200::/7') or
self in IPv6Network('400::/6') or
self in IPv6Network('800::/5') or
self in IPv6Network('1000::/4') or
self in IPv6Network('4000::/3') or
self in IPv6Network('6000::/3') or
self in IPv6Network('8000::/3') or
self in IPv6Network('A000::/3') or
self in IPv6Network('C000::/3') or
self in IPv6Network('E000::/4') or
self in IPv6Network('F000::/5') or
self in IPv6Network('F800::/6') or
self in IPv6Network('FE00::/9'))
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in IPv6Network('fe80::/10')
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in IPv6Network('fec0::/10')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
return self in IPv6Network('fc00::/7')
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
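    # Example sketch (well-known documentation value): for
    # IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo the embedded
    # server is IPv4Address('65.54.227.120') and the client (stored
    # bit-inverted in the low 32 bits) is IPv4Address('192.0.2.45').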
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
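    # Example sketch: IPv6Address('2002:c000:204::').sixtofour returns
    # IPv4Address('192.0.2.4'), the IPv4 address embedded just after the
    # 2002::/16 prefix.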
class IPv6Address(_BaseV6, _BaseIP):
"""Represent and manipulate single IPv6 Addresses.
"""
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:4860::') ==
IPv6Address(42541956101370907050197289607612071936L).
or, more generally
IPv6Address(IPv6Address('2001:4860::')._ip) ==
IPv6Address('2001:4860::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if isinstance(address, Bytes):
try:
hi, lo = struct.unpack('!QQ', address)
except struct.error:
raise AddressValueError(address) # Wrong length.
self._ip = (hi << 64) | lo
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if not addr_str:
raise AddressValueError('')
self._ip = self._ip_int_from_string(addr_str)
class IPv6Network(_BaseV6, _BaseNet):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
.ip: IPv6Address('2001:658:22a:cafe:200::1')
.network: IPv6Address('2001:658:22a:cafe::')
.hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
.broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
.netmask: IPv6Address('ffff:ffff:ffff:ffff::')
.prefixlen: 64
"""
def __init__(self, address, strict=False):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the IP
and prefix/netmask.
'2001:4860::/128'
'2001:4860:0000:0000:0000:0000:0000:0000/128'
'2001:4860::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:4860::') ==
IPv6Network(42541956101370907050197289607612071936L).
or, more generally
IPv6Network(IPv6Network('2001:4860::')._ip) ==
IPv6Network('2001:4860::')
strict: A boolean. If true, ensure that we have been passed
                a true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV6.__init__(self, address)
# Constructing from an integer or packed bytes.
if isinstance(address, (int, long, Bytes)):
self.ip = IPv6Address(address)
self._ip = self.ip._ip
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv6Address(self._ip)
if len(addr) == 2:
if self._is_valid_netmask(addr[1]):
self._prefixlen = int(addr[1])
else:
raise NetmaskValueError(addr[1])
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
if self._prefixlen == (self._max_prefixlen - 1):
self.iterhosts = self.__iter__
def _is_valid_netmask(self, prefixlen):
"""Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask.
"""
try:
prefixlen = int(prefixlen)
except ValueError:
return False
return 0 <= prefixlen <= self._max_prefixlen
@property
def with_netmask(self):
return self.with_prefixlen
|
locaweb/leela | refs/heads/master | try/src/try_leela/suites/smoke/test_make.py | 1 | # -*- coding: utf-8 -*-
import unittest
from try_leela import env
from try_leela import helpers
class TestMake(unittest.TestCase):
def setUp(self):
self.driver = env.driver()
def test_make_returns_name(self):
with self.driver.session("smoke/test_make") as session:
session.execute("make (%(rnd_name.0)s)")
self.assertEqual("name", session.message()[0])
def test_linking_two_vertexes(self):
with self.driver.session("smoke/test_make") as session:
a_guid = helpers.make(session)
b_guid = helpers.make(session)
helpers.link(session, a_guid, "foobar", b_guid)
self.assertEqual([["path", [["foobar", b_guid]]]], session.execute_fetch("path %s -[foobar]> ()" % (a_guid,)))
|
X-ROM/android_external_skia | refs/heads/jb3 | PRESUBMIT.py | 13 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Skia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def _CheckChangeHasEol(input_api, output_api, source_file_filter=None):
"""Checks that files end with atleast one \n (LF)."""
eof_files = []
for f in input_api.AffectedSourceFiles(source_file_filter):
contents = input_api.ReadFile(f, 'rb')
        # Check that the file ends in at least one newline character.
if len(contents) > 1 and contents[-1:] != '\n':
eof_files.append(f.LocalPath())
if eof_files:
return [output_api.PresubmitPromptWarning(
'These files should end in a newline character:',
items=eof_files)]
return []
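# Illustrative behaviour sketch: a file whose raw contents end in '\n' passes
# this check, while one ending in any other byte is listed in the
# PresubmitPromptWarning above.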
def _CommonChecks(input_api, output_api):
"""Presubmit checks common to upload and commit."""
results = []
sources = lambda x: (x.LocalPath().endswith('.h') or
x.LocalPath().endswith('.gypi') or
x.LocalPath().endswith('.gyp') or
x.LocalPath().endswith('.py') or
x.LocalPath().endswith('.sh') or
x.LocalPath().endswith('.cpp'))
results.extend(
_CheckChangeHasEol(
input_api, output_api, source_file_filter=sources))
return results
def CheckChangeOnUpload(input_api, output_api):
"""Presubmit checks for the change on upload.
The following are the presubmit checks:
* Check change has one and only one EOL.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def _CheckTreeStatus(input_api, output_api, json_url):
"""Check whether to allow commit.
Args:
input_api: input related apis.
output_api: output related apis.
json_url: url to download json style status.
"""
tree_status_results = input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api, json_url=json_url)
if not tree_status_results:
# Check for caution state only if tree is not closed.
connection = input_api.urllib2.urlopen(json_url)
status = input_api.json.loads(connection.read())
connection.close()
if 'caution' in status['message'].lower():
short_text = 'Tree state is: ' + status['general_state']
long_text = status['message'] + '\n' + json_url
tree_status_results.append(
output_api.PresubmitPromptWarning(
message=short_text, long_text=long_text))
return tree_status_results
def CheckChangeOnCommit(input_api, output_api):
"""Presubmit checks for the change on commit.
The following are the presubmit checks:
    * Check that changed source files end with at least one newline.
* Ensures that the Skia tree is open in
http://skia-tree-status.appspot.com/. Shows a warning if it is in 'Caution'
state and an error if it is in 'Closed' state.
"""
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(
_CheckTreeStatus(input_api, output_api, json_url=(
'http://skia-tree-status.appspot.com/banner-status?format=json')))
return results
|
dpaleino/bootchart2 | refs/heads/master | pybootchartgui/main.py | 2 | # This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import optparse
import parsing
import gui
import batch
def _mk_options_parser():
"""Make an options parser."""
usage = "%prog [options] PATH, ..., PATH"
version = "%prog v0.0.0"
parser = optparse.OptionParser(usage, version=version)
parser.add_option("-i", "--interactive", action="store_true", dest="interactive", default=False,
help="start in active mode")
parser.add_option("-f", "--format", dest="format", default = None,
help="image format (...); default format ...")
parser.add_option("-o", "--output", dest="output", metavar="PATH", default="bootchart.png",
help="output path (file or directory) where charts are stored")
parser.add_option("-n", "--no-prune", action="store_false", dest="prune", default=True,
help="do not prune the process tree")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
help="suppress informational messages")
parser.add_option("-t", "--boot-time", action="store_true", dest="boottime", default=False,
help="only display the boot time of the boot in text format (stdout)")
parser.add_option("--very-quiet", action="store_true", dest="veryquiet", default=False,
help="suppress all messages except errors")
parser.add_option("--verbose", action="store_true", dest="verbose", default=False,
help="print all messages")
parser.add_option("--profile", action="store_true", dest="profile", default=False,
help="profile rendering of chart (only useful when in batch mode indicated by -f)")
parser.add_option("--show-pid", action="store_true", dest="show_pid", default=False,
help="show process ids in the bootchart as 'processname [pid]'")
parser.add_option("--show-all", action="store_true", dest="show_all", default=False,
help="show all process information in the bootchart as '/process/path/exe [pid] [args]'")
parser.add_option("--crop-after", dest="crop_after", metavar="PROCESS", default=None,
help="crop chart when idle after PROCESS is started")
parser.add_option("--annotate", action="append", dest="annotate", metavar="PROCESS", default=None,
help="annotate position where PROCESS is started; can be specified multiple times. " +
"To create a single annotation when any one of a set of processes is started, use commas to separate the names")
parser.add_option("--annotate-file", dest="annotate_file", metavar="FILENAME", default=None,
help="filename to write annotation points to")
return parser
class Writer:
def __init__(self, write, options):
self.write = write
self.options = options
def error(self, msg):
self.write(msg)
def warn(self, msg):
if not self.options.quiet:
self.write(msg)
def info(self, msg):
if self.options.verbose:
self.write(msg)
def status(self, msg):
if not self.options.quiet:
self.write(msg)
def _mk_writer(options):
def write(s):
print s
return Writer(write, options)
def _get_filename(paths, options):
"""Construct a usable filename for outputs based on the paths and options given on the commandline."""
dname = ""
fname = "bootchart"
if options.output and not(os.path.isdir(options.output)):
return options.output
if options.output:
dname = options.output
if len (paths) == 1:
path = paths[0]
if os.path.isdir(path):
fname = os.path.split(path)[-1]
elif os.path.splitext(path)[1] in [".tar", ".tgz", ".tar.gz"]:
fname = os.path.splitext(path)[0]
return os.path.join (dname, fname + "." + options.format)
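# Illustrative sketch: given a single input path 'boot.tgz' together with
# '-o /tmp -f png', the chart would be written to /tmp/boot.png; a
# non-directory --output value is returned verbatim.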
def main(argv=None):
try:
if argv is None:
argv = sys.argv[1:]
parser = _mk_options_parser()
options, args = parser.parse_args(argv)
writer = _mk_writer(options)
if len(args) == 0:
print "No path given, trying /var/log/bootchart.tgz"
args = [ "/var/log/bootchart.tgz" ]
res = parsing.parse(writer, args, options.prune,
options.crop_after, options.annotate)
if options.interactive or options.output == None:
gui.show(res, options)
elif options.boottime:
import math
proc_tree = res[3]
if proc_tree.idle:
duration = proc_tree.idle
else:
duration = proc_tree.duration
dur = duration / 100.0
print '%02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60))
else:
if options.annotate_file:
f = open (options.annotate_file, "w")
try:
for time in res[4]:
if time is not None:
# output as ms
print >> f, time * 10
else:
print >> f
finally:
f.close()
filename = _get_filename(args, options)
def render():
batch.render(writer, res, options, filename)
if options.profile:
import cProfile
import pstats
profile = '%s.prof' % os.path.splitext(filename)[0]
cProfile.runctx('render()', globals(), locals(), profile)
p = pstats.Stats(profile)
p.strip_dirs().sort_stats('time').print_stats(20)
else:
render()
return 0
except parsing.ParseError, ex:
print("Parse error: %s" % ex)
return 2
if __name__ == '__main__':
sys.exit(main())
|
dd00/commandergenius | refs/heads/dd00 | project/jni/python/src/Lib/encodings/mac_turkish.py | 593 | """ Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-turkish',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE
u'\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\uf8a0' # 0xF5 -> undefined1
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
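### Usage sketch (assumes this module follows the stdlib charmap codec
### layout): the tables plug straight into codecs' charmap helpers.
#
#     import codecs
#     text, _ = codecs.charmap_decode(b'\x8b', 'strict', decoding_table)
#     assert text == u'\xe3'    # 0x8B -> LATIN SMALL LETTER A WITH TILDE
#     data, _ = codecs.charmap_encode(u'\xe3', 'strict', encoding_table)
#     assert data == b'\x8b'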
|
joelstanner/learning_journal | refs/heads/master | features/steps.py | 1 | from lettuce import before, after, world, step
import os
from contextlib import closing
from journal import connect_db
from journal import DB_SCHEMA
TEST_DSN = 'dbname=test_learning_journal user=postgres'
settings = {'db': TEST_DSN}
INPUT_BTN = "<input class='display-block' type='submit' value='Add post' name='Add post' />"
@world.absorb
def make_an_entry(app):
entry_data = {
'title': 'Hello there',
'text': '''#This is a post'
```python
def func(x):
return x
```''',
}
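    # status='3*' lets WebTest accept any 3xx redirect from the POST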
response = app.post('/add', params=entry_data, status='3*')
return response
@world.absorb
def login_helper(username, password, app):
"""encapsulate app login for reuse in tests
Accept all status codes so that we can make assertions in tests
"""
login_data = {'username': username, 'password': password}
return app.post('/login', params=login_data, status='*')
@before.all
def init_db():
with closing(connect_db(settings)) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
@after.all
def clear_db(total):
with closing(connect_db(settings)) as db:
db.cursor().execute("DROP TABLE entries")
db.commit()
@after.each_feature
def clear_entries(feature):
with closing(connect_db(settings)) as db:
db.cursor().execute("DELETE FROM entries")
db.commit()
@before.each_scenario
def app(scenario):
from journal import main
from webtest import TestApp
os.environ['DATABASE_URL'] = TEST_DSN
app = main()
world.app = TestApp(app)
login_helper('admin', 'secret', world.app)
@step('a journal home page')
def get_home_page(step):
response = world.app.get('/')
assert response.status_code == 200
actual = response.body
expected = 'Nothin!'
assert expected in actual
@step('When I click on the entry link')
def click_on_the_entry_link(step):
login_helper('admin', 'secret', world.app)
world.make_an_entry(world.app)
response = world.app.get('/')
response = response.click(href='/post/1')
assert response.status_code == 200
assert 'This is a post' in response.body
@step('Then I get the detail page for that entry')
def i_get_the_detail_page(step):
response = world.app.get('/post/1')
assert response.status_code == 200
assert 'This is a post' in response.body
@step('a logged in user')
def a_logged_in_user(step):
redirect = login_helper('admin', 'secret', world.app)
assert redirect.status_code == 302
response = redirect.follow()
assert response.status_code == 200
actual = response.body
assert INPUT_BTN in actual
@step('a journal detail page')
def journal_detail_page(step):
response = world.app.get('/post/1')
assert response.status_code == 200
assert 'This is a post' in response.body
@step('I click on the edit button')
def click_on_the_edit_button(step):
response = world.app.get('/post/1')
assert response.status_code == 200
response = response.click(href='/edit/1')
assert response.status_code == 200
@step('I am taken to the edit page for that entry')
def taken_to_the_edit_page(step):
response = world.app.get('/edit/1')
assert response.status_code == 200
assert "name='Save post'" in response.body
@step('a journal edit form')
def a_journal_edit_form(step):
response = world.app.get('/edit/1')
assert response.status_code == 200
assert response.form
@step('I type in the edit box')
def type_in_the_edit_box(step):
response = world.app.get('/edit/1')
assert "name='Save post'" in response.body
response.form['title'] = 'Test edit'
response.form['text'] = '''
```python
def func(x):
print "Edit Success"
return x
```'''
redirect = response.form.submit()
assert redirect.status_code == 302
response = redirect.follow()
assert response.status_code == 200
@step('I can use MarkDown to format my post')
def use_markdown_to_format(step):
response = world.app.get('/post/1')
assert response.status_code == 200
assert "<pre>" in response.body
@step('a new journal detail page')
def new_detail_page(step):
response = world.app.get('/post/1')
assert response.status_code == 200
@step('I look at a post')
def look_at_a_post(step):
response = world.app.get('/post/1')
assert 'Test edit' in response.body
@step('I can see colorized code samples')
def can_see_colorized_code(step):
response = world.app.get('/post/1')
assert '<span class="k">' in response.body
|
shownomercy/django | refs/heads/master | django/contrib/sites/shortcuts.py | 615 | from __future__ import unicode_literals
from django.apps import apps
def get_current_site(request):
"""
Checks if contrib.sites is installed and returns either the current
``Site`` object or a ``RequestSite`` object based on the request.
"""
# Imports are inside the function because its point is to avoid importing
# the Site models when django.contrib.sites isn't installed.
if apps.is_installed('django.contrib.sites'):
from .models import Site
return Site.objects.get_current(request)
else:
from .requests import RequestSite
return RequestSite(request)
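# Usage sketch (hypothetical view code, not part of this module):
#
#     def my_view(request):
#         current_site = get_current_site(request)
#         return HttpResponse(current_site.domain)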
|
brandond/ansible | refs/heads/devel | lib/ansible/modules/network/vyos/vyos_l3_interface.py | 56 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_l3_interface
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L3 interfaces on VyOS network devices
description:
- This module provides declarative management of L3 interfaces
on VyOS network devices.
notes:
  - Tested against VyOS 1.1.7
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
    description: List of L3 interface definitions.
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Set eth0 IPv4 address
vyos_l3_interface:
name: eth0
ipv4: 192.168.0.1/24
- name: Remove eth0 IPv4 address
vyos_l3_interface:
name: eth0
state: absent
- name: Set IP addresses on aggregate
vyos_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
vyos_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces ethernet eth0 address '192.168.0.1/24'
"""
import socket
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import is_masklen, validate_ip_address
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def is_ipv4(value):
if value:
address = value.split('/')
if is_masklen(address[1]) and validate_ip_address(address[0]):
return True
return False
def is_ipv6(value):
if value:
address = value.split('/')
if 0 <= int(address[1]) <= 128:
try:
socket.inet_pton(socket.AF_INET6, address[0])
except socket.error:
return False
return True
return False
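# Note: both helpers above assume CIDR 'address/masklen' strings, e.g.
# is_ipv4('192.168.0.1/24') -> True, is_ipv6('fd5d:12c9:2201:1::1/64') -> True;
# a bare address without '/' would raise an IndexError here.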
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
name = w['name']
ipv4 = w['ipv4']
ipv6 = w['ipv6']
state = w['state']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent' and obj_in_have:
if not ipv4 and not ipv6 and (obj_in_have['ipv4'] or obj_in_have['ipv6']):
if name == "lo":
commands.append('delete interfaces loopback lo address')
else:
commands.append('delete interfaces ethernet ' + name + ' address')
else:
if ipv4 and ipv4 in obj_in_have['ipv4']:
if name == "lo":
commands.append('delete interfaces loopback lo address ' + ipv4)
else:
commands.append('delete interfaces ethernet ' + name + ' address ' + ipv4)
if ipv6 and ipv6 in obj_in_have['ipv6']:
if name == "lo":
commands.append('delete interfaces loopback lo address ' + ipv6)
else:
commands.append('delete interfaces ethernet ' + name + ' address ' + ipv6)
elif (state == 'present' and obj_in_have):
if ipv4 and ipv4 not in obj_in_have['ipv4']:
if name == "lo":
commands.append('set interfaces loopback lo address ' + ipv4)
else:
commands.append('set interfaces ethernet ' + name + ' address ' + ipv4)
if ipv6 and ipv6 not in obj_in_have['ipv6']:
if name == "lo":
commands.append('set interfaces loopback lo address ' + ipv6)
else:
commands.append('set interfaces ethernet ' + name + ' address ' + ipv6)
return commands
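# Sketch of the mapping: want = [{'name': 'eth0', 'ipv4': '192.168.0.1/24',
# 'ipv6': None, 'state': 'present'}] against have = [{'name': 'eth0',
# 'ipv4': [], 'ipv6': []}] yields
# ['set interfaces ethernet eth0 address 192.168.0.1/24'].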
def map_config_to_obj(module):
obj = []
output = run_commands(module, ['show interfaces'])
    # each interface block starts on a line beginning with 'e' (eth*) or
    # 'l' (lo); the split consumes that leading character
    lines = re.split(r'\n[el]', output[0])[1:]
if len(lines) > 0:
for line in lines:
splitted_line = line.split()
if len(splitted_line) > 0:
ipv4 = []
ipv6 = []
                if splitted_line[0].lower().startswith('th'):
                    name = 'e' + splitted_line[0].lower()  # restore 'eth*'
                elif splitted_line[0].lower().startswith('o'):
                    name = 'l' + splitted_line[0].lower()  # restore 'lo'
                else:
                    # unrecognized interface line; skip it so 'name' can
                    # never be used while unbound below
                    continue
for i in splitted_line[1:]:
if (('.' in i or ':' in i) and '/' in i):
value = i.split(r'\n')[0]
if is_ipv4(value):
ipv4.append(value)
elif is_ipv6(value):
ipv6.append(value)
obj.append({'name': name,
'ipv4': ipv4,
'ipv6': ipv6})
return obj
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'ipv4': module.params['ipv4'],
'ipv6': module.params['ipv6'],
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
ipv4=dict(),
ipv6=dict(),
state=dict(default='present',
choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
edx/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/core/management/commands/sqlreset.py | 313 | from optparse import make_option
from django.core.management.base import AppCommand
from django.core.management.sql import sql_reset
from django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the DROP TABLE SQL, then the CREATE TABLE SQL, for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_reset(app, self.style, connections[options.get('database', DEFAULT_DB_ALIAS)])).encode('utf-8')
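    # Invocation sketch: "./manage.py sqlreset myapp" prints the DROP TABLE
    # and CREATE TABLE statements for myapp, wrapped in a transaction
    # because output_transaction is True.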
|
Lekanich/intellij-community | refs/heads/master | python/testData/inspections/PyStringFormatInspection1/test.py | 39 | my_dict = {'class': 3}
my_dict['css_class'] = ""
if my_dict['class']:
my_dict['css_class'] = 'class %(class)s' % my_dict
my_dict['tmp'] = 'classes %(css_class)s' % my_dict
my_dict['tmp'] = 'classes %(claz)s' % <warning descr="Key 'claz' has no following argument">my_dict</warning>
#PY-4647
argument_pattern = re.compile(r'(%s)\s*(\(\s*(%s)\s*\)\s*)?$'
% ((states.Inliner.simplename,) * 2))
t, num = ('foo',), 2
res = '%d %d' % (<warning descr="Unexpected type str"><warning descr="Unexpected type str">t * num</warning></warning>) |
netgroup/dreamer-ryu | refs/heads/master | ryu/services/protocols/bgp/rtconf/neighbors.py | 3 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Running or runtime configuration related to bgp peers/neighbors.
"""
from abc import abstractmethod
import logging
import netaddr
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.lib.packet.bgp import BGPOptParamCapabilityEnhancedRouteRefresh
from ryu.lib.packet.bgp import BGPOptParamCapabilityMultiprotocol
from ryu.lib.packet.bgp import BGPOptParamCapabilityRouteRefresh
from ryu.lib.packet.bgp import BGP_CAP_ENHANCED_ROUTE_REFRESH
from ryu.lib.packet.bgp import BGP_CAP_MULTIPROTOCOL
from ryu.lib.packet.bgp import BGP_CAP_ROUTE_REFRESH
from ryu.services.protocols.bgp.base import OrderedDict
from ryu.services.protocols.bgp.rtconf.base import ADVERTISE_PEER_AS
from ryu.services.protocols.bgp.rtconf.base import BaseConf
from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6
from ryu.services.protocols.bgp.rtconf.base import CAP_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_RTC
from ryu.services.protocols.bgp.rtconf.base import compute_optional_conf
from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
from ryu.services.protocols.bgp.rtconf.base import ConfWithId
from ryu.services.protocols.bgp.rtconf.base import ConfWithIdListener
from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
from ryu.services.protocols.bgp.rtconf.base import ConfWithStatsListener
from ryu.services.protocols.bgp.rtconf.base import HOLD_TIME
from ryu.services.protocols.bgp.rtconf.base import MAX_PREFIXES
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import RTC_AS
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.base import validate
from ryu.services.protocols.bgp.rtconf.base import validate_med
from ryu.services.protocols.bgp.rtconf.base import validate_soo_list
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
from ryu.services.protocols.bgp.info_base.base import Filter
from ryu.services.protocols.bgp.info_base.base import PrefixFilter
from ryu.services.protocols.bgp.info_base.base import AttributeMap
LOG = logging.getLogger('bgpspeaker.rtconf.neighbor')
# Various neighbor settings.
REMOTE_AS = 'remote_as'
IP_ADDRESS = 'ip_address'
ENABLED = 'enabled'
CHANGES = 'changes'
LOCAL_ADDRESS = 'local_address'
LOCAL_PORT = 'local_port'
PEER_NEXT_HOP = 'peer_next_hop'
PASSWORD = 'password'
IN_FILTER = 'in_filter'
OUT_FILTER = 'out_filter'
IS_ROUTE_SERVER_CLIENT = 'is_route_server_client'
CHECK_FIRST_AS = 'check_first_as'
ATTRIBUTE_MAP = 'attribute_map'
IS_NEXT_HOP_SELF = 'is_next_hop_self'
# Default value constants.
DEFAULT_CAP_GR_NULL = True
DEFAULT_CAP_REFRESH = True
DEFAULT_CAP_ENHANCED_REFRESH = False
DEFAULT_CAP_MBGP_IPV4 = True
DEFAULT_CAP_MBGP_IPV6 = False
DEFAULT_CAP_MBGP_VPNV4 = False
DEFAULT_CAP_MBGP_VPNV6 = False
DEFAULT_HOLD_TIME = 40
DEFAULT_ENABLED = True
DEFAULT_CAP_RTC = False
DEFAULT_IN_FILTER = []
DEFAULT_OUT_FILTER = []
DEFAULT_IS_ROUTE_SERVER_CLIENT = False
DEFAULT_CHECK_FIRST_AS = False
DEFAULT_IS_NEXT_HOP_SELF = False
# Default value for *MAX_PREFIXES* setting is set to 0.
DEFAULT_MAX_PREFIXES = 0
DEFAULT_ADVERTISE_PEER_AS = False
@validate(name=ENABLED)
def validate_enabled(enabled):
if not isinstance(enabled, bool):
        raise ConfigValueError(desc='Enabled property is not an instance of '
                               'boolean')
return enabled
@validate(name=CHANGES)
def validate_changes(changes):
for k, v in changes.iteritems():
if k not in (MULTI_EXIT_DISC, ENABLED):
raise ConfigValueError(desc="Unknown field to change: %s" % k)
if k == MULTI_EXIT_DISC:
validate_med(v)
elif k == ENABLED:
validate_enabled(v)
return changes
def valid_ip_address(addr):
if not netaddr.valid_ipv4(addr) and not netaddr.valid_ipv6(addr):
return False
return True
@validate(name=IP_ADDRESS)
def validate_ip_address(ip_address):
if not valid_ip_address(ip_address):
raise ConfigValueError(desc='Invalid neighbor ip_address: %s' %
ip_address)
return str(netaddr.IPAddress(ip_address))
@validate(name=LOCAL_ADDRESS)
def validate_local_address(ip_address):
if not valid_ip_address(ip_address):
raise ConfigValueError(desc='Invalid local ip_address: %s' %
ip_address)
return str(netaddr.IPAddress(ip_address))
@validate(name=PEER_NEXT_HOP)
def validate_next_hop(ip_address):
if not valid_ip_address(ip_address):
raise ConfigValueError(desc='Invalid next_hop ip_address: %s' %
ip_address)
return str(netaddr.IPAddress(ip_address))
@validate(name=PASSWORD)
def validate_password(password):
return password
@validate(name=LOCAL_PORT)
def validate_local_port(port):
if not isinstance(port, (int, long)):
raise ConfigTypeError(desc='Invalid local port: %s' % port)
if port < 1025 or port > 65535:
raise ConfigValueError(desc='Invalid local port value: %s, has to be'
' between 1025 and 65535' % port)
return port
@validate(name=REMOTE_AS)
def validate_remote_as(asn):
if not is_valid_old_asn(asn):
raise ConfigValueError(desc='Invalid remote as value %s' % asn)
return asn
def valid_prefix_filter(filter_):
policy = filter_.get('policy', None)
if policy == 'permit':
policy = PrefixFilter.POLICY_PERMIT
else:
policy = PrefixFilter.POLICY_DENY
prefix = filter_['prefix']
ge = filter_.get('ge', None)
le = filter_.get('le', None)
return PrefixFilter(prefix, policy, ge=ge, le=le)
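# Example (sketch): valid_prefix_filter({'prefix': '10.0.0.0/8',
# 'policy': 'permit', 'ge': 16, 'le': 24}) builds a PrefixFilter that
# permits 10.0.0.0/8 routes with mask length 16-24; any other 'policy'
# value (or a missing one) falls back to POLICY_DENY.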
PREFIX_FILTER = 'prefix_filter'
SUPPORTED_FILTER_VALIDATORS = {
PREFIX_FILTER: valid_prefix_filter
}
def valid_filter(filter_):
if isinstance(filter_, Filter):
return filter_
if not isinstance(filter_, dict):
raise ConfigTypeError(desc='Invalid filter: %s' % filter_)
if 'type' not in filter_:
raise ConfigTypeError(desc='Invalid filter: %s, needs \'type\' field'
% filter_)
if not filter_['type'] in SUPPORTED_FILTER_VALIDATORS:
raise ConfigTypeError(desc='Invalid filter type: %s, supported filter'
' types are %s'
% (filter_['type'],
SUPPORTED_FILTER_VALIDATORS.keys()))
return SUPPORTED_FILTER_VALIDATORS[filter_['type']](filter_)
def valid_attribute_map(attribute_map):
if not isinstance(attribute_map, AttributeMap):
raise ConfigTypeError(desc='Invalid AttributeMap: %s' % attribute_map)
else:
return attribute_map
@validate(name=IN_FILTER)
def validate_in_filters(filters):
return [valid_filter(filter_) for filter_ in filters]
@validate(name=OUT_FILTER)
def validate_out_filters(filters):
return [valid_filter(filter_) for filter_ in filters]
@validate(name=ATTRIBUTE_MAP)
def validate_attribute_maps(attribute_maps):
return [valid_attribute_map(attribute_map)
for attribute_map in attribute_maps]
@validate(name=IS_ROUTE_SERVER_CLIENT)
def validate_is_route_server_client(is_route_server_client):
if is_route_server_client not in (True, False):
raise ConfigValueError(desc='Invalid is_route_server_client(%s)' %
is_route_server_client)
return is_route_server_client
@validate(name=CHECK_FIRST_AS)
def validate_check_first_as(check_first_as):
if check_first_as not in (True, False):
raise ConfigValueError(desc='Invalid check_first_as(%s)' %
check_first_as)
return check_first_as
@validate(name=IS_NEXT_HOP_SELF)
def validate_is_next_hop_self(is_next_hop_self):
if is_next_hop_self not in (True, False):
raise ConfigValueError(desc='Invalid is_next_hop_self(%s)' %
is_next_hop_self)
return is_next_hop_self
class NeighborConf(ConfWithId, ConfWithStats):
"""Class that encapsulates one neighbors' configuration."""
UPDATE_ENABLED_EVT = 'update_enabled_evt'
UPDATE_MED_EVT = 'update_med_evt'
VALID_EVT = frozenset([UPDATE_ENABLED_EVT, UPDATE_MED_EVT])
REQUIRED_SETTINGS = frozenset([REMOTE_AS, IP_ADDRESS])
OPTIONAL_SETTINGS = frozenset([CAP_REFRESH,
CAP_ENHANCED_REFRESH,
CAP_MBGP_IPV4, CAP_MBGP_IPV6,
CAP_MBGP_VPNV4, CAP_MBGP_VPNV6,
CAP_RTC, RTC_AS, HOLD_TIME,
ENABLED, MULTI_EXIT_DISC, MAX_PREFIXES,
ADVERTISE_PEER_AS, SITE_OF_ORIGINS,
LOCAL_ADDRESS, LOCAL_PORT,
PEER_NEXT_HOP, PASSWORD,
IN_FILTER, OUT_FILTER,
IS_ROUTE_SERVER_CLIENT, CHECK_FIRST_AS,
IS_NEXT_HOP_SELF])
def __init__(self, **kwargs):
super(NeighborConf, self).__init__(**kwargs)
def _init_opt_settings(self, **kwargs):
self._settings[CAP_REFRESH] = compute_optional_conf(
CAP_REFRESH, DEFAULT_CAP_REFRESH, **kwargs)
self._settings[CAP_ENHANCED_REFRESH] = compute_optional_conf(
CAP_ENHANCED_REFRESH, DEFAULT_CAP_ENHANCED_REFRESH, **kwargs)
self._settings[CAP_MBGP_IPV4] = compute_optional_conf(
CAP_MBGP_IPV4, DEFAULT_CAP_MBGP_IPV4, **kwargs)
self._settings[CAP_MBGP_IPV6] = compute_optional_conf(
CAP_MBGP_IPV6, DEFAULT_CAP_MBGP_IPV6, **kwargs)
self._settings[CAP_MBGP_VPNV4] = compute_optional_conf(
CAP_MBGP_VPNV4, DEFAULT_CAP_MBGP_VPNV4, **kwargs)
self._settings[CAP_MBGP_VPNV6] = compute_optional_conf(
CAP_MBGP_VPNV6, DEFAULT_CAP_MBGP_VPNV6, **kwargs)
self._settings[HOLD_TIME] = compute_optional_conf(
HOLD_TIME, DEFAULT_HOLD_TIME, **kwargs)
self._settings[ENABLED] = compute_optional_conf(
ENABLED, DEFAULT_ENABLED, **kwargs)
self._settings[MAX_PREFIXES] = compute_optional_conf(
MAX_PREFIXES, DEFAULT_MAX_PREFIXES, **kwargs)
self._settings[ADVERTISE_PEER_AS] = compute_optional_conf(
ADVERTISE_PEER_AS, DEFAULT_ADVERTISE_PEER_AS, **kwargs)
self._settings[IN_FILTER] = compute_optional_conf(
IN_FILTER, DEFAULT_IN_FILTER, **kwargs)
self._settings[OUT_FILTER] = compute_optional_conf(
OUT_FILTER, DEFAULT_OUT_FILTER, **kwargs)
self._settings[IS_ROUTE_SERVER_CLIENT] = compute_optional_conf(
IS_ROUTE_SERVER_CLIENT,
DEFAULT_IS_ROUTE_SERVER_CLIENT, **kwargs)
self._settings[CHECK_FIRST_AS] = compute_optional_conf(
CHECK_FIRST_AS, DEFAULT_CHECK_FIRST_AS, **kwargs)
self._settings[IS_NEXT_HOP_SELF] = compute_optional_conf(
IS_NEXT_HOP_SELF,
DEFAULT_IS_NEXT_HOP_SELF, **kwargs)
# We do not have valid default MED value.
# If no MED attribute is provided then we do not have to use MED.
# If MED attribute is provided we have to validate it and use it.
med = kwargs.pop(MULTI_EXIT_DISC, None)
if med and validate_med(med):
self._settings[MULTI_EXIT_DISC] = med
# We do not have valid default SOO value.
# If no SOO attribute is provided then we do not have to use SOO.
# If SOO attribute is provided we have to validate it and use it.
soos = kwargs.pop(SITE_OF_ORIGINS, None)
if soos and validate_soo_list(soos):
self._settings[SITE_OF_ORIGINS] = soos
# We do not have valid default LOCAL_ADDRESS and LOCAL_PORT value.
# If no LOCAL_ADDRESS/PORT is provided then we will bind to system
# default.
self._settings[LOCAL_ADDRESS] = compute_optional_conf(
LOCAL_ADDRESS, None, **kwargs)
self._settings[LOCAL_PORT] = compute_optional_conf(
LOCAL_PORT, None, **kwargs)
self._settings[PEER_NEXT_HOP] = compute_optional_conf(
PEER_NEXT_HOP, None, **kwargs)
self._settings[PASSWORD] = compute_optional_conf(
PASSWORD, None, **kwargs)
# RTC configurations.
self._settings[CAP_RTC] = \
compute_optional_conf(CAP_RTC, DEFAULT_CAP_RTC, **kwargs)
# Default RTC_AS is local (router) AS.
from ryu.services.protocols.bgp.core_manager import \
CORE_MANAGER
default_rt_as = CORE_MANAGER.common_conf.local_as
self._settings[RTC_AS] = \
compute_optional_conf(RTC_AS, default_rt_as, **kwargs)
# Since ConfWithId' default values use str(self) and repr(self), we
# call super method after we have initialized other settings.
super(NeighborConf, self)._init_opt_settings(**kwargs)
@classmethod
def get_opt_settings(cls):
self_confs = super(NeighborConf, cls).get_opt_settings()
self_confs.update(NeighborConf.OPTIONAL_SETTINGS)
return self_confs
@classmethod
def get_req_settings(cls):
self_confs = super(NeighborConf, cls).get_req_settings()
self_confs.update(NeighborConf.REQUIRED_SETTINGS)
return self_confs
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(NeighborConf, cls).get_valid_evts()
self_valid_evts.update(NeighborConf.VALID_EVT)
return self_valid_evts
# =========================================================================
# Required attributes
# =========================================================================
@property
def remote_as(self):
return self._settings[REMOTE_AS]
@property
def ip_address(self):
return self._settings[IP_ADDRESS]
@property
def host_bind_ip(self):
return self._settings[LOCAL_ADDRESS]
@property
def host_bind_port(self):
return self._settings[LOCAL_PORT]
@property
def next_hop(self):
return self._settings[PEER_NEXT_HOP]
@property
def password(self):
return self._settings[PASSWORD]
# =========================================================================
# Optional attributes with valid defaults.
# =========================================================================
@property
def hold_time(self):
return self._settings[HOLD_TIME]
@property
def cap_refresh(self):
return self._settings[CAP_REFRESH]
@property
def cap_enhanced_refresh(self):
return self._settings[CAP_ENHANCED_REFRESH]
@property
def cap_mbgp_ipv4(self):
return self._settings[CAP_MBGP_IPV4]
@property
def cap_mbgp_ipv6(self):
return self._settings[CAP_MBGP_IPV6]
@property
def cap_mbgp_vpnv4(self):
return self._settings[CAP_MBGP_VPNV4]
@property
def cap_mbgp_vpnv6(self):
return self._settings[CAP_MBGP_VPNV6]
@property
def cap_rtc(self):
return self._settings[CAP_RTC]
@property
def enabled(self):
return self._settings[ENABLED]
@enabled.setter
def enabled(self, enable):
# Update enabled flag and notify listeners.
if self._settings[ENABLED] != enable:
self._settings[ENABLED] = enable
self._notify_listeners(NeighborConf.UPDATE_ENABLED_EVT,
enable)
# =========================================================================
# Optional attributes with no valid defaults.
# =========================================================================
@property
def multi_exit_disc(self):
# This property does not have any valid default. Hence if not set we
# return None.
return self._settings.get(MULTI_EXIT_DISC)
@multi_exit_disc.setter
def multi_exit_disc(self, value):
if self._settings.get(MULTI_EXIT_DISC) != value:
self._settings[MULTI_EXIT_DISC] = value
self._notify_listeners(NeighborConf.UPDATE_MED_EVT, value)
@property
def soo_list(self):
soos = self._settings.get(SITE_OF_ORIGINS)
if soos:
soos = list(soos)
else:
soos = []
return soos
@property
def rtc_as(self):
return self._settings[RTC_AS]
@property
def in_filter(self):
return self._settings[IN_FILTER]
@property
def out_filter(self):
return self._settings[OUT_FILTER]
@property
def is_route_server_client(self):
return self._settings[IS_ROUTE_SERVER_CLIENT]
@property
def check_first_as(self):
return self._settings[CHECK_FIRST_AS]
@property
def is_next_hop_self(self):
return self._settings[IS_NEXT_HOP_SELF]
def exceeds_max_prefix_allowed(self, prefix_count):
allowed_max = self._settings[MAX_PREFIXES]
does_exceed = False
# Check if allowed max. is unlimited.
if allowed_max != 0:
# If max. prefix is limited, check if given exceeds this limit.
if prefix_count > allowed_max:
does_exceed = True
return does_exceed
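    # Sketch: with MAX_PREFIXES left at the default of 0 this always
    # returns False (unlimited); with max_prefixes=100 a prefix_count of
    # 150 returns True.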
def get_configured_capabilites(self):
"""Returns configured capabilities."""
capabilities = OrderedDict()
mbgp_caps = []
if self.cap_mbgp_ipv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_UC.afi, RF_IPv4_UC.safi))
if self.cap_mbgp_ipv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_UC.afi, RF_IPv6_UC.safi))
if self.cap_mbgp_vpnv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_VPN.afi, RF_IPv4_VPN.safi))
if self.cap_mbgp_vpnv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_VPN.afi, RF_IPv6_VPN.safi))
if self.cap_rtc:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_RTC_UC.afi, RF_RTC_UC.safi))
if mbgp_caps:
capabilities[BGP_CAP_MULTIPROTOCOL] = mbgp_caps
if self.cap_refresh:
capabilities[BGP_CAP_ROUTE_REFRESH] = [
BGPOptParamCapabilityRouteRefresh()]
if self.cap_enhanced_refresh:
capabilities[BGP_CAP_ENHANCED_ROUTE_REFRESH] = [
BGPOptParamCapabilityEnhancedRouteRefresh()]
return capabilities
def __repr__(self):
return '<%s(%r, %r, %r)>' % (self.__class__.__name__,
self.remote_as,
self.ip_address,
self.enabled)
def __str__(self):
return 'Neighbor: %s' % (self.ip_address)
class NeighborsConf(BaseConf):
"""Container of all neighbor configurations."""
ADD_NEIGH_CONF_EVT = 'add_neigh_conf_evt'
REMOVE_NEIGH_CONF_EVT = 'remove_neigh_conf_evt'
VALID_EVT = frozenset([ADD_NEIGH_CONF_EVT, REMOVE_NEIGH_CONF_EVT])
def __init__(self):
super(NeighborsConf, self).__init__()
self._neighbors = {}
def _init_opt_settings(self, **kwargs):
pass
def update(self, **kwargs):
raise NotImplementedError('Use either add/remove_neighbor_conf'
' methods instead.')
@property
def rtc_as_set(self):
"""Returns current RTC AS configured for current neighbors.
"""
rtc_as_set = set()
for neigh in self._neighbors.itervalues():
rtc_as_set.add(neigh.rtc_as)
return rtc_as_set
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(NeighborsConf, cls).get_valid_evts()
self_valid_evts.update(NeighborsConf.VALID_EVT)
return self_valid_evts
def add_neighbor_conf(self, neigh_conf):
# Check if we already know this neighbor
        if neigh_conf.ip_address in self._neighbors:
message = 'Neighbor with given ip address already exists'
raise RuntimeConfigError(desc=message)
# Add this neighbor to known configured neighbors and generate update
# event
self._neighbors[neigh_conf.ip_address] = neigh_conf
self._notify_listeners(NeighborsConf.ADD_NEIGH_CONF_EVT, neigh_conf)
def remove_neighbor_conf(self, neigh_ip_address):
neigh_conf = self._neighbors.pop(neigh_ip_address, None)
if not neigh_conf:
            raise RuntimeConfigError(desc='Tried to remove a neighbor that '
                                          'does not exist')
else:
self._notify_listeners(NeighborsConf.REMOVE_NEIGH_CONF_EVT,
neigh_conf)
return neigh_conf
def get_neighbor_conf(self, neigh_ip_address):
return self._neighbors.get(neigh_ip_address, None)
def __repr__(self):
return '<%s(%r)>' % (self.__class__.__name__, self._neighbors)
def __str__(self):
return '\'Neighbors\': %s' % self._neighbors
@property
def settings(self):
return [neighbor.settings for _, neighbor in
self._neighbors.iteritems()]
class NeighborConfListener(ConfWithIdListener, ConfWithStatsListener):
"""Base listener for change events to a specific neighbors' configurations.
"""
def __init__(self, neigh_conf):
super(NeighborConfListener, self).__init__(neigh_conf)
neigh_conf.add_listener(NeighborConf.UPDATE_ENABLED_EVT,
self.on_update_enabled)
neigh_conf.add_listener(NeighborConf.UPDATE_MED_EVT,
self.on_update_med)
@abstractmethod
def on_update_enabled(self, evt):
raise NotImplementedError('This method should be overridden.')
    @abstractmethod
    def on_update_med(self, evt):
raise NotImplementedError('This method should be overridden.')
class NeighborsConfListener(BaseConfListener):
"""Base listener for change events to neighbor configuration container."""
def __init__(self, neighbors_conf):
super(NeighborsConfListener, self).__init__(neighbors_conf)
neighbors_conf.add_listener(NeighborsConf.ADD_NEIGH_CONF_EVT,
self.on_add_neighbor_conf)
neighbors_conf.add_listener(NeighborsConf.REMOVE_NEIGH_CONF_EVT,
self.on_remove_neighbor_conf)
@abstractmethod
def on_add_neighbor_conf(self, evt):
raise NotImplementedError('This method should be overridden.')
@abstractmethod
def on_remove_neighbor_conf(self, evt):
raise NotImplementedError('This method should be overridden.')
|
levilucio/SyVOLT | refs/heads/master | GM2AUTOSAR_MM/merge_inter_layer_rules/Himesis/HFindTwoApplyElementsWithTraceLHS.py | 2 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HFindTwoApplyElementsWithTraceLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HFindTwoApplyElementsWithTraceLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HFindTwoApplyElementsWithTraceLHS, self).__init__(name='HFindTwoApplyElementsWithTraceLHS', num_nodes=7, edges=[])
# Add the edges
self.add_edges([(3, 0), (0, 5), (6, 1), (1, 4), (5, 2), (2, 4)])
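        # Reading of the edge list (sketch): node 3 (ApplyModel) contains
        # node 5 (MetaModelElement_T) via node 0 (apply_contains); node 6
        # (MetaModelElement_T) reaches node 4 (MetaModelElement_S) via the
        # backward_link node 1; and nodes 5 and 4 are joined through the
        # trace_link node 2.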
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__GM2AUTOSAR_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = """if PreNode('4')['classtype'] == PreNode('3')['classtype']:
if len([i for i in graph.neighbors(PreNode('4').index) if graph.vs[i]['mm__'] == 'apply_contains']) == 0:
return True
return False
"""
self["name"] = """"""
self["GUID__"] = UUID('02cd9831-fcc7-4958-9de6-3053378bf1c6')
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_label__"] = """10"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[0]["mm__"] = """MT_pre__apply_contains"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["GUID__"] = UUID('119659ed-dfaa-4d7c-99e0-46613f599969')
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__type"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """12"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["mm__"] = """MT_pre__backward_link"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = UUID('5c3a2b43-0013-4803-b4b3-836c1e4ce7fb')
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_label__"] = """11"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["mm__"] = """MT_pre__trace_link"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["GUID__"] = UUID('d8fb8a92-95ff-4430-89e7-2065538da51b')
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_label__"] = """1"""
self.vs[3]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[3]["mm__"] = """MT_pre__ApplyModel"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["GUID__"] = UUID('d6405da9-989d-41f9-8fb6-d06bfe674080')
self.vs[4]["MT_subtypeMatching__"] = True
self.vs[4]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_pivotIn__"] = """element1"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__VirtualDevice'
p2
aS'MT_pre__Distributable'
p3
aS'MT_pre__Signal'
p4
aS'MT_pre__ExecFrame'
p5
aS'MT_pre__ECU'
p6
a.""")
self.vs[4]["mm__"] = """MT_pre__MetaModelElement_S"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["GUID__"] = UUID('090c34e2-8e32-4255-acd6-0b50a42b7ff0')
self.vs[5]["MT_pivotOut__"] = """element1"""
self.vs[5]["MT_subtypeMatching__"] = True
self.vs[5]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["MT_label__"] = """3"""
self.vs[5]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__EcuInstance'
p2
aS'MT_pre__System'
p3
aS'MT_pre__SystemMapping'
p4
aS'MT_pre__ComponentPrototype'
p5
aS'MT_pre__SwCompToEcuMapping_component'
p6
aS'MT_pre__CompositionType'
p7
aS'MT_pre__PPortPrototype'
p8
aS'MT_pre__SwcToEcuMapping'
p9
aS'MT_pre__SoftwareComposition'
p10
aS'MT_pre__RPortPrototype'
p11
aS'MT_pre__PortPrototype'
p12
aS'MT_pre__ComponentType'
p13
a.""")
self.vs[5]["mm__"] = """MT_pre__MetaModelElement_T"""
self.vs[5]["MT_dirty__"] = False
self.vs[5]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["GUID__"] = UUID('a219eb52-212c-4e1e-a21f-08509bdfa3dc')
self.vs[6]["MT_pivotOut__"] = """element2"""
self.vs[6]["MT_subtypeMatching__"] = True
self.vs[6]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["MT_label__"] = """4"""
self.vs[6]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__EcuInstance'
p2
aS'MT_pre__System'
p3
aS'MT_pre__SystemMapping'
p4
aS'MT_pre__ComponentPrototype'
p5
aS'MT_pre__SwCompToEcuMapping_component'
p6
aS'MT_pre__CompositionType'
p7
aS'MT_pre__PPortPrototype'
p8
aS'MT_pre__SwcToEcuMapping'
p9
aS'MT_pre__SoftwareComposition'
p10
aS'MT_pre__RPortPrototype'
p11
aS'MT_pre__PortPrototype'
p12
aS'MT_pre__ComponentType'
p13
a.""")
self.vs[6]["mm__"] = """MT_pre__MetaModelElement_T"""
self.vs[6]["MT_dirty__"] = False
self.vs[6]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["GUID__"] = UUID('b8c48f5b-5f5f-4b0b-95ee-03bfdc065909')
def eval_type12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
        @param PreNode: function that takes an integer label as parameter
                        and returns the node corresponding to that label.
"""
if PreNode('4')['classtype'] == PreNode('3')['classtype']:
if len([i for i in graph.neighbors(PreNode('4').index) if graph.vs[i]['mm__'] == 'apply_contains']) == 0:
return True
return False
|
lexionbear/mlmodels | refs/heads/master | tensorflow/libs/caffe-tensorflow/kaffe/caffe/__init__.py | 13 | from .resolver import get_caffe_resolver, has_pycaffe
|
2015fallproject/2015fallcase1 | refs/heads/master | static/Brython3.2.0-20150701-214155/Lib/test/unittests/subprocessdata/fd_status.py | 90 | """When called as a script, print a comma-separated list of the open
file descriptors on stdout."""
import errno
import os
try:
_MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
_MAXFD = 256
if __name__ == "__main__":
fds = []
for fd in range(0, _MAXFD):
try:
st = os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
continue
raise
# Ignore Solaris door files
if st.st_mode & 0xF000 != 0xd000:
fds.append(fd)
print(','.join(map(str, fds)))
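    # Typical use (sketch): subprocess tests spawn this script in a child
    # process and parse its stdout, e.g. "0,1,2", to see which descriptors
    # stayed open.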
|
roninek/python101 | refs/heads/master | docs/podstawy/przyklady/05_oceny_03.py | 2 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# import functions from the ocenyfun module stored in the file ocenyfun.py
from ocenyfun import drukuj
from ocenyfun import srednia
from ocenyfun import mediana
from ocenyfun import odchylenie
przedmioty = set(['polski', 'angielski'])  # define the set of subjects
drukuj(przedmioty, "The subject list contains: ")
print "\nTo stop entering subjects, press Enter."
while True:
    przedmiot = raw_input("Enter a subject name: ")
    if len(przedmiot):
        if przedmiot in przedmioty:  # is the subject already in the set?
            print "We already have that subject :-)"
        przedmioty.add(przedmiot)  # add the subject to the set
    else:
        drukuj(przedmioty, "\nYour subjects: ")
        przedmiot = raw_input("\nWhich subject do you want to enter grades for? ")
        if przedmiot not in przedmioty:  # if the subject is not in the set
            print "No such subject; you can add it."
        else:
            break  # leave the loop
oceny = []  # empty list of grades
ocena = None  # controls the loop and receives the entered grades
print "\nTo stop entering grades, enter 0 (zero)."
while not ocena:
    try:
        ocena = int(raw_input("Enter a grade (1-6): "))
        if (ocena > 0 and ocena < 7):
            oceny.append(float(ocena))
            ocena = None  # reset so the loop keeps asking for more grades
        elif ocena == 0:
            break
        else:
            print "Invalid grade."
            ocena = None
    except ValueError:
        print "Invalid input!"
drukuj(oceny, przedmiot.capitalize() + " - entered grades: ")
s = srednia(oceny)  # call a helper from the ocenyfun module
m = mediana(oceny)  # call a helper from the ocenyfun module
o = odchylenie(oceny, s)  # call a helper from the ocenyfun module
print "\nMean: {0:5.2f}\nMedian: {1:5.2f}\nStd. deviation: {2:5.2f}".format(s, m, o)
|
mbj36/Python-Flask | refs/heads/master | db_migrate.py | 47 | #!flask/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_MIGRATE_REPO,
tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
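# Usage sketch: run this script after changing the models imported through
# "app"; it writes a new NNN_migration.py under SQLALCHEMY_MIGRATE_REPO and
# upgrades the database to that version.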
|
Yelp/pootle | refs/heads/master | pootle/apps/pootle_app/management/commands/retry_failed_jobs.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
# This must be run before importing Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.urlresolvers import set_script_prefix
from django.utils.encoding import force_unicode
from django_rq.queues import get_failed_queue
class Command(NoArgsCommand):
help = "Retry failed RQ jobs."
def handle_noargs(self, **options):
# The script prefix needs to be set here because the generated
# URLs need to be aware of that and they are cached. Ideally
# Django should take care of setting this up, but it doesn't yet
# (fixed in Django 1.10):
# https://code.djangoproject.com/ticket/16734
script_name = (u'/' if settings.FORCE_SCRIPT_NAME is None
else force_unicode(settings.FORCE_SCRIPT_NAME))
set_script_prefix(script_name)
failed_queue = get_failed_queue()
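        # get_failed_queue() returns django_rq's failed-job queue; requeue()
        # pushes each job back onto its original queue so a worker can
        # retry it.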
for job_id in failed_queue.get_job_ids():
failed_queue.requeue(job_id=job_id)
|
oberstet/autobahn-python | refs/heads/master | examples/twisted/websocket/reconnecting/client.py | 3 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from twisted.internet.protocol import ReconnectingClientFactory
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
class MyClientProtocol(WebSocketClientProtocol):
def onConnect(self, response):
print("Server connected: {0}".format(response.peer))
self.factory.resetDelay()
def onOpen(self):
print("WebSocket connection open.")
def hello():
self.sendMessage("Hello, world!".encode('utf8'))
self.sendMessage(b"\x00\x01\x03\x04", isBinary=True)
self.factory.reactor.callLater(1, hello)
# start sending messages every second ..
hello()
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print("Text message received: {0}".format(payload.decode('utf8')))
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
class MyClientFactory(WebSocketClientFactory, ReconnectingClientFactory):
protocol = MyClientProtocol
def clientConnectionFailed(self, connector, reason):
print("Client connection failed .. retrying ..")
self.retry(connector)
def clientConnectionLost(self, connector, reason):
print("Client connection lost .. retrying ..")
self.retry(connector)
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
log.startLogging(sys.stdout)
factory = MyClientFactory("ws://127.0.0.1:9000")
reactor.connectTCP("127.0.0.1", 9000, factory)
reactor.run()
|
RegulatoryGenomicsUPF/pyicoteo | refs/heads/pyicoteo | old_docs/conf.py | 1 | # -*- coding: utf-8 -*-
#
# pyicos documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 3 10:47:10 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.programoutput']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyicos'
copyright = u'2013, Juan González-Vallinas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyicosdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyicos.tex', u'pyicos Documentation',
u'Juan González-Vallinas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
UITools/saleor | refs/heads/master | saleor/dashboard/seo/utils.py | 1 | from django.utils.translation import pgettext_lazy
from ...core.utils.text import strip_html_and_truncate
from ..widgets import CharsLeftWidget
MIN_TITLE_LENGTH = 25
MIN_DESCRIPTION_LENGTH = 120
SEO_HELP_TEXTS = {
'seo_description': pgettext_lazy(
'Form field help text',
(
'If empty, the preview shows what will be autogenerated.')),
'seo_title': pgettext_lazy(
'Form field help text',
(
'If empty, the preview shows what will be autogenerated.'))}
SEO_LABELS = {
'seo_description': pgettext_lazy(
(
'Field name, Meta Description is page summary '
'used by Search Engines'),
'Meta Description'),
'seo_title': pgettext_lazy(
(
'Field name, '
'Title that will be used to describe page in Search Engines'),
'SEO Title')}
SEO_WIDGETS = {
'seo_description': CharsLeftWidget,
'seo_title': CharsLeftWidget}
def prepare_seo_description(seo_description, html_description, max_length):
# if there is no SEO friendly description set,
# generate it from the HTML description
if not seo_description:
# get the non-safe description (has non escaped HTML tags in it)
# generate a SEO friendly from HTML description
seo_description = strip_html_and_truncate(
html_description, max_length)
return seo_description
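# Illustrative usage (a sketch, not part of Saleor; the HTML and max_length value
# below are made up for the demo). With an empty SEO description the helper
# derives one from the HTML description via strip_html_and_truncate; an explicit
# description is returned unchanged.
if __name__ == '__main__':
    html = '<p>Comfortable <b>running shoes</b> with a breathable mesh upper.</p>'
    print(prepare_seo_description('', html, MIN_DESCRIPTION_LENGTH))
    print(prepare_seo_description('Hand-picked running shoes.', html,
                                  MIN_DESCRIPTION_LENGTH))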
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/tangible/loot/loot_schematic/shared_tanning_hide_s01_schematic.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_tanning_hide_s01_schematic.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","tanning_hide_s01")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
|
nsol-nmsu/ns3-smartgrid | refs/heads/ndnSIM-v2 | src/wave/test/examples-to-run.py | 102 | #! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("wave-simple-80211p", "True", "True"),
("wave-simple-device", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
|
mrnamingo/enigma2-test | refs/heads/master | lib/python/Screens/EpgSelection.py | 1 | from time import localtime, time, strftime, mktime
from enigma import eServiceReference, eTimer, eServiceCenter, ePoint
from Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Components.About import about
from Components.ActionMap import HelpableActionMap, HelpableNumberActionMap
from Components.Button import Button
from Components.config import config, configfile, ConfigClock
from Components.EpgList import EPGList, EPGBouquetList, TimelineText, EPG_TYPE_SINGLE, EPG_TYPE_SIMILAR, EPG_TYPE_MULTI, EPG_TYPE_ENHANCED, EPG_TYPE_INFOBAR, EPG_TYPE_INFOBARGRAPH, EPG_TYPE_GRAPH, MAX_TIMELINES
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Sources.ServiceEvent import ServiceEvent
from Components.Sources.Event import Event
from Components.UsageConfig import preferredTimerPath
from Screens.TimerEdit import TimerSanityConflict
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.PictureInPicture import PictureInPicture
from Screens.Setup import Setup
from TimeDateInput import TimeDateInput
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from TimerEntry import TimerEntry, InstantRecordTimerEntry
from ServiceReference import ServiceReference
from Tools.HardwareInfo import HardwareInfo
import Screens.InfoBar
mepg_config_initialized = False
# PiPServiceRelation installed?
try:
from Plugins.SystemPlugins.PiPServiceRelation.plugin import getRelationDict
plugin_PiPServiceRelation_installed = True
except:
plugin_PiPServiceRelation_installed = False
class EPGSelection(Screen, HelpableScreen):
EMPTY = 0
ADD_TIMER = 1
REMOVE_TIMER = 2
ZAP = 1
def __init__(self, session, service = None, zapFunc = None, eventid = None, bouquetChangeCB=None, serviceChangeCB = None, EPGtype = None, StartBouquet = None, StartRef = None, bouquets = None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.zapFunc = zapFunc
self.serviceChangeCB = serviceChangeCB
self.bouquets = bouquets
graphic = False
if EPGtype == 'single':
self.type = EPG_TYPE_SINGLE
elif EPGtype == 'infobar':
self.type = EPG_TYPE_INFOBAR
elif EPGtype == 'enhanced':
self.type = EPG_TYPE_ENHANCED
elif EPGtype == 'graph':
self.type = EPG_TYPE_GRAPH
if config.epgselection.graph_type_mode.value == "graphics":
graphic = True
elif EPGtype == 'infobargraph':
self.type = EPG_TYPE_INFOBARGRAPH
if config.epgselection.infobar_type_mode.value == "graphics":
graphic = True
elif EPGtype == 'multi':
self.type = EPG_TYPE_MULTI
else:
self.type = EPG_TYPE_SIMILAR
if not self.type == EPG_TYPE_SINGLE:
self.StartBouquet = StartBouquet
self.StartRef = StartRef
self.servicelist = None
self.ChoiceBoxDialog = None
self.ask_time = -1
self.closeRecursive = False
self.eventviewDialog = None
self.eventviewWasShown = False
self.currch = None
self.session.pipshown = False
self.cureventindex = None
self.primaryBouquet = None
if plugin_PiPServiceRelation_installed:
self.pipServiceRelation = getRelationDict()
else:
self.pipServiceRelation = {}
self.zapnumberstarted = False
self.NumberZapTimer = eTimer()
self.NumberZapTimer.callback.append(self.dozumberzap)
self.NumberZapField = None
self.CurrBouquet = None
self.CurrService = None
self["guidebouquetlist"] = Label()
self["number"] = Label()
self["number"].hide()
self["searchnumber"] = Label()
self["searchnumber"].hide()
self['Service'] = ServiceEvent()
self['Event'] = Event()
self['lab1'] = Label(_('Please wait while gathering EPG data...'))
self.key_green_choice = self.EMPTY
self.changeText = self.EMPTY
		self.findchannel = False
#Ensures yellow and blue buttons are different for graphical EPG screen
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
# ketmp - labels keys depending on what user has configured
self.RefreshColouredKeys()
else:
self['key_red'] = Button(_('IMDb Search'))
self['key_green'] = Button(_('Record Once'))
self['key_yellow'] = Button(_('EPG Search'))
self['key_blue'] = Button(_('Record Series'))
self['dialogactions'] = HelpableActionMap(self, 'WizardActions',
{
'back': (self.closeChoiceBoxDialog, _('Close dialog')),
}, -1)
self['dialogactions'].csel = self
self["dialogactions"].setEnabled(False)
self['okactions'] = HelpableActionMap(self, 'OkCancelActions',
{
'cancel': (self.closeScreen, _('Exit EPG')),
'OK': (self.OK, _('Zap to channel (setup in menu)')),
'OKLong': (self.OKLong, _('Zap to channel and close (setup in menu)'))
}, -1)
self['okactions'].csel = self
if self.type == EPG_TYPE_GRAPH:
self['colouractions'] = HelpableActionMap(self, 'ColorActions',
{
'red': (self.redButtonPressed, _('Open option menu')),
'green': (self.greenButtonPressed, _('Show completed recordings')),
'greenlong': (self.greenButtonPressedLong, _('Show Record Once List')),
'yellow': (self.yellowButtonPressed, _('Go back by 24 hours')),
'blue': (self.blueButtonPressed, _('Go forward by 24 hours')),
}, -1)
else:
self['colouractions'] = HelpableActionMap(self, 'ColorActions',
{
'red': (self.redButtonPressed, _('IMDB for current event')),
'redlong': (self.redButtonPressedLong, _('Sort EPG List')),
'green': (self.greenButtonPressed, _('Add/Remove Record Once entry for current event')),
'greenlong': (self.greenButtonPressedLong, _('Show Record Once List')),
'yellow': (self.yellowButtonPressed, _('Search for similar events')),
'blue': (self.blueButtonPressed, _('Add Record Series for current event')),
'bluelong': (self.blueButtonPressedLong, _('Show Record Series List'))
}, -1)
self['colouractions'].csel = self
self['recordingactions'] = HelpableActionMap(self, 'InfobarInstantRecord',
{
'ShortRecord': (self.recButtonPressed, _('Add a Record Once for current event')),
'LongRecord': (self.recButtonPressedLong, _('Add a zap timer for current event'))
}, -1)
self['recordingactions'].csel = self
if self.type == EPG_TYPE_SIMILAR:
self.currentService = service
self.eventid = eventid
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
elif self.type == EPG_TYPE_SINGLE:
self.currentService = ServiceReference(service)
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'info': (self.Info, _('Show detailed event info')),
'epg': (self.Info, _('Show detailed event info')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.prevPage, _('Move up a page')),
'right': (self.nextPage, _('Move down a page')),
'up': (self.moveUp, _('Go to previous channel')),
'down': (self.moveDown, _('Go to next channel'))
}, -1)
self['epgcursoractions'].csel = self
elif self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_ENHANCED:
if self.type == EPG_TYPE_INFOBAR:
self.skinName = 'QuickEPG'
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextBouquet': (self.nextBouquet, _('Go to next bouquet')),
'prevBouquet': (self.prevBouquet, _('Go to previous bouquet')),
'nextService': (self.nextPage, _('Move down a page')),
'prevService': (self.prevPage, _('Move up a page')),
					'input_date_time': (self.enterDateTime, _('Goto specific date/time')),
'epg': (self.epgButtonPressed, _('Show single epg for current channel')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.prevService, _('Goto previous channel')),
'right': (self.nextService, _('Goto next channel')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
elif self.type == EPG_TYPE_ENHANCED:
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
'nextService': (self.nextService, _('Goto next channel')),
'prevService': (self.prevService, _('Goto previous channel')),
					'input_date_time': (self.enterDateTime, _('Goto specific date/time')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.prevPage, _('Move up a page')),
'right': (self.nextPage, _('Move down a page')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions',
{
				'0': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'1': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'2': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'3': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'4': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'5': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'6': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'7': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'8': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
				'9': (self.keyNumberGlobal, _('Enter number to jump to channel.'))
}, -1)
self['input_actions'].csel = self
self.list = []
self.servicelist = service
self.currentService = self.session.nav.getCurrentlyPlayingServiceOrGroup()
elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
if self.type == EPG_TYPE_GRAPH:
if not config.epgselection.graph_pig.value:
self.skinName = 'GraphicalEPG'
else:
self.skinName = 'GraphicalEPGPIG'
elif self.type == EPG_TYPE_INFOBARGRAPH:
self.skinName = 'GraphicalInfoBarEPG'
now = time() - int(config.epg.histminutes.value) * 60
if self.type == EPG_TYPE_GRAPH:
				self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
elif self.type == EPG_TYPE_INFOBARGRAPH:
				self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
self.closeRecursive = False
self.bouquetlist_active = False
self['bouquetlist'] = EPGBouquetList(graphic=graphic)
self['bouquetlist'].hide()
self['timeline_text'] = TimelineText(type=self.type,graphic=graphic)
self['Event'] = Event()
self['primetime'] = Label(_('PRIMETIME'))
self['change_bouquet'] = Label(_('CHANGE BOUQUET'))
self['jump'] = Label(_('JUMP 24 HOURS'))
self['page'] = Label(_('PAGE UP/DOWN'))
self.time_lines = []
for x in range(0, MAX_TIMELINES):
pm = Pixmap()
self.time_lines.append(pm)
self['timeline%d' % x] = pm
self['timeline_now'] = Pixmap()
self.updateTimelineTimer = eTimer()
self.updateTimelineTimer.callback.append(self.moveTimeLines)
self.updateTimelineTimer.start(60000)
self['bouquetokactions'] = HelpableActionMap(self, 'OkCancelActions',
{
'cancel': (self.BouquetlistHide, _('Close bouquet list.')),
'OK': (self.BouquetOK, _('Change to bouquet')),
}, -1)
self['bouquetokactions'].csel = self
self["bouquetokactions"].setEnabled(False)
self['bouquetcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.moveBouquetPageUp, _('Goto previous event')),
'right': (self.moveBouquetPageDown, _('Goto next event')),
'up': (self.moveBouquetUp, _('Goto previous channel')),
'down': (self.moveBouquetDown, _('Goto next channel'))
}, -1)
self['bouquetcursoractions'].csel = self
self["bouquetcursoractions"].setEnabled(False)
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.leftPressed, _('Goto previous event')),
'right': (self.rightPressed, _('Goto next event')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextService': (self.prevPage, _('Page Up')),
'prevService': (self.nextPage, _('Page down')),
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
				'input_date_time': (self.enterDateTime, _('Goto specific date/time')),
'epg': (self.epgButtonPressed, _('Show single epg for current channel')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'tv': (self.Bouquetlist, _('Toggle between bouquet/epg lists')),
'tvlong': (self.togglePIG, _('Toggle Picture In Graphics')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
if self.type == EPG_TYPE_GRAPH:
self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions',
{
					'0': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'1': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'2': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'3': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'4': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'5': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'6': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'7': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'8': (self.keyNumberGlobal, _('Enter number to jump to channel.')),
					'9': (self.keyNumberGlobal, _('Enter number to jump to channel.'))
}, -1)
self['input_actions'].csel = self
else:
self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions',
{
'1': (self.keyNumberGlobal, _('Reduce time scale')),
'2': (self.keyNumberGlobal, _('Page up')),
'3': (self.keyNumberGlobal, _('Increase time scale')),
'4': (self.keyNumberGlobal, _('page left')),
'5': (self.keyNumberGlobal, _('Jump to current time')),
'6': (self.keyNumberGlobal, _('Page right')),
					'7': (self.keyNumberGlobal, _('Toggle number of items shown (increase or reduce)')),
'8': (self.keyNumberGlobal, _('Page down')),
'9': (self.keyNumberGlobal, _('Jump to prime time')),
'0': (self.keyNumberGlobal, _('Move to home of list'))
}, -1)
self['input_actions'].csel = self
elif self.type == EPG_TYPE_MULTI:
self.skinName = 'EPGSelectionMulti'
self['bouquetlist'] = EPGBouquetList(graphic=graphic)
self['bouquetlist'].hide()
self['now_button'] = Pixmap()
self['next_button'] = Pixmap()
self['more_button'] = Pixmap()
self['now_button_sel'] = Pixmap()
self['next_button_sel'] = Pixmap()
self['more_button_sel'] = Pixmap()
self['now_text'] = Label()
self['next_text'] = Label()
self['more_text'] = Label()
self['date'] = Label()
self.bouquetlist_active = False
self['bouquetokactions'] = HelpableActionMap(self, 'OkCancelActions',
{
'OK': (self.BouquetOK, _('Change to bouquet')),
}, -1)
self['bouquetokactions'].csel = self
self["bouquetokactions"].setEnabled(False)
self['bouquetcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.moveBouquetPageUp, _('Goto previous event')),
'right': (self.moveBouquetPageDown, _('Goto next event')),
'up': (self.moveBouquetUp, _('Goto previous channel')),
'down': (self.moveBouquetDown, _('Goto next channel'))
}, -1)
self['bouquetcursoractions'].csel = self
self['bouquetcursoractions'].setEnabled(False)
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.leftPressed, _('Goto previous event')),
'right': (self.rightPressed, _('Goto next event')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextService': (self.nextPage, _('Move down a page')),
'prevService': (self.prevPage, _('Move up a page')),
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
				'input_date_time': (self.enterDateTime, _('Goto specific date/time')),
'epg': (self.epgButtonPressed, _('Show single epg for current channel')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'tv': (self.Bouquetlist, _('Toggle between bouquet/epg lists')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
if self.type == EPG_TYPE_GRAPH:
time_epoch=int(config.epgselection.graph_prevtimeperiod.value)
elif self.type == EPG_TYPE_INFOBARGRAPH:
time_epoch=int(config.epgselection.infobar_prevtimeperiod.value)
else:
time_epoch=None
self['list'] = EPGList(type=self.type, selChangedCB=self.onSelectionChanged, timer=session.nav.RecordTimer, time_epoch=time_epoch, overjump_empty=config.epgselection.overjump.value, graphic=graphic)
self.refreshTimer = eTimer()
		self.refreshTimer.callback.append(self.refreshlist)
self.listTimer = eTimer()
self.listTimer.callback.append(self.hidewaitingtext)
if not HardwareInfo().is_nextgen():
self.createTimer = eTimer()
self.createTimer.callback.append(self.onCreate)
self.onLayoutFinish.append(self.LayoutFinish)
else:
self.onLayoutFinish.append(self.onCreate)
def createSetup(self):
self.closeEventViewDialog()
key = None
if self.type == EPG_TYPE_SINGLE:
key = 'epgsingle'
elif self.type == EPG_TYPE_MULTI:
key = 'epgmulti'
elif self.type == EPG_TYPE_ENHANCED:
key = 'epgenhanced'
elif self.type == EPG_TYPE_INFOBAR:
key = 'epginfobar'
elif self.type == EPG_TYPE_GRAPH:
key = 'epggraphical'
elif self.type == EPG_TYPE_INFOBARGRAPH:
key = 'epginfobargraphical'
if key:
self.session.openWithCallback(self.onSetupClose, Setup, key)
def onSetupClose(self, test = None):
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
if self.type == EPG_TYPE_GRAPH:
self.close('reopengraph')
elif self.type == EPG_TYPE_INFOBARGRAPH:
self.close('reopeninfobargraph')
else:
if self.type == EPG_TYPE_INFOBAR:
self.close('reopeninfobar')
def togglePIG(self):
if not config.epgselection.graph_pig.value:
config.epgselection.graph_pig.setValue(True)
else:
config.epgselection.graph_pig.setValue(False)
config.epgselection.graph_pig.save()
configfile.save()
self.close('reopengraph')
def hidewaitingtext(self):
self.listTimer.stop()
if self.type == EPG_TYPE_MULTI:
self['list'].moveToService(self.session.nav.getCurrentlyPlayingServiceOrGroup())
self['lab1'].hide()
def getBouquetServices(self, bouquet):
services = []
servicelist = eServiceCenter.getInstance().list(bouquet)
		if servicelist is not None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
continue
services.append(ServiceReference(service))
return services
def LayoutFinish(self):
self['lab1'].show()
self.createTimer.start(800)
def onCreate(self):
if not HardwareInfo().is_nextgen():
self.createTimer.stop()
serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
title = None
self['list'].recalcEntrySize()
self.BouquetRoot = False
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.getCurrentCursorLocation = None
if self.type == EPG_TYPE_GRAPH:
if config.epgselection.graph_primarybouquet.value and config.epgselection.graph_primarybouquet.value != "Disabled":
for bouq in self.bouquets:
if bouq[0] == config.epgselection.graph_primarybouquet.value:
self['list'].setPrimaryServiceList( self.getBouquetServices(bouq[1]) )
self.primaryBouquet = bouq[1]
break
else:
self['list'].setPrimaryServiceList( None )
self.primaryBouquet = None
self.getCurrentCursorLocation = None
if self.StartBouquet.toString().startswith('1:7:0'):
self.BouquetRoot = True
self.services = self.getBouquetServices(self.StartBouquet)
if self.findchannel == False:
self['list'].fillGraphEPG(self.services, self.ask_time)
self['list'].moveToService(serviceref) #remembers current event
self['list'].setCurrentlyPlaying(serviceref) #remembers channel
self['bouquetlist'].recalcEntrySize()
self['bouquetlist'].fillBouquetList(self.bouquets)
if self.findchannel == False:
self['bouquetlist'].moveToService(self.StartBouquet)
self['bouquetlist'].setCurrentBouquet(self.StartBouquet)
self.setTitle(self['bouquetlist'].getCurrentBouquet())
self.findchannel = False
if self.type == EPG_TYPE_GRAPH:
self.makebouqlistlabel()
self['list'].setShowServiceMode(config.epgselection.graph_servicetitle_mode.value)
self.moveTimeLines()
if config.epgselection.graph_channel1.value:
self['list'].instance.moveSelectionTo(0)
elif self.type == EPG_TYPE_INFOBARGRAPH:
self['list'].setShowServiceMode(config.epgselection.infobar_servicetitle_mode.value)
self.moveTimeLines()
elif self.type == EPG_TYPE_MULTI:
self['bouquetlist'].recalcEntrySize()
self['bouquetlist'].fillBouquetList(self.bouquets)
self['bouquetlist'].moveToService(self.StartBouquet)
self['bouquetlist'].fillBouquetList(self.bouquets)
self.services = self.getBouquetServices(self.StartBouquet)
self['list'].fillMultiEPG(self.services, self.ask_time)
self['list'].setCurrentlyPlaying(serviceref)
self.setTitle(self['bouquetlist'].getCurrentBouquet())
elif self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
if self.type == EPG_TYPE_SINGLE:
service = self.currentService
elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
service = ServiceReference(self.servicelist.getCurrentSelection())
title = ServiceReference(self.servicelist.getRoot()).getServiceName()
self['Service'].newService(service.ref)
if title:
title = title + ' - ' + service.getServiceName()
else:
title = service.getServiceName()
self.setTitle(title)
self['list'].fillSingleEPG(service)
self['list'].sortSingleEPG(int(config.epgselection.sort.value))
else:
self['list'].fillSimilarList(self.currentService, self.eventid)
self.listTimer.start(10)
def refreshlist(self):
self.refreshTimer.stop()
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
if self.getCurrentCursorLocation:
self.ask_time = self.getCurrentCursorLocation
self.getCurrentCursorLocation = None
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines()
elif self.type == EPG_TYPE_MULTI:
self['list'].fillMultiEPG(self.services, self.ask_time)
elif self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
try:
if self.type == EPG_TYPE_SINGLE:
service = self.currentService
elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
service = ServiceReference(self.servicelist.getCurrentSelection())
if not self.cureventindex:
index = self['list'].getCurrentIndex()
else:
index = self.cureventindex
self.cureventindex = None
self['list'].fillSingleEPG(service)
self['list'].sortSingleEPG(int(config.epgselection.sort.value))
self['list'].setCurrentIndex(index)
except:
pass
def moveUp(self):
self['list'].moveTo(self['list'].instance.moveUp)
def moveDown(self):
self['list'].moveTo(self['list'].instance.moveDown)
def updEvent(self, dir, visible = True):
ret = self['list'].selEntry(dir, visible)
if ret:
self.moveTimeLines(True)
def nextPage(self):
self['list'].moveTo(self['list'].instance.pageDown)
def prevPage(self):
self['list'].moveTo(self['list'].instance.pageUp)
def toTop(self):
self['list'].moveTo(self['list'].instance.moveTop)
def toEnd(self):
self['list'].moveTo(self['list'].instance.moveEnd)
def leftPressed(self):
if self.type == EPG_TYPE_MULTI:
self['list'].updateMultiEPG(-1)
else:
self.updEvent(-1)
def rightPressed(self):
if self.type == EPG_TYPE_MULTI:
self['list'].updateMultiEPG(1)
else:
self.updEvent(+1)
def Bouquetlist(self):
if not self.bouquetlist_active:
self.BouquetlistShow()
else:
self.BouquetlistHide()
def BouquetlistShow(self):
self.curindex = self['bouquetlist'].l.getCurrentSelectionIndex()
self["epgcursoractions"].setEnabled(False)
self["okactions"].setEnabled(False)
self['bouquetlist'].show()
self["bouquetokactions"].setEnabled(True)
self["bouquetcursoractions"].setEnabled(True)
self.bouquetlist_active = True
def BouquetlistHide(self, cancel=True):
self["bouquetokactions"].setEnabled(False)
self["bouquetcursoractions"].setEnabled(False)
self['bouquetlist'].hide()
if cancel:
self['bouquetlist'].setCurrentIndex(self.curindex)
self["okactions"].setEnabled(True)
self["epgcursoractions"].setEnabled(True)
self.bouquetlist_active = False
def getCurrentBouquet(self):
if self.BouquetRoot:
return self.StartBouquet
elif self.has_key('bouquetlist'):
cur = self["bouquetlist"].l.getCurrentSelection()
return cur and cur[1]
else:
return self.servicelist.getRoot()
def BouquetOK(self):
self.BouquetRoot = False
now = time() - int(config.epg.histminutes.value) * 60
self.services = self.getBouquetServices(self.getCurrentBouquet())
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
if self.type == EPG_TYPE_GRAPH:
				self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
elif self.type == EPG_TYPE_INFOBARGRAPH:
				self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
self['list'].resetOffset()
self['list'].fillGraphEPG(self.services, self.ask_time)
self.moveTimeLines(True)
elif self.type == EPG_TYPE_MULTI:
self['list'].fillMultiEPG(self.services, self.ask_time)
self['list'].instance.moveSelectionTo(0)
self.setTitle(self['bouquetlist'].getCurrentBouquet())
self.BouquetlistHide(False)
def moveBouquetUp(self):
self['bouquetlist'].moveTo(self['bouquetlist'].instance.moveUp)
self['bouquetlist'].fillBouquetList(self.bouquets)
def moveBouquetDown(self):
self['bouquetlist'].moveTo(self['bouquetlist'].instance.moveDown)
self['bouquetlist'].fillBouquetList(self.bouquets)
def moveBouquetPageUp(self):
self['bouquetlist'].moveTo(self['bouquetlist'].instance.pageUp)
self['bouquetlist'].fillBouquetList(self.bouquets)
def moveBouquetPageDown(self):
self['bouquetlist'].moveTo(self['bouquetlist'].instance.pageDown)
self['bouquetlist'].fillBouquetList(self.bouquets)
def makebouqlistlabel(self):
boqlist = ""
index = 0
listlength = len(self.bouquets)
for boqs in self.bouquets:
if boqs[0] != self['bouquetlist'].getCurrentBouquet():
index = index + 1
else:
				break
		# rotate the list so the label starts just after the current bouquet
		newendbouqlist = self.bouquets[0:index]
newstartbouqlist = self.bouquets[index+1:listlength]
finalbouqlist = newstartbouqlist + newendbouqlist
for boqs in finalbouqlist:
boqlist = boqlist + " | " + boqs[0]
self['guidebouquetlist'].setText(boqlist)
def nextBouquet(self):
if self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveBouquetDown()
self.BouquetOK()
self.makebouqlistlabel()
elif (self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR) and config.usage.multibouquet.value:
self.CurrBouquet = self.servicelist.getCurrentSelection()
self.CurrService = self.servicelist.getRoot()
self.servicelist.nextBouquet()
self.onCreate()
def prevBouquet(self):
if self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveBouquetUp()
self.BouquetOK()
self.makebouqlistlabel()
elif (self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR) and config.usage.multibouquet.value:
self.CurrBouquet = self.servicelist.getCurrentSelection()
self.CurrService = self.servicelist.getRoot()
self.servicelist.prevBouquet()
self.onCreate()
def nextService(self):
if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
self.CurrBouquet = self.servicelist.getCurrentSelection()
self.CurrService = self.servicelist.getRoot()
self['list'].instance.moveSelectionTo(0)
if self.servicelist.inBouquet():
prev = self.servicelist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist.atEnd():
self.servicelist.nextBouquet()
else:
self.servicelist.moveDown()
cur = self.servicelist.getCurrentSelection()
if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
break
else:
self.servicelist.moveDown()
if self.isPlayable():
self.onCreate()
if not self['list'].getCurrent()[1] and config.epgselection.overjump.value:
self.nextService()
else:
self.nextService()
elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.updEvent(+24)
elif self.serviceChangeCB:
self.serviceChangeCB(1, self)
def prevService(self):
if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
self.CurrBouquet = self.servicelist.getCurrentSelection()
self.CurrService = self.servicelist.getRoot()
self['list'].instance.moveSelectionTo(0)
if self.servicelist.inBouquet():
prev = self.servicelist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value:
if self.servicelist.atBegin():
self.servicelist.prevBouquet()
self.servicelist.moveUp()
cur = self.servicelist.getCurrentSelection()
if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
break
else:
self.servicelist.moveUp()
if self.isPlayable():
self.onCreate()
if not self['list'].getCurrent()[1] and config.epgselection.overjump.value:
self.prevService()
else:
self.prevService()
elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.updEvent(-24)
elif self.serviceChangeCB:
self.serviceChangeCB(-1, self)
def enterDateTime(self):
global mepg_config_initialized
if self.type == EPG_TYPE_MULTI:
if not mepg_config_initialized:
config.misc.prev_mepg_time = ConfigClock(default=time())
mepg_config_initialized = True
self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.misc.prev_mepg_time)
elif self.type == EPG_TYPE_GRAPH:
self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.epgselection.graph_prevtime)
elif self.type == EPG_TYPE_INFOBARGRAPH:
self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.epgselection.infobar_prevtime)
def onDateTimeInputClosed(self, ret):
if len(ret) > 1:
if ret[0]:
if self.type == EPG_TYPE_MULTI:
self.ask_time = ret[1]
self['list'].fillMultiEPG(self.services, ret[1])
elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
now = time() - int(config.epg.histminutes.value) * 60
if self.type == EPG_TYPE_GRAPH:
						self.ask_time = ret[1] - ret[1] % (int(config.epgselection.graph_roundto.value) * 60)
elif self.type == EPG_TYPE_INFOBARGRAPH:
						self.ask_time = ret[1] - ret[1] % (int(config.epgselection.infobar_roundto.value) * 60)
l = self['list']
l.resetOffset()
l.fillGraphEPG(None, self.ask_time)
self.moveTimeLines(True)
if self.eventviewDialog and (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH):
self.infoKeyPressed(True)
def infoKeyPressed(self, eventviewopen=False):
cur = self['list'].getCurrent()
event = cur[0]
service = cur[1]
if event is not None and not self.eventviewDialog and not eventviewopen:
if self.type != EPG_TYPE_SIMILAR:
if self.type == EPG_TYPE_INFOBARGRAPH:
self.eventviewDialog = self.session.instantiateDialog(EventViewSimple,event, service, skin='InfoBarEventView')
self.eventviewDialog.show()
else:
self.session.open(EventViewEPGSelect, event, service, callback=self.eventViewCallback, similarEPGCB=self.openSimilarList)
elif self.eventviewDialog and not eventviewopen:
self.eventviewDialog.hide()
del self.eventviewDialog
self.eventviewDialog = None
elif event is not None and self.eventviewDialog and eventviewopen:
if self.type != EPG_TYPE_SIMILAR:
if self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH:
self.eventviewDialog.hide()
self.eventviewDialog = self.session.instantiateDialog(EventViewSimple,event, service, skin='InfoBarEventView')
self.eventviewDialog.show()
def redButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
#if self.changeText != self.EMPTY:
# ketmp - when button pressed runs function user selected
#if config.epgselection.graph_red.value == "showoptiondialog":
# self.OptionDialog()
if config.epgselection.graph_red.value == "24plus":
self.nextService()
if config.epgselection.graph_red.value == "24minus":
self.prevService()
if config.epgselection.graph_red.value == "timer":
self.RecordTimerQuestion(True)
if config.epgselection.graph_red.value == "imdb" or config.epgselection.graph_red.value == None:
self.openIMDb()
if config.epgselection.graph_red.value == "autotimer":
self.addAutoTimer()
if config.epgselection.graph_red.value == "bouquetlist":
self.Bouquetlist()
if config.epgselection.graph_red.value == "epgsearch":
self.openEPGSearch()
if config.epgselection.graph_red.value == "showmovies":
self.showMovieSelection()
if config.epgselection.graph_red.value == "record":
self.RecordTimerQuestion()
if config.epgselection.graph_red.value == "gotodatetime":
self.enterDateTime()
else:
self.openEPGSearch()
else:
self.openIMDb()
def redButtonPressedLong(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.sortEpg()
def greenButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
# ketmp - when button pressed runs function user selected
#if config.epgselection.graph_green.value == "showoptiondialog":
# self.OptionDialog()
if config.epgselection.graph_green.value == "24plus":
self.nextService()
if config.epgselection.graph_green.value == "24minus":
self.prevService()
if config.epgselection.graph_green.value == "timer" or config.epgselection.graph_green.value == None:
self.RecordTimerQuestion(True)
if config.epgselection.graph_green.value == "imdb":
self.openIMDb()
if config.epgselection.graph_green.value == "autotimer":
self.addAutoTimer()
if config.epgselection.graph_green.value == "bouquetlist":
self.Bouquetlist()
if config.epgselection.graph_green.value == "epgsearch":
self.openEPGSearch()
if config.epgselection.graph_green.value == "showmovies":
self.showMovieSelection()
if config.epgselection.graph_green.value == "record":
self.RecordTimerQuestion()
if config.epgselection.graph_green.value == "gotodatetime":
self.enterDateTime()
else:
self.RecordTimerQuestion(True)
def greenButtonPressedLong(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.showTimerList()
def yellowButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
# ketmp - when button pressed runs function user selected
#if config.epgselection.graph_yellow.value == "showoptiondialog":
# self.OptionDialog()
if config.epgselection.graph_yellow.value == "24plus":
self.nextService()
if config.epgselection.graph_yellow.value == "24minus":
self.prevService()
if config.epgselection.graph_yellow.value == "timer":
self.RecordTimerQuestion(True)
if config.epgselection.graph_yellow.value == "imdb":
self.openIMDb()
if config.epgselection.graph_yellow.value == "autotimer":
self.addAutoTimer()
if config.epgselection.graph_yellow.value == "bouquetlist":
self.Bouquetlist()
if config.epgselection.graph_yellow.value == "epgsearch" or config.epgselection.graph_yellow.value == None:
self.openEPGSearch()
if config.epgselection.graph_yellow.value == "showmovies":
self.showMovieSelection()
if config.epgselection.graph_yellow.value == "record":
self.RecordTimerQuestion()
if config.epgselection.graph_yellow.value == "gotodatetime":
self.enterDateTime()
else:
self.openEPGSearch()
def blueButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
# ketmp - when button pressed runs function user selected
#if config.epgselection.graph_blue.value == "showoptiondialog":
# self.OptionDialog()
if config.epgselection.graph_blue.value == "24plus":
self.nextService()
if config.epgselection.graph_blue.value == "24minus":
self.prevService()
if config.epgselection.graph_blue.value == "timer":
self.RecordTimerQuestion(True)
if config.epgselection.graph_blue.value == "imdb":
self.openIMDb()
if config.epgselection.graph_blue.value == "autotimer" or config.epgselection.graph_blue.value == None:
self.addAutoTimer()
if config.epgselection.graph_blue.value == "bouquetlist":
self.Bouquetlist()
if config.epgselection.graph_blue.value == "epgsearch":
self.openEPGSearch()
if config.epgselection.graph_blue.value == "showmovies":
self.showMovieSelection()
if config.epgselection.graph_blue.value == "record":
self.RecordTimerQuestion()
if config.epgselection.graph_blue.value == "gotodatetime":
self.enterDateTime()
else:
self.addAutoTimer()
def blueButtonPressedLong(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.showAutoTimerList()
def openSimilarList(self, eventid, refstr):
self.session.open(EPGSelection, refstr, None, eventid)
def setServices(self, services):
self.services = services
self.onCreate()
def setService(self, service):
self.currentService = service
self.onCreate()
def eventViewCallback(self, setEvent, setService, val):
l = self['list']
old = l.getCurrent()
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.updEvent(val, False)
elif val == -1:
self.moveUp()
elif val == +1:
self.moveDown()
cur = l.getCurrent()
if (self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH) and cur[0] is None and cur[1].ref != old[1].ref:
self.eventViewCallback(setEvent, setService, val)
else:
setService(cur[1])
setEvent(cur[0])
def eventSelected(self):
self.infoKeyPressed()
def sortEpg(self):
if self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
if config.epgselection.sort.value == '0':
config.epgselection.sort.setValue('1')
else:
config.epgselection.sort.setValue('0')
config.epgselection.sort.save()
configfile.save()
self['list'].sortSingleEPG(int(config.epgselection.sort.value))
def OpenSingleEPG(self):
cur = self['list'].getCurrent()
if cur[0] is not None:
event = cur[0]
serviceref = cur[1].ref
if serviceref is not None:
self.session.open(SingleEPG, serviceref)
def openIMDb(self):
try:
from Plugins.Extensions.IMDb.plugin import IMDB, IMDBEPGSelection
try:
cur = self['list'].getCurrent()
event = cur[0]
name = event.getEventName()
except:
name = ''
self.session.open(IMDB, name, False)
except ImportError:
self.session.open(MessageBox, _('The IMDb plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
def openEPGSearch(self):
try:
from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch
try:
cur = self['list'].getCurrent()
event = cur[0]
name = event.getEventName()
except:
name = ''
self.session.open(EPGSearch, name, False)
except ImportError:
self.session.open(MessageBox, _('The EPGSearch plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
def addAutoTimer(self):
try:
from Plugins.Extensions.AutoTimer.AutoTimerEditor import addAutotimerFromEvent
cur = self['list'].getCurrent()
event = cur[0]
if not event:
return
serviceref = cur[1]
addAutotimerFromEvent(self.session, evt=event, service=serviceref)
self.refreshTimer.start(3000)
except ImportError:
self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
def addAutoTimerSilent(self):
try:
from Plugins.Extensions.AutoTimer.AutoTimerEditor import addAutotimerFromEventSilent
cur = self['list'].getCurrent()
event = cur[0]
if not event:
return
serviceref = cur[1]
addAutotimerFromEventSilent(self.session, evt=event, service=serviceref)
self.refreshTimer.start(3000)
except ImportError:
self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
def showTimerList(self):
from Screens.TimerEdit import TimerEditList
self.session.open(TimerEditList)
#NEWLY ADDED
def showMovieSelection(self):
Screens.InfoBar.InfoBar.instance.showMovies()
def showAutoTimerList(self):
global autopoller
global autotimer
try:
from Plugins.Extensions.AutoTimer.plugin import main, autostart
from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
autopoller = AutoPoller()
autotimer = AutoTimer()
try:
autotimer.readXml()
except SyntaxError as se:
self.session.open(MessageBox, _('Your config file is not well-formed:\n%s') % str(se), type=MessageBox.TYPE_ERROR, timeout=10)
return
if autopoller is not None:
autopoller.stop()
from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview
self.session.openWithCallback(self.editCallback, AutoTimerOverview, autotimer)
except ImportError:
self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
def editCallback(self, session):
global autopoller
global autotimer
if session is not None:
autotimer.writeXml()
autotimer.parseEPG()
if config.plugins.autotimer.autopoll.value:
if autopoller is None:
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
autopoller = AutoPoller()
autopoller.start()
else:
autopoller = None
autotimer = None
def timerAdd(self):
self.RecordTimerQuestion(True)
def editTimer(self, timer):
self.session.open(TimerEntry, timer)
def removeTimer(self, timer):
self.closeChoiceBoxDialog()
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
if self.type != EPG_TYPE_GRAPH:
self['key_green'].setText(_('Record Once'))
self.key_green_choice = self.ADD_TIMER
self.getCurrentCursorLocation = self['list'].getCurrentCursorLocation()
self.refreshlist()
def disableTimer(self, timer):
self.closeChoiceBoxDialog()
timer.disable()
self.session.nav.RecordTimer.timeChanged(timer)
if self.type != EPG_TYPE_GRAPH:
self['key_green'].setText(_('Record Once'))
self.key_green_choice = self.ADD_TIMER
self.getCurrentCursorLocation = self['list'].getCurrentCursorLocation()
self.refreshlist()
def RecordTimerQuestion(self, manual=False):
cur = self['list'].getCurrent()
event = cur[0]
serviceref = cur[1]
if event is None:
return
eventid = event.getEventId()
refstr = ':'.join(serviceref.ref.toString().split(':')[:11])
title = None
for timer in self.session.nav.RecordTimer.timer_list:
if timer.eit == eventid and ':'.join(timer.service_ref.ref.toString().split(':')[:11]) == refstr:
cb_func1 = lambda ret: self.removeTimer(timer)
cb_func2 = lambda ret: self.editTimer(timer)
cb_func3 = lambda ret: self.disableTimer(timer)
menu = [(_("Delete Record Once"), 'CALLFUNC', self.RemoveChoiceBoxCB, cb_func1), (_("Edit Record Once"), 'CALLFUNC', self.RemoveChoiceBoxCB, cb_func2), (_("Disable Record Once"), 'CALLFUNC', self.RemoveChoiceBoxCB, cb_func3)]
title = _("Select action for timer %s:") % event.getEventName()
break
else:
if not manual:
menu = [(_("Record Once"), 'CALLFUNC', self.ChoiceBoxCB, self.doRecordTimer), (_("Record Series"), 'CALLFUNC', self.ChoiceBoxCB, self.addAutoTimer)]
title = "%s?" % event.getEventName()
else:
newEntry = RecordTimerEntry(serviceref, checkOldTimers=True, dirname=preferredTimerPath(), *parseEvent(event))
self.session.openWithCallback(self.finishedAdd, TimerEntry, newEntry)
if title:
self.ChoiceBoxDialog = self.session.instantiateDialog(ChoiceBox, title=title, list=menu, keys=['green', 'blue'], skin_name="RecordTimerQuestion")
serviceref = eServiceReference(str(self['list'].getCurrent()[1]))
posy = self['list'].getSelectionPosition(serviceref)
self.ChoiceBoxDialog.instance.move(ePoint(posy[0]-self.ChoiceBoxDialog.instance.size().width(),self.instance.position().y()+posy[1]))
self.showChoiceBoxDialog()
def recButtonPressed(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
self.RecordTimerQuestion()
def recButtonPressedLong(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.doZapTimer()
def RemoveChoiceBoxCB(self, choice):
self.closeChoiceBoxDialog()
if choice:
choice(self)
def ChoiceBoxCB(self, choice):
self.closeChoiceBoxDialog()
if choice:
try:
choice()
except:
				pass  # the callback raised; there is nothing sensible to recover here
def showChoiceBoxDialog(self):
self['okactions'].setEnabled(False)
if self.has_key('epgcursoractions'):
self['epgcursoractions'].setEnabled(False)
self['colouractions'].setEnabled(False)
self['recordingactions'].setEnabled(False)
self['epgactions'].setEnabled(False)
self["dialogactions"].setEnabled(True)
self.ChoiceBoxDialog['actions'].execBegin()
self.ChoiceBoxDialog.show()
if self.has_key('input_actions'):
self['input_actions'].setEnabled(False)
def closeChoiceBoxDialog(self):
self["dialogactions"].setEnabled(False)
if self.ChoiceBoxDialog:
self.ChoiceBoxDialog['actions'].execEnd()
self.session.deleteDialog(self.ChoiceBoxDialog)
self['okactions'].setEnabled(True)
if self.has_key('epgcursoractions'):
self['epgcursoractions'].setEnabled(True)
self['colouractions'].setEnabled(True)
self['recordingactions'].setEnabled(True)
self['epgactions'].setEnabled(True)
        if 'input_actions' in self:
self['input_actions'].setEnabled(True)
def doRecordTimer(self):
self.doInstantTimer(0)
def doZapTimer(self):
self.doInstantTimer(1)
def doInstantTimer(self, zap):
cur = self['list'].getCurrent()
event = cur[0]
serviceref = cur[1]
if event is None:
return
eventid = event.getEventId()
refstr = serviceref.ref.toString()
newEntry = RecordTimerEntry(serviceref, checkOldTimers=True, *parseEvent(event))
self.InstantRecordDialog = self.session.instantiateDialog(InstantRecordTimerEntry, newEntry, zap)
retval = [True, self.InstantRecordDialog.retval()]
self.session.deleteDialogWithCallback(self.finishedAdd, self.InstantRecordDialog, retval)
def finishedAdd(self, answer):
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
if not entry.repeated and not config.recording.margin_before.value and not config.recording.margin_after.value and len(simulTimerList) > 1:
change_time = False
conflict_begin = simulTimerList[1].begin
conflict_end = simulTimerList[1].end
if conflict_begin == entry.end:
entry.end -= 30
change_time = True
elif entry.begin == conflict_end:
entry.begin += 30
change_time = True
if change_time:
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
if self.type != EPG_TYPE_GRAPH:
self["key_green"].setText(_("Change Record Option"))
self.key_green_choice = self.REMOVE_TIMER
else:
if self.type != EPG_TYPE_GRAPH:
self['key_green'].setText(_('Record Once'))
self.key_green_choice = self.ADD_TIMER
self.getCurrentCursorLocation = self['list'].getCurrentCursorLocation()
self.refreshlist()
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def OK(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.zapnumberstarted:
self.dozumberzap()
else:
if config.epgselection.graph_ok.value == 'Zap' or config.epgselection.enhanced_ok.value == 'Zap' or config.epgselection.infobar_ok.value == 'Zap' or config.epgselection.multi_ok.value == 'Zap':
self.zapTo()
if config.epgselection.graph_ok.value == 'Zap + Exit' or config.epgselection.enhanced_ok.value == 'Zap + Exit' or config.epgselection.infobar_ok.value == 'Zap + Exit' or config.epgselection.multi_ok.value == 'Zap + Exit':
self.zap()
def OKLong(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
if self.zapnumberstarted:
self.dozumberzap()
else:
if config.epgselection.graph_oklong.value == 'Zap' or config.epgselection.enhanced_oklong.value == 'Zap' or config.epgselection.infobar_oklong.value == 'Zap' or config.epgselection.multi_oklong.value == 'Zap':
self.zapTo()
if config.epgselection.graph_oklong.value == 'Zap + Exit' or config.epgselection.enhanced_oklong.value == 'Zap + Exit' or config.epgselection.infobar_oklong.value == 'Zap + Exit' or config.epgselection.multi_oklong.value == 'Zap + Exit':
self.zap()
if config.epgselection.infobar_oklong.value == 'Event Info':
self.OpenSingleEPG()
def epgButtonPressed(self):
self.OpenSingleEPG()
def Info(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH and config.epgselection.graph_info.value == 'Channel Info':
self.infoKeyPressed()
elif self.type == EPG_TYPE_GRAPH and config.epgselection.graph_info.value == 'Single EPG':
self.OpenSingleEPG()
else:
self.infoKeyPressed()
def InfoLong(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH and config.epgselection.graph_infolong.value == 'Channel Info':
self.infoKeyPressed()
elif self.type == EPG_TYPE_GRAPH and config.epgselection.graph_infolong.value == 'Single EPG':
self.OpenSingleEPG()
else:
self.OpenSingleEPG()
def applyButtonState(self, state):
if state == 0:
self['now_button'].hide()
self['now_button_sel'].hide()
self['next_button'].hide()
self['next_button_sel'].hide()
self['more_button'].hide()
self['more_button_sel'].hide()
self['now_text'].hide()
self['next_text'].hide()
self['more_text'].hide()
self['key_red'].setText('')
else:
if state == 1:
self['now_button_sel'].show()
self['now_button'].hide()
else:
self['now_button'].show()
self['now_button_sel'].hide()
if state == 2:
self['next_button_sel'].show()
self['next_button'].hide()
else:
self['next_button'].show()
self['next_button_sel'].hide()
if state == 3:
self['more_button_sel'].show()
self['more_button'].hide()
else:
self['more_button'].show()
self['more_button_sel'].hide()
def onSelectionChanged(self):
cur = self['list'].getCurrent()
event = cur[0]
self['Event'].newEvent(event)
if cur[1] is None:
self['Service'].newService(None)
else:
self['Service'].newService(cur[1].ref)
if self.type == EPG_TYPE_MULTI:
count = self['list'].getCurrentChangeCount()
if self.ask_time != -1:
self.applyButtonState(0)
elif count > 1:
self.applyButtonState(3)
elif count > 0:
self.applyButtonState(2)
else:
self.applyButtonState(1)
datestr = ''
if event is not None:
now = time()
beg = event.getBeginTime()
nowTime = localtime(now)
begTime = localtime(beg)
if nowTime[2] != begTime[2]:
datestr = strftime(_('%A %e %b'), begTime)
else:
                datestr = _('Today')
self['date'].setText(datestr)
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
# self['key_red'].setText('Record Options')
if cur[1] is None or cur[1].getServiceName() == '':
                self['key_red'].setText(_('Search'))
                self.changeText = self.EMPTY
                return
            if event is None:
                self['key_red'].setText(_('Search'))
                self.changeText = self.EMPTY
                return
        else:
            self['key_red'].setText(_('IMDb Search'))
            if cur[1] is None or cur[1].getServiceName() == '':
                if self.key_green_choice != self.EMPTY:
                    self['key_green'].setText(_('Record Once'))
                    self.key_green_choice = self.EMPTY
                return
            if event is None:
                if self.key_green_choice != self.EMPTY:
                    self['key_green'].setText(_('Record Once'))
self.key_green_choice = self.EMPTY
return
serviceref = cur[1]
eventid = event.getEventId()
refstr = ':'.join(serviceref.ref.toString().split(':')[:11])
isRecordEvent = False
for timer in self.session.nav.RecordTimer.timer_list:
if timer.eit == eventid and ':'.join(timer.service_ref.ref.toString().split(':')[:11]) == refstr:
isRecordEvent = True
break
if self.type == EPG_TYPE_GRAPH:
if isRecordEvent:
self.changeText = "Change timer"
elif not isRecordEvent:
self.changeText = "Record Once"
#else:
# if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER:
# self["key_green"].setText(_("Change timer"))
# self.key_green_choice = self.REMOVE_TIMER
# elif not isRecordEvent:
# self['key_green'].setText(_('Record Once'))
# self.key_green_choice = self.ADD_TIMER
if self.eventviewDialog and (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH):
self.infoKeyPressed(True)
def moveTimeLines(self, force = False):
self.updateTimelineTimer.start((60 - int(time()) % 60) * 1000)
self['timeline_text'].setEntries(self['list'], self['timeline_now'], self.time_lines, force)
self['list'].l.invalidate()
def isPlayable(self):
current = ServiceReference(self.servicelist.getCurrentSelection())
return not current.ref.flags & (eServiceReference.isMarker | eServiceReference.isDirectory)
def setServicelistSelection(self, bouquet, service):
if self.servicelist:
if self.servicelist.getRoot() != bouquet:
self.servicelist.clearPath()
self.servicelist.enterPath(self.servicelist.bouquet_root)
self.servicelist.enterPath(bouquet)
self.servicelist.setCurrentSelection(service)
def closeEventViewDialog(self):
if self.eventviewDialog:
self.eventviewDialog.hide()
del self.eventviewDialog
self.eventviewDialog = None
def closeScreen(self):
if self.type == EPG_TYPE_SINGLE:
self.close()
return # stop and do not continue.
if self.session.nav.getCurrentlyPlayingServiceOrGroup() and self.StartRef and self.session.nav.getCurrentlyPlayingServiceOrGroup().toString() != self.StartRef.toString():
if self.zapFunc and self.StartRef and self.StartBouquet:
if ((self.type == EPG_TYPE_GRAPH and config.epgselection.graph_preview_mode.value) or
(self.type == EPG_TYPE_MULTI and config.epgselection.multi_preview_mode.value) or
(self.type in (EPG_TYPE_INFOBAR, EPG_TYPE_INFOBARGRAPH) and config.epgselection.infobar_preview_mode.value in ('1', '2')) or
(self.type == EPG_TYPE_ENHANCED and config.epgselection.enhanced_preview_mode.value)):
                    if '0:0:0:0:0:0:0:0:0' not in self.StartRef.toString():
                        self.zapFunc(None, zapback=True)
                    else:
                        self.session.nav.playService(self.StartRef)
else:
self.zapFunc(None, False)
if self.session.pipshown:
self.session.pipshown = False
del self.session.pip
self.closeEventViewDialog()
self.close(True)
def zap(self):
if self.zapFunc:
self.zapSelectedService()
self.closeEventViewDialog()
self.close(True)
else:
self.closeEventViewDialog()
self.close()
def zapSelectedService(self, prev=False):
currservice = self.session.nav.getCurrentlyPlayingServiceReference() and str(self.session.nav.getCurrentlyPlayingServiceReference().toString()) or None
if self.session.pipshown:
self.prevch = self.session.pip.getCurrentService() and str(self.session.pip.getCurrentService().toString()) or None
        else:
            self.prevch = currservice
lst = self["list"]
count = lst.getCurrentChangeCount()
if count == 0:
ref = lst.getCurrent()[1]
if ref is not None:
if (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH) and config.epgselection.infobar_preview_mode.value == '2':
if not prev:
if self.session.pipshown:
self.session.pipshown = False
del self.session.pip
self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = False)
return
if not self.session.pipshown:
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.show()
self.session.pipshown = True
n_service = self.pipServiceRelation.get(str(ref.ref), None)
if n_service is not None:
service = eServiceReference(n_service)
else:
service = ref.ref
if self.currch == service.toString():
if self.session.pipshown:
self.session.pipshown = False
del self.session.pip
self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = False)
return
if self.prevch != service.toString() and currservice != service.toString():
self.session.pip.playService(service)
self.currch = self.session.pip.getCurrentService() and str(self.session.pip.getCurrentService().toString())
else:
bouq = None
                    if self.primaryBouquet and ref.ref.toString() in lst.primaryServiceNumbers:
bouq = self.primaryBouquet
self.zapFunc(ref.ref, bouquet = bouq or self.getCurrentBouquet(), preview = prev)
self.currch = self.session.nav.getCurrentlyPlayingServiceReference() and str(self.session.nav.getCurrentlyPlayingServiceReference().toString())
self['list'].setCurrentlyPlaying(self.session.nav.getCurrentlyPlayingServiceOrGroup())
def zapTo(self):
if self.session.nav.getCurrentlyPlayingServiceOrGroup() and '0:0:0:0:0:0:0:0:0' in self.session.nav.getCurrentlyPlayingServiceOrGroup().toString():
from Screens.InfoBarGenerics import setResumePoint
setResumePoint(self.session)
if self.zapFunc:
self.zapSelectedService(True)
self.refreshTimer.start(2000)
if not self.currch or self.currch == self.prevch:
if self.zapFunc:
self.zapFunc(None, False)
self.closeEventViewDialog()
self.close('close')
else:
self.closeEventViewDialog()
self.close()
def keyNumberGlobal(self, number):
        if self.type == EPG_TYPE_GRAPH and config.epgselection.graph_usezaptochannel.value:
            # Search for the channel number the user is entering.
self.zapnumberstarted = True
self.NumberZapTimer.start(5000, True)
if not self.NumberZapField:
self.NumberZapField = str(number)
else:
self.NumberZapField += str(number)
self.handleServiceName()
self["searchnumber"].setText(self.NumberZapField + " " + self.zaptoservicename)
self["searchnumber"].show()
if len(self.NumberZapField) > 4:
self.NumberZapField = None
self["searchnumber"].hide()
self.dozumberzap()
        elif self.type == EPG_TYPE_GRAPH and not config.epgselection.graph_usezaptochannel.value:
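            # Digit shortcuts in the graphical EPG: 1/3 shrink/grow the
            # visible time span, 2/8 page up/down, 4/6 step the view
            # back/forward (updEvent), 5 jumps to the current time, 7
            # toggles the row height, 9 jumps to prime time, and 0 returns
            # to the top at the current time.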
if number == 1:
timeperiod = int(config.epgselection.graph_prevtimeperiod.value)
if timeperiod > 60:
timeperiod -= 30
self['list'].setEpoch(timeperiod)
config.epgselection.graph_prevtimeperiod.setValue(str(timeperiod))
self.moveTimeLines()
elif number == 2:
self.prevPage()
elif number == 3:
timeperiod = int(config.epgselection.graph_prevtimeperiod.value)
if timeperiod < 300:
timeperiod += 30
self['list'].setEpoch(timeperiod)
config.epgselection.graph_prevtimeperiod.setValue(str(timeperiod))
self.moveTimeLines()
elif number == 4:
self.updEvent(-2)
elif number == 5:
now = time() - int(config.epg.histminutes.value) * 60
self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
self['list'].resetOffset()
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines(True)
elif number == 6:
self.updEvent(+2)
elif number == 7:
                config.epgselection.graph_heightswitch.setValue(not config.epgselection.graph_heightswitch.value)
self['list'].setItemsPerPage()
self['list'].fillGraphEPG(None)
self.moveTimeLines()
elif number == 8:
self.nextPage()
elif number == 9:
basetime = localtime(self['list'].getTimeBase())
basetime = (basetime[0], basetime[1], basetime[2], int(config.epgselection.graph_primetimehour.value), int(config.epgselection.graph_primetimemins.value), 0, basetime[6], basetime[7], basetime[8])
self.ask_time = mktime(basetime)
if self.ask_time + 3600 < time():
self.ask_time += 86400
self['list'].resetOffset()
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines(True)
elif number == 0:
self.toTop()
now = time() - int(config.epg.histminutes.value) * 60
self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
self['list'].resetOffset()
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines()
elif self.type == EPG_TYPE_INFOBARGRAPH:
if number == 1:
timeperiod = int(config.epgselection.infobar_prevtimeperiod.value)
if timeperiod > 60:
timeperiod -= 60
self['list'].setEpoch(timeperiod)
                    config.epgselection.infobar_prevtimeperiod.setValue(str(timeperiod))
self.moveTimeLines()
elif number == 2:
self.prevPage()
elif number == 3:
timeperiod = int(config.epgselection.infobar_prevtimeperiod.value)
if timeperiod < 300:
timeperiod += 60
self['list'].setEpoch(timeperiod)
                    config.epgselection.infobar_prevtimeperiod.setValue(str(timeperiod))
self.moveTimeLines()
elif number == 4:
self.updEvent(-2)
elif number == 5:
now = time() - int(config.epg.histminutes.value) * 60
self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
self['list'].resetOffset()
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines(True)
elif number == 6:
self.updEvent(+2)
elif number == 8:
self.nextPage()
elif number == 9:
basetime = localtime(self['list'].getTimeBase())
basetime = (basetime[0], basetime[1], basetime[2], int(config.epgselection.infobar_primetimehour.value), int(config.epgselection.infobar_primetimemins.value), 0, basetime[6], basetime[7], basetime[8])
self.ask_time = mktime(basetime)
if self.ask_time + 3600 < time():
self.ask_time += 86400
self['list'].resetOffset()
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines(True)
elif number == 0:
self.toTop()
now = time() - int(config.epg.histminutes.value) * 60
self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
self['list'].resetOffset()
self['list'].fillGraphEPG(None, self.ask_time)
self.moveTimeLines()
else:
self.zapnumberstarted = True
self.NumberZapTimer.start(5000, True)
if not self.NumberZapField:
self.NumberZapField = str(number)
else:
self.NumberZapField += str(number)
self.handleServiceName()
self["number"].setText(self.zaptoservicename+'\n'+self.NumberZapField)
self["number"].show()
if len(self.NumberZapField) >= 4:
self.dozumberzap()
def dozumberzap(self):
self.zapnumberstarted = False
self.numberEntered(self.service, self.bouquet)
def handleServiceName(self):
if self.searchNumber:
self.service, self.bouquet = self.searchNumber(int(self.NumberZapField))
self.zaptoservicename = ServiceReference(self.service).getServiceName()
def numberEntered(self, service = None, bouquet = None):
if service is not None:
self.zapToNumber(service, bouquet)
def searchNumberHelper(self, serviceHandler, num, bouquet):
servicelist = serviceHandler.list(bouquet)
if servicelist is not None:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
if num == serviceIterator.getChannelNum():
return serviceIterator
serviceIterator = servicelist.getNext()
return None
def searchNumber(self, number):
if self.type == EPG_TYPE_GRAPH:
            bouquet = self.StartBouquet
            serviceHandler = eServiceCenter.getInstance()
            service = self.searchNumberHelper(serviceHandler, number, bouquet)
            if config.usage.multibouquet.value:
                if service is None:
for bouq in self.bouquets:
bouquet = bouq[1]
bouquetlist = serviceHandler.list(bouquet)
if bouquetlist is not None:
if bouquet.valid():
if bouquet.flags & eServiceReference.isDirectory:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if service is not None:
playable = not service.flags & (eServiceReference.isMarker | eServiceReference.isDirectory) or service.flags & eServiceReference.isNumberedMarker
if not playable:
service = None
break
if config.usage.alternative_number_mode.value:
break
else:
bouquet = self.servicelist.getRoot()
            serviceHandler = eServiceCenter.getInstance()
            service = self.searchNumberHelper(serviceHandler, number, bouquet)
            if config.usage.multibouquet.value:
                if service is None:
bouquet = self.servicelist.bouquet_root
bouquetlist = serviceHandler.list(bouquet)
if bouquetlist is not None:
bouquet = bouquetlist.getNext()
while bouquet.valid():
if bouquet.flags & eServiceReference.isDirectory:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if service is not None:
playable = not service.flags & (eServiceReference.isMarker | eServiceReference.isDirectory) or service.flags & eServiceReference.isNumberedMarker
if not playable:
service = None
break
if config.usage.alternative_number_mode.value:
break
bouquet = bouquetlist.getNext()
return service, bouquet
def zapToNumber(self, service, bouquet):
self["number"].hide()
self["searchnumber"].hide()
self.NumberZapField = None
self.CurrBouquet = bouquet
self.CurrService = service
if service is not None:
self.setServicelistSelection(bouquet, service)
if self.type == EPG_TYPE_GRAPH:
self.findchannel = True
self['list'].fillGraphEPG(self.getBouquetServices(self.bouquet), self.ask_time)
self['list'].moveToService(service) #remembers current event
self['bouquetlist'].moveToService(self.bouquet)
self['bouquetlist'].setCurrentBouquet(self.bouquet)
self.onCreate()
self.NumberZapTimer.stop()
    # Display the options dialog, provided an event is present (changeText is not empty).
def OptionDialog(self):
if self.changeText != self.EMPTY:
cur = self['list'].getCurrent()
event = cur[0]
menu = [(_("IMDb Search"), 'CALLFUNC', self.ChoiceBoxCB, self.openIMDb), (_(self.changeText), 'CALLFUNC', self.ChoiceBoxCB, self.OptionDialogAddTimer), (_("Record Series"), 'CALLFUNC', self.ChoiceBoxCB, self.addAutoTimer), (_("Bouquet list"), 'CALLFUNC', self.ChoiceBoxCB, self.Bouquetlist), (_("Search"), 'CALLFUNC', self.ChoiceBoxCB, self.openEPGSearch)]
title = "%s?" % event.getEventName()
            self.ChoiceBoxDialog = self.session.instantiateDialog(ChoiceBox, title=title, list=menu, keys=['1', '2', '3', '4', '5'], skin_name="OptionDialog")
serviceref = eServiceReference(str(self['list'].getCurrent()[1]))
posy = self['list'].getSelectionPosition(serviceref)
self.ChoiceBoxDialog.instance.move(ePoint(posy[0]-self.ChoiceBoxDialog.instance.size().width()/3,self.instance.position().y()+posy[1]-self.ChoiceBoxDialog.instance.size().height()))
self.showChoiceBoxDialog()
def OptionDialogAddTimer(self):
self.RecordTimerQuestion(True)
def RefreshColouredKeys(self):
if config.epgselection.graph_red.value == "showoptiondialog":
self['key_red'] = Button(_('Record Options'))
if config.epgselection.graph_red.value == "24plus":
self['key_red'] = Button(_('+24 Hours'))
if config.epgselection.graph_red.value == "24minus":
self['key_red'] = Button(_('-24 Hours'))
if config.epgselection.graph_red.value == "timer":
self['key_red'] = Button(_('Record Once'))
if config.epgselection.graph_red.value == "imdb" or config.epgselection.graph_red.value == None :
self['key_red'] = Button(_('IMDb'))
if config.epgselection.graph_red.value == "autotimer":
self['key_red'] = Button(_('Record Series'))
if config.epgselection.graph_red.value == "bouquetlist":
self['key_red'] = Button(_('BouquetList'))
if config.epgselection.graph_red.value == "epgsearch":
self['key_red'] = Button(_('EPG Search'))
if config.epgselection.graph_red.value == "showmovies":
self['key_red'] = Button(_('View Recordings'))
if config.epgselection.graph_red.value == "record":
self['key_red'] = Button(_('Record Options'))
if config.epgselection.graph_red.value == "gotodatetime":
self['key_red'] = Button(_('Goto Date/Time'))
if config.epgselection.graph_green.value == "showoptiondialog":
self['key_green'] = Button(_('Record Options'))
if config.epgselection.graph_green.value == "24plus":
self['key_green'] = Button(_('+24 Hours'))
if config.epgselection.graph_green.value == "24minus":
self['key_green'] = Button(_('-24 Hours'))
if config.epgselection.graph_green.value == "timer" or config.epgselection.graph_green.value == None :
self['key_green'] = Button(_('Record Once'))
if config.epgselection.graph_green.value == "imdb":
self['key_green'] = Button(_('IMDb'))
if config.epgselection.graph_green.value == "autotimer":
self['key_green'] = Button(_('Record Series'))
if config.epgselection.graph_green.value == "bouquetlist":
self['key_green'] = Button(_('BouquetList'))
if config.epgselection.graph_green.value == "epgsearch":
self['key_green'] = Button(_('EPG Search'))
if config.epgselection.graph_green.value == "showmovies":
self['key_green'] = Button(_('View Recordings'))
if config.epgselection.graph_green.value == "record":
self['key_green'] = Button(_('Record Options'))
if config.epgselection.graph_green.value == "gotodatetime":
self['key_green'] = Button(_('Goto Date/Time'))
if config.epgselection.graph_yellow.value == "showoptiondialog":
self['key_yellow'] = Button(_('Record Options'))
if config.epgselection.graph_yellow.value == "24plus":
self['key_yellow'] = Button(_('+24 Hours'))
if config.epgselection.graph_yellow.value == "24minus":
self['key_yellow'] = Button(_('-24 Hours'))
if config.epgselection.graph_yellow.value == "timer":
self['key_yellow'] = Button(_('Record Once'))
if config.epgselection.graph_yellow.value == "imdb":
self['key_yellow'] = Button(_('IMDb'))
if config.epgselection.graph_yellow.value == "autotimer":
self['key_yellow'] = Button(_('Record Series'))
if config.epgselection.graph_yellow.value == "bouquetlist":
self['key_yellow'] = Button(_('BouquetList'))
if config.epgselection.graph_yellow.value == "epgsearch" or config.epgselection.graph_yellow.value == None :
self['key_yellow'] = Button(_('EPG Search'))
if config.epgselection.graph_yellow.value == "showmovies":
self['key_yellow'] = Button(_('View Recordings'))
if config.epgselection.graph_yellow.value == "record":
self['key_yellow'] = Button(_('Record Options'))
if config.epgselection.graph_yellow.value == "gotodatetime":
self['key_yellow'] = Button(_('Goto Date/Time'))
if config.epgselection.graph_blue.value == "showoptiondialog":
self['key_blue'] = Button(_('Record Options'))
if config.epgselection.graph_blue.value == "24plus":
self['key_blue'] = Button(_('+24 Hours'))
if config.epgselection.graph_blue.value == "24minus":
self['key_blue'] = Button(_('-24 Hours'))
if config.epgselection.graph_blue.value == "timer":
self['key_blue'] = Button(_('Record Once'))
if config.epgselection.graph_blue.value == "imdb":
self['key_blue'] = Button(_('IMDb'))
if config.epgselection.graph_blue.value == "autotimer" or config.epgselection.graph_blue.value == None :
self['key_blue'] = Button(_('Record Series'))
if config.epgselection.graph_blue.value == "bouquetlist":
self['key_blue'] = Button(_('BouquetList'))
if config.epgselection.graph_blue.value == "epgsearch":
self['key_blue'] = Button(_('EPG Search'))
if config.epgselection.graph_blue.value == "showmovies":
self['key_blue'] = Button(_('View Recordings'))
if config.epgselection.graph_blue.value == "record":
self['key_blue'] = Button(_('Record Options'))
if config.epgselection.graph_blue.value == "gotodatetime":
self['key_blue'] = Button(_('Goto Date/Time'))
return
class SingleEPG(EPGSelection):
def __init__(self, session, service, EPGtype="single"):
EPGSelection.__init__(self, session, service=service, EPGtype=EPGtype)
self.skinName = 'EPGSelection' |
vasyarv/edx-platform | refs/heads/master | lms/djangoapps/django_comment_client/tests/factories.py | 149 | from factory.django import DjangoModelFactory
from django_comment_common.models import Role, Permission
class RoleFactory(DjangoModelFactory):
FACTORY_FOR = Role
name = 'Student'
course_id = 'edX/toy/2012_Fall'
class PermissionFactory(DjangoModelFactory):
FACTORY_FOR = Permission
name = 'create_comment'
|
jdnier/rexlib | refs/heads/master | tokens.py | 1 | """
XML token classes representing mark up and text.
The property idiom used in this module is discussed here:
http://docs.python.org/release/3.2/library/functions.html#property
"""
import re
import sys
from collections import OrderedDict
from .rex import XML_SPE_, ElemTagRE_, AttRE_
__all__ = [
'Cdata', 'Comment', 'Doctype', 'Empty', 'End', 'Error', 'PI', 'Start', 'StartOrEmpty', 'Tag',
'Text', 'Token', 'XmlDecl',
'tokenize',
'RexlibError', 'MarkupError', 'WellformednessError', 'SecondaryParsingError'
]
#
# Token Classes
#
class Token(object):
"""
Abstract superclass for all token classes.
"""
__slots__ = ['xml']
template = NotImplemented
# TODO: Move encoding to tokenizer function(s).
encoding = sys.getdefaultencoding()
MAX_REPR_WIDTH = 60
def __repr__(self):
"""
        Tokens longer than MAX_REPR_WIDTH will be sliced (with an ellipsis
        added to indicate that the whole token is not being displayed). This
        is useful for keeping the display of Text tokens (which can be very
        long) manageable.
To change the slice size used for all tokens, set the class variable
Token.MAX_REPR_WIDTH. Setting it to None will cause the full token
to be displayed; the usual Python convention,
eval(repr(token)) == token, then holds.
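
        For instance (illustrative):

            >>> Token.MAX_REPR_WIDTH = 10
            >>> Text('0123456789abcdef')
            Text('0123456789...')
            >>> Token.MAX_REPR_WIDTH = 60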
"""
text = self.xml
MAX_REPR_WIDTH = self.MAX_REPR_WIDTH
if MAX_REPR_WIDTH is not None and len(text) > MAX_REPR_WIDTH:
text = '{0}...'.format(text[:MAX_REPR_WIDTH])
return '{self.__class__.__name__}({text!r})'.format(
self=self, text=text)
def is_a(self, token_class, *_not_used):
"""
Check whether the current token is an instance of class token_class.
token.is_a(Start) reads as "token is a Start?"
Positional arguments are used by some token classes (Tag: *names,
PI: *targets).
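
        For example (End parsing is plain string slicing, so this holds
        as written):

            >>> End('</p>').is_a(Tag, 'p')
            True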
"""
return isinstance(self, token_class)
def reserialize(self):
"""
Update self.xml based on internal state.
"""
raise NotImplementedError
class Text(Token):
"""
Plain text: a run of text not containing the "<" character.
"""
__slots__ = []
def __init__(self, xml):
self.xml = xml
@property
def isspace(self):
"""isspace property: token is whitespace"""
return self.xml.isspace()
class Tag(Token):
"""
Abstract superclass for Start, End, and Empty.
"""
__slots__ = ['_name']
def is_a(self, token_class, *names):
return (isinstance(self, token_class)
and (not names or self.name in names))
@property
def name(self):
"""name property: the tag name"""
return self._name
@name.setter
def name(self, name):
self._name = name
self.reserialize()
# TODO: add basic namespace extraction support for attributes?
@property
def ns_prefix(self):
"""ns_prefix property: namespace prefix of qualified tag name"""
qname = self._name
return ':' in qname and qname.split(':')[0] or ''
@ns_prefix.setter
def ns_prefix(self, prefix):
qname = self._name
if ':' in qname:
old_prefix, name = qname.split(':', 1)
else:
old_prefix, name = '', qname
if old_prefix != prefix:
# Don't reserialize needlessly.
if prefix:
self._name = '{prefix}:{name}'.format(**locals())
else:
self._name = name
self.reserialize()
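# Illustrative sketch of the qualified-name helpers above (End parsing is
# plain string slicing, so no regex behaviour is assumed):
#
#     >>> tag = End('</x:body>')
#     >>> tag.ns_prefix
#     'x'
#     >>> tag.ns_prefix = ''
#     >>> tag.xml
#     '</body>'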
class StartOrEmpty(Tag):
"""
Abstract superclass for Start and Empty
"""
__slots__ = ['attributes']
def __init__(self, xml):
self.xml = xml
# Parse element name and attributes.
m = ElemTagRE_.search(xml)
self._name = m.group('name')
self.attributes = attributes = AttributeDict(token=self)
for m in AttRE_.finditer(m.group('attributes')):
attributes[m.group('attribute_name')] = m.group('attribute_value')[1:-1]
def __getitem__(self, attribute_name):
return self.attributes.get(attribute_name)
def __setitem__(self, attribute_name, xml):
self.attributes[attribute_name] = xml
def __delitem__(self, attribute_name):
del self.attributes[attribute_name]
def __contains__(self, attribute_name):
return attribute_name in self.attributes
def delete_attribute(self, attribute_name):
if attribute_name in self.attributes:
del self.attributes[attribute_name]
def set_attribute_order(self, attribute_order=[], sort=False):
"""
Re-order attributes based on attribute_order list. Any attributes
listed in attribute_order will appear first (and in that order); any
remaining attributes will follow in original order. If sort is set
to true, remaining attributes will appear in case-insensitive sorted
order.
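
        A minimal sketch (assuming the REX regexes parse the tag and its
        attributes as usual):

            >>> tag = Start('<a c="3" a="1" b="2">')
            >>> tag.set_attribute_order(['b'], sort=True)
            >>> tag.xml
            '<a b="2" a="1" c="3">'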
"""
self.attributes.set_attribute_order(attribute_order, sort)
def reserialize(self):
self.xml = self.template.format(self=self)
class Start(StartOrEmpty):
"""
A start tag: <tag> or <tag att="val">
"""
__slots__ = []
template = '<{self.name}{self.attributes.to_xml}>'
def __init__(self, xml):
super(Start, self).__init__(xml)
class Empty(StartOrEmpty):
"""
An empty tag: <tag/> or <tag att="val"/>
"""
__slots__ = []
template = '<{self.name}{self.attributes.to_xml}/>'
def __init__(self, xml):
super(Empty, self).__init__(xml)
class End(Tag):
"""
An end tag: </tag>
"""
__slots__ = []
template = '</{self.name}>'
def __init__(self, xml):
self.xml = xml
self._name = xml.split('/')[1][:-1].strip()
def reserialize(self):
self.xml = self.template.format(self=self)
class Comment(Token):
"""
A comment: <!-- comment -->
"""
__slots__ = ['_content']
template = '<!--{self.content}-->'
def __init__(self, xml):
self.xml = xml
self._content = xml[4:-3]
def reserialize(self):
self.xml = self.template.format(self=self)
@property
def content(self):
"""content property: the content of the comment"""
return self._content
@content.setter
def content(self, s):
self._content = s
self.reserialize()
class PI(Token):
"""
A processing instruction: <?target instruction?>
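
    Pseudoattributes in the instruction are exposed through a mapping
    interface, for example (assuming AttRE_ matches attributes as usual):

        >>> pi = PI('<?xml-stylesheet href="a.css"?>')
        >>> pi.target
        'xml-stylesheet'
        >>> pi['href']
        'a.css'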
"""
__slots__ = ['_target', '_instruction', '_pseudoattributes']
template = '<?{self.target}{self.space}{self.instruction}?>'
def __init__(self, xml):
self.xml = xml
self._pseudoattributes = None
# Parse PI into target and instruction
# XML: <?target instruction?> (endslice -> -2 for xml)
# SGML: <?target instruction> (endslice -> -1 for sgml)
endslice = -2 if xml.endswith('?>') else -1
try:
self._target, self._instruction = xml[2:endslice].split(None, 1)
except ValueError:
# The PI has a target but no instruction.
self._target = xml[2:endslice]
self._instruction = ''
self._target = self._target.strip()
self._instruction = self._instruction.strip()
def __getitem__(self, attribute_name):
"""
Wait to parse instruction for pseudoattributes until first attribute
lookup.
"""
if not self._pseudoattributes:
self._parse_pseudoattributes()
return self._pseudoattributes.get(attribute_name)
def __setitem__(self, attribute_name, value):
"""
Replace a pseudoattribute if it exists; otherwise append it to the
end of the instruction.
"""
if not self._pseudoattributes:
self._parse_pseudoattributes()
self._pseudoattributes[attribute_name] = value
span = self._pseudoattributes.spans.get(attribute_name)
if span:
i, j = span
l = list(self._instruction)
l[i:j] = ' {attribute_name}="{value}"'.format(**locals())
self._instruction = ''.join(l)
else:
self._instruction += ' {attribute_name}="{value}"'.format(**locals())
self._locate_pseudoattributes()
self.reserialize()
def __delitem__(self, attribute_name):
if not self._pseudoattributes:
self._parse_pseudoattributes()
del self._pseudoattributes[attribute_name]
span = self._pseudoattributes.spans[attribute_name]
i, j = span
l = list(self._instruction)
del l[i:j]
self._instruction = ''.join(l)
self._locate_pseudoattributes()
self.reserialize()
def __contains__(self, attribute_name):
if self._pseudoattributes is not None:
return attribute_name in self._pseudoattributes
else:
return False
def _parse_pseudoattributes(self):
"""
Find anything attribute-like in the PI instruction and store as
attributes.
"""
self._pseudoattributes = AttributeDict(token=self)
# Add a spans attribute to store the offsets of pseudoattributes.
self._pseudoattributes.spans = {}
self._locate_pseudoattributes()
def _locate_pseudoattributes(self):
"""
Find the offsets of pseudoattributes within self._instruction.
This method is called whenever a pseudoattribute is updated
or deleted.
"""
spans = self._pseudoattributes.spans
pseudoattributes = self._pseudoattributes
if pseudoattributes:
# Clear any previous values.
pseudoattributes.clear()
spans.clear()
        # Regex AttRE_ requires initial whitespace to match, hence the ' '
        # added below.
for m in AttRE_.finditer(' ' + self._instruction):
attribute_name = m.group('attribute_name')
            pseudoattributes[attribute_name] = m.group('attribute_value')[1:-1]  # strip delimiters
# Get the span for the attribute using the 'attribute' named group,
# which includes the preceding whitespace.
i, j = m.span('attribute')
# Compensate span for initial space added above.
if i - 1 < 0:
# avoid negative slices
spans[attribute_name] = (0, j - 1)
else:
spans[attribute_name] = (i - 1, j - 1)
def reserialize(self):
"""
Normalization note: instruction will be normalized to remove initial
whitespace.
"""
self._instruction = self._instruction.lstrip()
self.xml = self.template.format(self=self)
def is_a(self, token_class, *targets):
return (isinstance(self, token_class)
and (not targets or self.target in targets))
@property
def target(self):
"""target property: the PI target"""
return self._target
@target.setter
def target(self, val):
self._target = val
self.reserialize()
@property
def instruction(self):
"""instruction property: the PI instruction"""
return self._instruction
@instruction.setter
def instruction(self, val):
self._instruction = val
self._pseudoattributes = None
self.reserialize()
@property
def space(self):
"""
space property: space necessary to separate target and instruction
        (' ' if the instruction is not empty, otherwise '').
"""
return ' ' if self.instruction.lstrip() else ''
class XmlDecl(PI):
"""
An XML Declaration: <?xml version="1.0" encoding="utf-8" ...?>
"""
__slots__ = []
def __init__(self, xml):
super(XmlDecl, self).__init__(xml)
encoding = self['encoding'] # the XmlDecl encoding pseudoattribute
if encoding:
Token.encoding = encoding
doctype_parser_ = re.compile("""\
(?xs)
<!DOCTYPE\s+(?P<document_element>\S+)
(?:(?:\s+(?P<id_type>SYSTEM|PUBLIC))(?:\s+(?P<delim>["'])
(?P<id_value>.*?)(?P=delim))?)?
(?:\s*\[(?P<internal_subset>.*)\])?
\s*>
""")
class Doctype(Token):
"""
A DOCTYPE declaration: <!DOCTYPE tag ...>
For the following example:
<!DOCTYPE x:body SYSTEM "/S:/xml/dtd/xhtml1-strict-prefixed.dtd"
[<!ENTITY abc "xyz">]>
self.document_element -> 'x:body'
self.id_type -> 'SYSTEM'
self.id_value -> '/S:/xml/dtd/xhtml1-strict-prefixed.dtd'
self.internal_subset -> '<!ENTITY abc "xyz">'
"""
__slots__ = ['_document_element', '_id_type', '_id_value',
'_internal_subset']
template = '<!DOCTYPE {0}>'
def __init__(self, xml):
self.xml = xml
m = doctype_parser_.search(xml)
if m:
d = m.groupdict()
self._document_element = d['document_element']
self._id_type = d['id_type'] or ''
self._id_value = d['id_value'] or ''
self._internal_subset = d['internal_subset'] or ''
else:
raise SecondaryParsingError(
'unexpected DOCTYPE found: {self.xml}'
.format(self=self)
)
def reserialize(self):
l = [self.document_element]
if self._id_type:
l.append(self._id_type)
if self._id_value:
l.append('"{self._id_value}"'.format(self=self))
if self._internal_subset:
l.append('[{self._internal_subset}]'.format(self=self))
self.xml = self.template.format(' '.join(l))
@property
def document_element(self):
"""document_element property: the document element name"""
return self._document_element
@document_element.setter
def document_element(self, val):
self._document_element = val
self.reserialize()
@property
def id_type(self):
"""id_type property: either "PUBLIC" or "SYSTEM" or """""
return self._id_type
@id_type.setter
def id_type(self, val):
self._id_type = val
self.reserialize()
@property
def id_value(self):
"""id_value property: a public URI or system path"""
return self._id_value
@id_value.setter
def id_value(self, val):
self._id_value = val
self.reserialize()
@property
def internal_subset(self):
"""internal_subset property: the internal DTD subset"""
return self._internal_subset
@internal_subset.setter
def internal_subset(self, val):
self._internal_subset = val
self.reserialize()
class Cdata(Token):
"""
A CDATA section: <![CDATA[ literal <markup/> ]]>
"""
__slots__ = ['_content']
template = '<![CDATA[{self.content}]]>'
def __init__(self, xml):
self.xml = xml
self._content = self.xml[9:-3]
def reserialize(self):
self.xml = self.template.format(self=self)
@property
def content(self):
return self._content
@content.setter
def content(self, content):
self._content = content
self.reserialize()
@property
def escaped_content(self):
return self._content.replace('&', '&').replace('<', '<')
def to_text_token(self):
"""
Escape markup characters and remove CDATA section delimiters, returning
a Text token.
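
        For example (illustrative):

            >>> Cdata('<![CDATA[a < b & c]]>').to_text_token()
            Text('a &lt; b &amp; c')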
"""
return Text(self.escaped_content)
class Error(Token):
"""
A markup error: Token starts with '<' but does not end with '>'.
"""
__slots__ = ['span', 'line', 'column']
def __init__(self, xml, span, line=None, column=None):
self.xml = xml
self.span = span # (start, end) position of token in original string
# TODO: Adjust tokenizer to add line number and column when desired.
# (Tokenizer option? Tokenizer subclass? Only calculate when/after an
# error is encountered?).
def reserialize(self):
pass
#
# Utility classes
#
class AttributeDict(OrderedDict):
"""
A dictionary that preserves the order in which attributes are added.
If the constructor is passed a dictionary with attributes, the order
for those attributes will be random; however, attributes added
subsequently will be ordered following the initial population of
attributes.
self.token is a reference back to the Start or Empty token that
instantiated the AttributeDict; it's used to trigger re-serialization
in the token when an attribute is changed via token.attributes.
"""
def __init__(self, d=None, token=None):
self.token = token
if d is None:
d = {}
OrderedDict.__init__(self, d)
def __setitem__(self, key, item):
OrderedDict.__setitem__(self, key, item)
if self.token:
self.token.reserialize()
def __missing__(self, key):
"""Set a default for missing key, rather than raising an exception."""
return ''
def __delitem__(self, key):
"""Remove items without raising exceptions."""
if key in self:
OrderedDict.__delitem__(self, key)
if self.token:
self.token.reserialize()
def set_attribute_order(self, attribute_order=None, sort=False):
"""
Re-order attributes based on attribute_order list. Any attributes
listed in attribute_order will appear first (and in that order); any
remaining attributes will follow in original order. If sort is set
to true, remaining attributes will appear in case-insensitive sorted
order.
"""
d = OrderedDict(self)
self.clear()
if attribute_order:
for attribute_name in attribute_order:
if attribute_name in d:
self[attribute_name] = d[attribute_name]
d.pop(attribute_name)
if sort and d:
# Do a case-insensitive sort on remaining attributes.
for key in sorted(d, key=str.lower):
self[key] = d[key]
elif d:
# If there are any remaining attribute names in d, add them now.
for key in d:
self[key] = d[key]
del d
if self.token:
self.token.reserialize()
@property
def to_xml(self):
"""
Serialize attribute dict to a string of attributes in the form
' attr1="value 1" attr2="value 2"'.
Normalization note: Attribute value delimiters will be normalized to
double quotes. Any double quotes appearing in attribute values are
escaped as ".
"""
try:
return ''.join(
' {attribute_name}="{attribute_value}"'
.format(
attribute_name=attribute_name,
attribute_value=attribute_value.replace('"', '"')
)
for attribute_name, attribute_value in self.items()
)
except AttributeError:
raise RexlibError(
'Attribute value was not a string: {self}'
.format(self=self)
)
def has_key_nocase(self, key):
"""A case-insensitive version of 'attribute_name' in self."""
return key.lower() in [k.lower() for k in self]
#
# Exceptions
#
class RexlibError(Exception):
"""Superclass for all rexlib exceptions."""
def __init__(self, val):
self.val = val
def __str__(self):
return self.val
class MarkupError(RexlibError):
"""Used for syntax errors in markup."""
def __str__(self):
return 'Syntax error in markup: "{self.val}"'.format(self=self)
class WellformednessError(RexlibError):
"""Used for tag-nesting errors."""
def __str__(self):
return 'Wellformedness error: "{self.val}"'.format(self=self)
class SecondaryParsingError(RexlibError):
"""Used to indicate errors during secondary parsing."""
def __str__(self):
return 'Secondary parsing error: "{self.val}"'.format(self=self)
#
# The tokenizer
#
def tokenize(input, SPE_=XML_SPE_, error_stream=sys.stderr):
"""
A generator function for classifying each token matched by the REX shallow
parsing expression.
Set SPE_=SGML_SPE_ to tokenize SGML.
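
    For example (assuming XML_SPE_ matches as usual):

        >>> [t.__class__.__name__ for t in tokenize('<a href="x">hi</a>')]
        ['Start', 'Text', 'End']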
"""
tokenizer = SPE_.finditer
for m in tokenizer(input):
xml = m.group(0)
if xml[0] != '<':
# Token is text
yield Text(xml)
else:
if xml[-1] == '>':
# Token is markup
c = xml[1]
if c not in '/!?':
if xml[-2] == '/':
yield Empty(xml)
else:
yield Start(xml)
elif c == '/':
yield End(xml)
elif c == '!':
if xml.startswith('<!--'):
yield Comment(xml)
elif xml[2] == '[':
yield Cdata(xml)
elif xml.startswith('<!DOCTYPE'):
yield Doctype(xml)
elif c == '?':
if xml.startswith('<?xml '):
yield XmlDecl(xml)
else:
yield PI(xml)
else:
# REX's error condition (a markup item not ending with '>').
yield Error(xml, span=m.span())
if error_stream:
error_stream.write(
pprint_error_context(m, 'Syntax error in markup'))
#def stream_tokenizer(fin, SPE_=XML_SPE_):
# """
# Tokenize a steam to match objects.
# - one token lookahead
# - allows strings to be split into multiple tokens (so that really
# long strings don't accumulate in memory)
#
# TODO: There's a bug in the code below that I haven't gone back to find
# yet, the symptom being overlaping tokens.
#
# """
# m_prev = None
# for s in stream_reader(fin):
# if m_prev:
# xml = m_prev.group(0)
# if xml.startswith('<'):
# if xml.endswith('>'):
# yield m_prev
# else:
# # Incomplete markup; prepend to next buffer.
# s = '%s%s' % (xml, s)
# else:
# # Allowing text to be yielded as multiple tokens.
# yield m_prev
# m_prev = None
#
# for m in SPE_.finditer(s):
# xml = m.group(0)
# if m_prev:
# yield m_prev
# m_prev = m
# if m_prev:
# yield m_prev
#
# Utility functions
#
def pprint_error_context(m, msg, context_size=30):
"""
Prettyprint a markup error's context.
"""
s = m.string
end = m.end()
start_ellipsis, end_ellipsis = '', ''
if end >= context_size:
start = end - context_size
if end != context_size:
start_ellipsis = '...'
else:
# Start must not be negative due to the special meaning of negative
# slice indexes.
start = 0
if end + context_size < len(s):
end_ellipsis = '...'
before = repr(
'{0}"{1}'.format(start_ellipsis, s[start:end])
)[1:-1]
after = repr(
'{0}"{1}' .format(s[end:end + context_size], end_ellipsis)
)[1:-1]
indent = ' ' * len(before)
return (
'\n {msg}:\n {before}\n {indent}{after}\n'
.format(**locals())
)
|
geosolutions-it/geonode | refs/heads/master | geonode/api/views.py | 4 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
from django.utils import timezone
from django.http import HttpResponse
from django.contrib.auth import get_user_model
from django.views.decorators.csrf import csrf_exempt
from guardian.models import Group
from oauth2_provider.models import AccessToken
from oauth2_provider.exceptions import OAuthToolkitError, FatalClientError
from allauth.account.utils import user_field, user_email, user_username
from ..utils import json_response
from ..decorators import superuser_or_apiauth
from ..base.auth import (
get_token_object_from_session,
extract_headers,
get_auth_token)
def verify_access_token(request, key):
try:
token = None
if request:
token = get_token_object_from_session(request.session)
if not token or token.key != key:
token = AccessToken.objects.get(token=key)
if not token.is_valid():
raise OAuthToolkitError('AccessToken is not valid.')
if token.is_expired():
raise OAuthToolkitError('AccessToken has expired.')
except AccessToken.DoesNotExist:
        raise FatalClientError("AccessToken not found.")
except Exception:
return None
return token
@csrf_exempt
def user_info(request):
headers = extract_headers(request)
user = request.user
if not user:
out = {'success': False,
'status': 'error',
'errors': {'user': ['User is not authenticated']}
}
return json_response(out, status=401)
access_token = None
if 'Authorization' not in headers or 'Bearer' not in headers["Authorization"]:
access_token = get_auth_token(user)
if not access_token:
out = {
'success': False,
'status': 'error',
'errors': {'auth': ['No token provided.']}
}
return json_response(out, status=403)
else:
access_token = headers["Authorization"].replace('Bearer ', '')
groups = [group.name for group in user.groups.all()]
if user.is_superuser:
groups.append("admin")
user_info = json.dumps({
"sub": str(user.id),
"name": " ".join([user_field(user, 'first_name'), user_field(user, 'last_name')]),
"given_name": user_field(user, 'first_name'),
"family_name": user_field(user, 'last_name'),
"email": user_email(user),
"preferred_username": user_username(user),
"groups": groups,
"access_token": str(access_token)
})
response = HttpResponse(
user_info,
content_type="application/json"
)
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
@csrf_exempt
def verify_token(request):
if (request.POST and 'token' in request.POST):
token = None
try:
access_token = request.POST.get('token')
token = verify_access_token(request, access_token)
except Exception as e:
return HttpResponse(
json.dumps({
'error': str(e)
}),
status=403,
content_type="application/json"
)
if token:
token_info = json.dumps({
'client_id': token.application.client_id,
'user_id': token.user.id,
'username': token.user.username,
'issued_to': token.user.username,
'access_token': access_token,
'email': token.user.email,
'verified_email': 'true',
'access_type': 'online',
'expires_in': (token.expires - timezone.now()).total_seconds() * 1000
})
response = HttpResponse(
token_info,
content_type="application/json"
)
response["Authorization"] = f"Bearer {access_token}"
return response
else:
return HttpResponse(
json.dumps({
'error': 'No access_token from server.'
}),
status=403,
content_type="application/json"
)
return HttpResponse(
json.dumps({
'error': 'invalid_request'
}),
status=403,
content_type="application/json"
)
@csrf_exempt
@superuser_or_apiauth()
def roles(request):
groups = [group.name for group in Group.objects.all()]
groups.append("admin")
return HttpResponse(
json.dumps({
'groups': groups
}),
content_type="application/json"
)
@csrf_exempt
@superuser_or_apiauth()
def users(request):
user_name = request.path_info.rsplit('/', 1)[-1]
User = get_user_model()
    if not user_name or user_name == "users":
        users = list(User.objects.all())
    else:
        users = list(User.objects.filter(username=user_name))
        if not users:
            # Try using the user email
            users = list(User.objects.filter(email=user_name))
json_object = []
for user in users:
groups = [group.name for group in user.groups.all()]
if user.is_superuser:
groups.append("admin")
json_object.append({
'username': user.username,
'groups': groups
})
return HttpResponse(
json.dumps({
'users': json_object
}),
content_type="application/json"
)
@csrf_exempt
@superuser_or_apiauth()
def admin_role(request):
return HttpResponse(
json.dumps({
'adminRole': 'admin'
}),
content_type="application/json"
)
|
opendesk/pyramid_torque_engine | refs/heads/master | src/pyramid_torque_engine/action.py | 1 | # -*- coding: utf-8 -*-
"""Including this module sets up a configuration system for specifying which
actions are valid for a given resource (which will be in a given state).
Register valid actions using:
config.add_engine_action(
IFoo, # context
'a.DECLINE', # action (verb)
('s.QUOTED',), # from states
's.DECLINED' # to state
)
Then lookup a configured state machine for a resource using:
machine = request.get_state_machine(context, action='a.DECLINE')
The machine will have its current state set to the current state of the context,
which means it can be used to check whether an action is valid:
machine.can('a.DECLINE') # True or False
You can register a valid action for any state using the `*` character and
register an action that doesn't actually set the state using ``Ellipsis``:
# "decline" is a valid action in any state.
config.add_engine_action(IFoo, 'a.DECLINE', '*', 's.DECLINED')
# "tag" doesn't change the state.
config.add_engine_action(IFoo, 'a.TAG', ('s.DRAFT', 's.PUBLISHED'), Ellipsis)
# "share" is a valid action in any state and doesn't change the state.
config.add_engine_action(IFoo, 'a.SHARE', '*', Ellipsis)
"""
__all__ = [
'AddEngineAction',
'StateChanger',
'get_state_machine',
]
import logging
logger = logging.getLogger(__name__)
import fysom
from collections import defaultdict
from . import util
from . import repo
class StateChanger(object):
"""High level api to validate and perform state changes that uses the
engine configuration and client to make decisions and notify.
"""
def __init__(self, request, **kwargs):
self.request = request
self.engine = kwargs.get('engine', request.torque.engine)
self.get_machine = kwargs.get('get_machine', request.get_state_machine)
def can_perform(self, context, action, machine=None):
"""Can ``self.context`` perform ``action`` in its current state?"""
if machine is None:
machine = self.get_machine(context, action=action)
return bool(machine and machine.can(action))
def perform(self, context, action, event):
"""Return the next state that ``self.context`` should transition to iff
it's different from the current state.
"""
# Unpack.
engine = self.engine
machine = self.get_machine(context, action=action)
current_state = machine.current
request = self.request
# Prepare return value.
next_state, has_changed, dispatched = None, False, []
# Use the state machine to give us the next state.
try:
machine.trigger(action)
except TypeError:
# If the to_state is `Ellipsis` that means noop. Now, the current
# fysom machinery raises a TypeError when the value is Ellipsis,
# so we catch that error in, and only in, that case.
if machine.current is not Ellipsis:
raise
# And here (regardless of whether a TypeError was raised) we revert to
# the previous state if the new value is `Ellipsis`.
        if machine.current is Ellipsis:
            machine.current = current_state
next_state = machine.current
# If the state has changed create a new work status entry (with the
# activity event hung off it) and notify.
if next_state != current_state:
has_changed = True
context.set_work_status(next_state, event)
# Create a new activity event for the new state.
activity_event_factory = repo.ActivityEventFactory(request)
state_event = activity_event_factory.factory({
'user': event.user,
'parent': event.parent,
'type_': activity_event_factory.type_from_context_action(event.parent),
'data': event.data
})
# Broadcast the new event.
dispatched.append(engine.changed(context, state_event))
# Either way, notify that the action has been performed.
dispatched.append(engine.happened(context, action, event=event))
# Return all the available information.
return next_state, has_changed, dispatched
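
# A minimal usage sketch (illustrative; ``context`` and ``event`` are
# whatever resource and activity event the calling code already has):
#
#     changer = request.state_changer
#     if changer.can_perform(context, 'a.DECLINE'):
#         next_state, changed, dispatched = changer.perform(
#             context, 'a.DECLINE', event)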
get_state_changer = lambda request: StateChanger(request)
def get_state_machine(request, context, action=None, **kwargs):
"""Request method to lookup a Fysom state machine configured with action rules
that determine which actions are possible from any given state.
The machine returned has its current state set to the state of the context.
The api for validation checks is then the native Fysom api, e.g.:
machine = request.get_state_machine(context)
machine.can('do_thing') # True or False depending on action config
"""
# Compose.
get_interfaces = kwargs.get('get_interfaces', util.get_interfaces)
# Unpack.
machines = request.registry.state_action_machines
# Get a matching machine.
machine = None
for key in get_interfaces(context):
fsm = machines.get(key, None)
if not fsm:
continue
if action and not hasattr(fsm, action):
continue
machine = fsm
break
# Populate the current state.
if machine:
machine.current = context.work_status.value
return machine
class AddEngineAction(object):
"""We use (you might say abuse) the Pyramid two-phase configuration machinery
by eager-building a dictionary of `state_action_rules` on the registry
keyed by context and name and then using this data to populate a single
fsm instance for each context.
"""
def __init__(self, **kwargs):
self.machine_cls = kwargs.get('machine_cls', fysom.Fysom)
def __call__(self, config, context, action, from_states, to_state):
"""We use (you might say abuse) the Pyramid two-phase configuration machinery
by eager-building a dictionary of `state_action_rules` on the registry
keyed by context and name and then using this data to populate a single
fsm instance for each context.
"""
# Unpack.
registry = config.registry
# Make sure ``from_states`` is an iterable.
if not hasattr(from_states, '__iter__'):
from_states = (from_states,)
# Unpack the from_states to individual states, so that we can use them
# in the discriminator: this allows the same action to be registered
# multiple times -- usually leading to different `to_state`s -- as
# long as the from_states are unique.
for state in from_states:
discriminator = ('engine.action', context, action, state)
# Make it introspectable.
intr = config.introspectable(category_name='engine action',
discriminator=discriminator,
title='An engine action',
type_name=None)
intr['value'] = (context, action, from_states, to_state)
config.action(discriminator, lambda: self.register(registry, context), introspectables=(intr,))
# And with that queued up, immediately store the from and two states
# in an action_rules dict.
value = (from_states, to_state)
        registry.state_action_rules[context].setdefault(action, []).append(value)
def register(self, registry, context):
"""Iff there isn't already a finite state machine registered for this
context then use the ``registry.state_action_rules`` to create and
register one.
This will noop except for the first call for each given ``context``.
Note that it leaves the rules intact after using them, so they're
still available for transitions to be validated against.
"""
# Noop if we've done this already.
machines = registry.state_action_machines
        if context in machines:
return
# Coerce the stored rules to an fysom.Fysom events list.
events = []
for key, value in registry.state_action_rules[context].items():
for allowed_states_tuple in value:
event = dict(name=key.encode('utf-8'), src=allowed_states_tuple[0], dst=allowed_states_tuple[1])
events.append(event)
# Create and register the machine.
machine = self.machine_cls(events=events)
registry.state_action_machines[context] = machine
class IncludeMe(object):
"""Setup the action registry and provide the `add_engine_action` directive."""
def __init__(self, **kwargs):
self.add_action = kwargs.get('add_action', AddEngineAction())
self.get_state_changer = kwargs.get('get_state_changer', get_state_changer)
self.get_state_machine = kwargs.get('get_state_machine', get_state_machine)
def __call__(self, config):
"""Expose route and provide directive."""
# Unpack.
add_action = self.add_action
get_state_changer = self.get_state_changer
get_state_machine = self.get_state_machine
# Provide `request.get_state_machine()` and ``request.state_changer``.
config.add_request_method(get_state_machine, 'get_state_machine')
config.add_request_method(get_state_changer, 'state_changer', reify=True)
# Provide `register_action` directive.
config.registry.state_action_machines = {}
config.registry.state_action_rules = defaultdict(dict)
config.add_directive('add_engine_action', add_action)
includeme = IncludeMe().__call__
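# Illustrative wiring sketch (not part of the original module): how an
# application might register transitions once this package is included.
# ``ITask`` and the state names are hypothetical placeholders.
def _example_wiring(config):
    class ITask(object):
        """Stand-in for a real context interface / class key."""
    config.add_engine_action(ITask, 'start', from_states='pending', to_state='active')
    config.add_engine_action(ITask, 'complete', from_states=('active', 'paused'), to_state='done')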
|
pradyu1993/scikit-learn | refs/heads/master | sklearn/manifold/tests/test_isomap.py | 31 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
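# Minimal usage sketch (illustrative, not one of the test cases above):
# embed a small random point cloud with explicit parameters.
def _example_isomap_usage():
    rng = np.random.RandomState(42)
    X = rng.rand(30, 3)
    embedding = manifold.Isomap(n_neighbors=5, n_components=2).fit_transform(X)
    assert embedding.shape == (30, 2)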
if __name__ == '__main__':
import nose
nose.runmodule()
|
dga4654dan/UTM-Demo | refs/heads/master | V_1_0_1/UtmDemo_Sfs_2.9.0/UtmDemo_Sfs_2.9.0_Server/lib/Lib/Cookie.py | 8 | #!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create an Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> print C
Set-Cookie: fig=newton;
Set-Cookie: sugar=wafer;
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie;
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road;
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> print C
Set-Cookie: chips=ahoy;
Set-Cookie: vienna=finger;
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;";
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/;
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> print C
Set-Cookie: number=7;
Set-Cookie: string=seven;
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> print C
Set-Cookie: number="I7\012.";
Set-Cookie: string="S'seven'\012p1\012.";
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> print C
Set-Cookie: number="I7\012.";
Set-Cookie: string=seven;
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
from UserDict import UserDict
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
try:
import re
except ImportError:
raise ImportError, "Cookie.py requires 're' from Python 1.5 or later"
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars,
join=string.join, idmap=string._idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + join( map(_Translator.get, str, str), "" ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str, join=string.join, atoi=string.atoi):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( atoi(str[j+1:j+4], 8) ) )
i = j+4
return join(res, "")
# end _unquote
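# Illustrative round-trip sketch (not part of the original module): _quote
# escapes each non-legal character as a backslash plus three octal digits,
# and _unquote reverses the transformation.
def _example_quote_roundtrip():
    coded = _quote('fudge\n') # -> '"fudge\\012"'
    assert _unquote(coded) == 'fudge\n'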
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
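# Hedged example (not part of the original module): an integer "expires"
# attribute is rendered through _getdate(), so the emitted header carries
# an absolute GMT timestamp one hour ahead of now.
def _example_expiring_cookie():
    C = SimpleCookie()
    C["session"] = "token"
    C["session"]["expires"] = 3600 # seconds from now -> _getdate(3600)
    return C.output()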
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(UserDict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"version" : "Version",
}
_reserved_keys = _reserved.keys()
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
UserDict.__init__(self)
# Set default attributes
for K in self._reserved_keys:
UserDict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = string.lower(K)
if not K in self._reserved_keys:
raise CookieError("Invalid Attribute %s" % K)
UserDict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return string.lower(K) in self._reserved_keys
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=string._idmap, translate=string.translate ):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if string.lower(key) in self._reserved_keys:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<SCRIPT LANGUAGE="JavaScript">
<!-- begin hiding
document.cookie = \"%s\"
// end hiding -->
</script>
""" % ( self.OutputString(attrs), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s;" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved_keys
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s;" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d;" % (self._reserved[K], V))
elif K == "secure":
RA("%s;" % self._reserved[K])
else:
RA("%s=%s;" % (self._reserved[K], V))
# Return the result
return string.join(result, " ")
# end OutputString
# end Morsel class
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(UserDict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_encode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
UserDict.__init__(self)
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
UserDict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\n"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return string.join(result, sep)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, string.join(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return string.join(result, "")
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
self.update(rawdata)
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif string.lower(K) in Morsel._reserved_keys:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
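# Illustrative sketch (not part of the original module): loading from a
# dictionary routes each pair through __setitem__, creating Morsels exactly
# as string parsing does.
def _example_load_from_dict():
    C = SimpleCookie()
    C.load({"fig": "newton", "sugar": "wafer"})
    return C.output()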
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:
|
jbuchbinder/youtube-dl | refs/heads/master | youtube_dl/extractor/tv3.py | 69 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TV3IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tv3\.co\.nz/(?P<id>[^/]+)/tabid/\d+/articleID/\d+/MCat/\d+/Default\.aspx'
_TEST = {
'url': 'http://www.tv3.co.nz/MOTORSPORT-SRS-SsangYong-Hampton-Downs-Round-3/tabid/3692/articleID/121615/MCat/2915/Default.aspx',
'info_dict': {
'id': '4659127992001',
'ext': 'mp4',
'title': 'CRC Motorsport: SRS SsangYong Hampton Downs Round 3 - S2015 Ep3',
'description': 'SsangYong Racing Series returns for Round 3 with drivers from New Zealand and Australia taking to the grid at Hampton Downs raceway.',
'uploader_id': '3812193411001',
'upload_date': '20151213',
'timestamp': 1449975272,
},
'expected_warnings': [
'Failed to download MPD manifest'
],
'params': {
# m3u8 download
'skip_download': True,
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/3812193411001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
brightcove_id = self._search_regex(r'<param\s*name="@videoPlayer"\s*value="(\d+)"', webpage, 'brightcove id')
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
|
stevenewey/django | refs/heads/master | tests/template_tests/filter_tests/test_truncatechars_html.py | 390 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import truncatechars_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_truncate_zero(self):
self.assertEqual(truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 0), '...')
def test_truncate(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 6),
'<p>one...</p>',
)
def test_truncate2(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 11),
'<p>one <a href="#">two ...</a></p>',
)
def test_truncate3(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 100),
'<p>one <a href="#">two - three <br>four</a> five</p>',
)
def test_truncate_unicode(self):
self.assertEqual(truncatechars_html('<b>\xc5ngstr\xf6m</b> was here', 5), '<b>\xc5n...</b>')
def test_truncate_something(self):
self.assertEqual(truncatechars_html('a<b>b</b>c', 3), 'a<b>b</b>c')
|
vmax-feihu/hue | refs/heads/master | desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/__init__.py | 72 | # -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + LEXERS.keys()
_lexer_cache = {}
def _load_lexers(module_name):
"""
Load a lexer (and all others in the module too).
"""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""
Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
"""
for item in LEXERS.itervalues():
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""
Lookup a lexer class by name. Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.itervalues():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""
Get a lexer by an alias.
"""
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.itervalues():
if _alias in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_for_filename(_fn, code=None, **options):
"""
Get a lexer for a filename. If multiple lexers match the filename
    pattern, use ``analyse_text()`` to figure out which one is more
appropriate.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.itervalues():
for filename in filenames:
if fnmatch.fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append(_lexer_cache[name])
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch.fnmatch(fn, filename):
matches.append(cls)
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = code.decode('latin1')
def get_rating(cls):
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
d = cls.analyse_text(code)
#print "Got %r from %r" % (d, cls)
return d
if code:
matches.sort(key=get_rating)
if matches:
#print "Possible lexers, after sort:", matches
return matches[-1](**options)
raise ClassNotFound('no lexer for filename %r found' % _fn)
def get_lexer_for_mimetype(_mime, **options):
"""
Get a lexer for a mimetype.
"""
for modname, name, _, _, mimetypes in LEXERS.itervalues():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses():
"""
Return an iterator over all lexer classes.
"""
for module_name, name, _, _, _ in LEXERS.itervalues():
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle those filenames primary (``filenames``)
or secondary (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = None
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if fnmatch.fnmatch(fn, filename):
matching_lexers.add(lexer)
primary = lexer
for filename in lexer.alias_filenames:
if fnmatch.fnmatch(fn, filename):
matching_lexers.add(lexer)
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
result.sort()
if not result[-1][0] and primary is not None:
return primary(**options)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""
Guess a lexer by strong distinctions in the text (eg, shebang).
"""
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
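# Illustrative sketch (not part of the original module): the three most
# common lookup entry points side by side; each should resolve to the
# Python lexer.
def _example_lookups():
    by_name = get_lexer_by_name('python')
    by_filename = get_lexer_for_filename('example.py')
    by_content = guess_lexer('#!/usr/bin/env python\nprint "hi"\n')
    return by_name, by_filename, by_content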
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
lochiiconnectivity/exabgp | refs/heads/master | lib/exabgp/configuration/bgp/process.py | 1 | # encoding: utf-8
"""
process.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from exabgp.configuration.engine.location import Location
from exabgp.configuration.engine.raised import Raised
from exabgp.configuration.engine.section import Section
from exabgp.configuration.engine.parser import boolean
import os
import sys
import stat
# =============================================================== syntax_process
syntax_process = """\
process <name> {
run </path/to/command with its args> # the command can be quoted
encoder text|json
received {
# "message" in notification,open,keepalive,update,refresh,operational,all
<message> [
parsed # send parsed BGP data for "message"
packets # send raw BGP message for "message"
consolidated # group parsed and raw information in one JSON object
]
neighbor-changes # state of peer change (up/down)
parsed # send parsed BGP data for all messages
packets # send raw BGP message for all messages
consolidated # group parsed and raw information for all messages
}
sent {
packets # send all generated BGP messages
}
}
"""
# ================================================================ RaisedProcess
class RaisedProcess (Raised):
syntax = syntax_process
# =============================================================== SectionProcess
#
class SectionProcess (Section):
syntax = syntax_process
name = 'process'
def enter (self,tokeniser):
Section.enter(self,tokeniser)
self.content['encoder'] = 'text'
def encoder (self,tokeniser):
token = tokeniser()
if token == '}':
return
if token not in ('text','json'):
raise RaisedProcess(tokeniser,'invalid encoder')
self.content['encoder'] = token
def respawn (self,tokeniser):
self.content['respawn'] = boolean(tokeniser,False)
def run (self,tokeniser):
command = tokeniser()
prg,args = command.split(None,1)
if prg[0] != '/':
if prg.startswith('etc/exabgp'):
parts = prg.split('/')
path = [os.environ.get('ETC','etc'),] + parts[2:]
prg = os.path.join(*path)
else:
prg = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),prg))
if not os.path.exists(prg):
raise RaisedProcess(tokeniser,'can not locate the the program "%s"' % prg)
# XXX: Yep, race conditions are possible, those are sanity checks not security ones ...
s = os.stat(prg)
if stat.S_ISDIR(s.st_mode):
raise RaisedProcess(tokeniser,'can not execute directories "%s"' % prg)
if s.st_mode & stat.S_ISUID:
raise RaisedProcess(tokeniser,'refusing to run setuid programs "%s"' % prg)
check = stat.S_IXOTH
if s.st_uid == os.getuid():
check |= stat.S_IXUSR
if s.st_gid == os.getgid():
check |= stat.S_IXGRP
if not check & s.st_mode:
raise RaisedProcess(tokeniser,'exabgp will not be able to run this program "%s"' % prg)
self.content['run'] = '%s %s' % (prg,args)
# all the action are the same
def message (self,tokeniser):
valid_messages = ['notification','open','keepalive','update','refresh','operational']
valid_options = ['parsed','packets','consolidated']
format = lambda s: (s[::-1].replace(',', ' and'[::-1], 1))[::-1]
direction = self.location[-2]
message = self.location[-1]
actions = tokeniser()
for (idx_line,idx_column,line,action) in actions:
if action not in valid_options:
raise RaisedProcess(Location(idx_line,idx_column,line),'invalid message option %s, valid options are "%s"' % (action,format('", "'.join(valid_options))))
messages = valid_messages if message == 'all' else [message]
for m in messages:
section = self.content.setdefault(direction,{}).setdefault(m,[])
if action in section:
raise RaisedProcess(Location(idx_line,idx_column,line),'duplicate action (%s) for message %s%s' % (
action,
m,
" using the alis 'all'" if message == 'all' else ''
))
if 'consolidated' in section and len(section) > 0:
raise RaisedProcess(Location(idx_line,idx_column,line),'consolidated can not be used with another keyword')
section.append(action)
def neighbor_changes (self,tokeniser):
self.content.setdefault('received',{})['neighbor-changes'] = True
# reveived global level
def received_packets (self,tokeniser):
for message in ['notification','open','keepalive','update','refresh','operational']:
self.content.setdefault('received',{}).setdefault(message,[]).append('packets')
def received_parsed (self,tokeniser):
for message in ['notification','open','keepalive','update','refresh','operational']:
self.content.setdefault('received',{}).setdefault(message,[]).append('parsed')
def received_consolidated (self,tokeniser):
for message in ['notification','open','keepalive','update','refresh','operational']:
self.content.setdefault('received',{}).setdefault(message,[]).append('consolidated')
# sent global level
def sent_packets (self,tokeniser):
for message in ['notification','open','keepalive','update','refresh','operational']:
self.content.setdefault('sent',{}).setdefault(message,[]).append('packets')
@classmethod
def register (cls,registry,location):
registry.register_class(cls)
registry.register_hook(cls,'enter',location,'enter')
registry.register_hook(cls,'action',location+['run'],'run')
registry.register_hook(cls,'action',location+['encoder'],'encoder')
registry.register_hook(cls,'action',location+['respawn'],'respawn')
for received in (location+['received'],):
registry.register_hook(cls,'enter',received,'enter_unamed_section')
registry.register_hook(cls,'action',received+['neighbor-changes'],'neighbor_changes')
for message in ['notification','open','keepalive','update','refresh','operational','all']:
registry.register_hook(cls,'action',received+[message],'message')
registry.register_hook(cls,'exit', received,'exit_unamed_section')
registry.register_hook(cls,'enter',location+['sent'],'enter_unamed_section')
registry.register_hook(cls,'action',location+['sent','packets'],'sent_packets')
registry.register_hook(cls,'exit',location+['sent'],'exit_unamed_section')
registry.register_hook(cls,'exit',location,'exit')
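# Illustrative configuration sketch (hedged; the path and process name are
# hypothetical), mirroring the grammar documented in ``syntax_process``:
#
# process watcher {
#     run /usr/bin/env python /etc/exabgp/handler.py
#     encoder json
#     received {
#         update [ parsed ]
#         neighbor-changes
#     }
#     sent {
#         packets
#     }
# }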
|
andyneff/voxel-globe | refs/heads/master | voxel_globe/main/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
nemesisdesign/django | refs/heads/master | tests/utils_tests/test_archive.py | 372 | import os
import shutil
import tempfile
import unittest
from django.utils._os import upath
from django.utils.archive import Archive, extract
TEST_DIR = os.path.join(os.path.dirname(upath(__file__)), 'archives')
class ArchiveTester(object):
archive = None
def setUp(self):
"""
Create temporary directory for testing extraction.
"""
self.old_cwd = os.getcwd()
self.tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmpdir)
self.archive_path = os.path.join(TEST_DIR, self.archive)
self.archive_lead_path = os.path.join(TEST_DIR, "leadpath_%s" % self.archive)
# Always start off in TEST_DIR.
os.chdir(TEST_DIR)
def tearDown(self):
os.chdir(self.old_cwd)
def test_extract_method(self):
with Archive(self.archive) as archive:
archive.extract(self.tmpdir)
self.check_files(self.tmpdir)
def test_extract_method_no_to_path(self):
os.chdir(self.tmpdir)
with Archive(self.archive_path) as archive:
archive.extract()
self.check_files(self.tmpdir)
def test_extract_function(self):
extract(self.archive_path, self.tmpdir)
self.check_files(self.tmpdir)
def test_extract_function_with_leadpath(self):
extract(self.archive_lead_path, self.tmpdir)
self.check_files(self.tmpdir)
def test_extract_function_no_to_path(self):
os.chdir(self.tmpdir)
extract(self.archive_path)
self.check_files(self.tmpdir)
def check_files(self, tmpdir):
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, '1')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, '2')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', '1')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', '2')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', 'bar', '1')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', 'bar', '2')))
class TestZip(ArchiveTester, unittest.TestCase):
archive = 'foobar.zip'
class TestTar(ArchiveTester, unittest.TestCase):
archive = 'foobar.tar'
class TestGzipTar(ArchiveTester, unittest.TestCase):
archive = 'foobar.tar.gz'
class TestBzip2Tar(ArchiveTester, unittest.TestCase):
archive = 'foobar.tar.bz2'
|
sunyanHub/myfirst | refs/heads/master | hzlu-github.py | 150 | print "hello, thank you for your GIT Tutorial." |
baylee-d/osf.io | refs/heads/develop | addons/mendeley/api.py | 77 | from mendeley.session import MendeleySession
class APISession(MendeleySession):
def request(self, *args, **kwargs):
kwargs['params'] = {'view': 'all', 'limit': '500'}
return super(APISession, self).request(*args, **kwargs)
|
diegosarmentero/ninja-ide | refs/heads/master | ninja_ide/resources.py | 6 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
from PyQt4.QtGui import QKeySequence
from PyQt4.QtCore import QDir
from PyQt4.QtCore import QSettings
from PyQt4.QtCore import Qt
###############################################################################
# CHECK PYTHON VERSION
###############################################################################
IS_PYTHON3 = sys.version_info.major == 3
###############################################################################
# PATHS
###############################################################################
HOME_PATH = QDir.toNativeSeparators(QDir.homePath())
NINJA_EXECUTABLE = os.path.realpath(sys.argv[0])
PRJ_PATH = os.path.abspath(os.path.dirname(__file__))
if not IS_PYTHON3:
PRJ_PATH = PRJ_PATH.decode('utf-8')
#Only for py2exe
frozen = getattr(sys, 'frozen', '')
if frozen in ('dll', 'console_exe', 'windows_exe'):
# py2exe:
PRJ_PATH = os.path.abspath(os.path.dirname(sys.executable))
HOME_NINJA_PATH = os.path.join(HOME_PATH, ".ninja_ide")
NINJA_KNOWLEDGE_PATH = os.path.join(HOME_NINJA_PATH, 'knowledge')
SETTINGS_PATH = os.path.join(HOME_NINJA_PATH, 'ninja_settings.ini')
DATA_SETTINGS_PATH = os.path.join(HOME_NINJA_PATH, 'data_settings.ini')
EXTENSIONS_PATH = os.path.join(HOME_NINJA_PATH, "extensions")
SYNTAX_FILES = os.path.join(PRJ_PATH, "extensions", "syntax")
PLUGINS = os.path.join(HOME_NINJA_PATH, "extensions", "plugins")
PLUGINS_DESCRIPTOR = os.path.join(EXTENSIONS_PATH,
"plugins", "descriptor.json")
LANGS = os.path.join(EXTENSIONS_PATH, "languages")
EDITOR_SKINS = os.path.join(EXTENSIONS_PATH, "schemes")
NINJA_THEME = os.path.join(PRJ_PATH, "extensions", "theme", "ninja_dark.qss")
NINJA_THEME_DOWNLOAD = os.path.join(EXTENSIONS_PATH, "theme")
LOG_FILE_PATH = os.path.join(HOME_NINJA_PATH, 'ninja_ide.log')
GET_SYSTEM_PATH = os.path.join(PRJ_PATH, 'tools', 'get_system_path.py')
QML_FILES = os.path.join(PRJ_PATH, "gui", "qml")
###############################################################################
# URLS
###############################################################################
BUGS_PAGE = "https://github.com/ninja-ide/ninja-ide/issues"
PLUGINS_DOC = "http://ninja-ide.readthedocs.org/en/latest/"
UPDATES_URL = 'http://ninja-ide.org/updates'
SCHEMES_URL = 'http://ninja-ide.org/schemes/api/'
LANGUAGES_URL = 'http://ninja-ide.org/plugins/languages'
PLUGINS_WEB = 'http://ninja-ide.org/plugins/api/official'
PLUGINS_COMMUNITY = 'http://ninja-ide.org/plugins/api/community'
###############################################################################
# COLOR SCHEMES
###############################################################################
COLOR_SCHEME = {
"Default": "#c5c8c6",
"Keyword": "#83c1fb",
"Operator": "#FFFFFF",
"Brace": "#FFFFFF",
"Caret": "#FFFFFF",
"FunctionMethodName": "#fdff74",
"ClassName": "#fdff74",
"Identifier": "#c5c8c6",
"DoubleQuotedString": "#d07cd3",
"SingleQuotedString": "#d07cd3",
"TripleDoubleQuotedString": "#86d986",
"TripleSingleQuotedString": "#86d986",
"Comment": "#7c7c7c",
"CommentBlock": "#7c7c7c",
"SelfReference": "#6EC7D7",
"HighlightedIdentifier": "#6EC7D7",
"Number": "#F8A008",
"Decorator": "#fdb269",
"EditorBackground": "#1d1f21",
"EditorSelectionColor": "#000000",
"EditorSelectionBackground": "#aaaaaa",
"CurrentLine": "#313233",
"SelectedWord": "#a8ff60",
"Pending": "#FF0000",
"SelectedWordBackground": "#009B00",
"FoldArea": "#292c2f",
"FoldArrowExpanded": "#696c6e",
"FoldArrowCollapsed": "#FFFFFF",
"LinkNavigate": "005aff",
"BraceBackground": "#5BC85B",
"BraceForeground": "#FF0000",
"ErrorUnderline": "#0000ff",
"Pep8Underline": "#00ffff",
"SidebarBackground": "#292c2f",
"SidebarSelectedBackground": "#46484b",
"SidebarForeground": "#868989",
"SidebarSelectedForeground": "#c5c8c6",
"MigrationUnderline": "#ff0000",
"MarginLine": '#7c7c7c',
}
CUSTOM_SCHEME = {}
def get_color(key):
if key in COLOR_SCHEME:
return CUSTOM_SCHEME.get(key, COLOR_SCHEME.get(key))
return None
def get_color_hex(key):
if key in COLOR_SCHEME:
return CUSTOM_SCHEME.get(key, COLOR_SCHEME.get(key)).lstrip("#")
return None
###############################################################################
# SHORTCUTS
###############################################################################
#default shortcuts
SHORTCUTS = {
"Duplicate": QKeySequence(Qt.CTRL + Qt.Key_R), # Replicate
"Remove-line": QKeySequence(Qt.CTRL + Qt.Key_E), # Eliminate
"Move-up": QKeySequence(Qt.ALT + Qt.Key_Up),
"Move-down": QKeySequence(Qt.ALT + Qt.Key_Down),
"Close-file": QKeySequence(Qt.CTRL + Qt.Key_W),
"New-file": QKeySequence(Qt.CTRL + Qt.Key_N),
"New-project": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_N),
"Open-file": QKeySequence(Qt.CTRL + Qt.Key_O),
"Open-project": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_O),
"Save-file": QKeySequence(Qt.CTRL + Qt.Key_S),
"Save-project": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_S),
"Print-file": QKeySequence(Qt.CTRL + Qt.Key_P),
"Redo": QKeySequence(Qt.CTRL + Qt.Key_Y),
"Comment": QKeySequence(Qt.CTRL + Qt.Key_G),
"Uncomment": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_G),
"Horizontal-line": QKeySequence(),
"Title-comment": QKeySequence(),
"Indent-less": QKeySequence(Qt.SHIFT + Qt.Key_Tab),
"Hide-misc": QKeySequence(Qt.Key_F4),
"Hide-editor": QKeySequence(Qt.Key_F3),
"Hide-explorer": QKeySequence(Qt.Key_F2),
"Run-file": QKeySequence(Qt.CTRL + Qt.Key_F6),
"Run-project": QKeySequence(Qt.Key_F6),
"Debug": QKeySequence(Qt.Key_F7),
"Show-Selector": QKeySequence(Qt.CTRL + Qt.Key_QuoteLeft),
"Stop-execution": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_F6),
"Hide-all": QKeySequence(Qt.Key_F11),
"Full-screen": QKeySequence(Qt.CTRL + Qt.Key_F11),
"Find": QKeySequence(Qt.CTRL + Qt.Key_F),
"Find-replace": QKeySequence(Qt.CTRL + Qt.Key_H),
"Find-with-word": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_F),
"Find-next": QKeySequence(Qt.CTRL + Qt.Key_F3),
"Find-previous": QKeySequence(Qt.SHIFT + Qt.Key_F3),
"Help": QKeySequence(Qt.Key_F1),
"Split-horizontal": QKeySequence(Qt.Key_F9),
"Split-vertical": QKeySequence(Qt.CTRL + Qt.Key_F9),
"Close-Split": QKeySequence(Qt.SHIFT + Qt.Key_F9),
"Split-assistance": QKeySequence(Qt.Key_F10),
"Follow-mode": QKeySequence(Qt.CTRL + Qt.Key_F10),
"Reload-file": QKeySequence(Qt.Key_F5),
"Find-in-files": QKeySequence(Qt.CTRL + Qt.Key_L),
"Import": QKeySequence(Qt.CTRL + Qt.Key_I),
"Go-to-definition": QKeySequence(Qt.CTRL + Qt.Key_Return),
"Complete-Declarations": QKeySequence(Qt.ALT + Qt.Key_Return),
"Code-locator": QKeySequence(Qt.CTRL + Qt.Key_K),
"File-Opener": QKeySequence(Qt.CTRL + Qt.ALT + Qt.Key_O),
"Navigate-back": QKeySequence(Qt.ALT + Qt.Key_Left),
"Navigate-forward": QKeySequence(Qt.ALT + Qt.Key_Right),
"Open-recent-closed": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_T),
"Change-Tab": QKeySequence(Qt.CTRL + Qt.Key_PageDown),
"Change-Tab-Reverse": QKeySequence(Qt.CTRL + Qt.Key_PageUp),
"Show-Code-Nav": QKeySequence(Qt.CTRL + Qt.Key_3),
"Show-Paste-History": QKeySequence(Qt.CTRL + Qt.Key_4),
"History-Copy": QKeySequence(Qt.CTRL + Qt.ALT + Qt.Key_C),
"History-Paste": QKeySequence(Qt.CTRL + Qt.ALT + Qt.Key_V),
"Add-Bookmark-or-Breakpoint": QKeySequence(Qt.CTRL + Qt.Key_B),
#"change-split-focus": QKeySequence(Qt.CTRL + Qt.Key_Tab),
"Move-Tab-to-right": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_0),
"Move-Tab-to-left": QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_9),
"change-tab-visibility": QKeySequence(Qt.SHIFT + Qt.Key_F1),
"Highlight-Word": QKeySequence(Qt.CTRL + Qt.Key_Down),
"undo": QKeySequence(Qt.CTRL + Qt.Key_Z),
"Indent-more": QKeySequence(Qt.Key_Tab),
"cut": QKeySequence(Qt.CTRL + Qt.Key_X),
"copy": QKeySequence(Qt.CTRL + Qt.Key_C),
"paste": QKeySequence(Qt.CTRL + Qt.Key_V),
"expand-symbol-combo": QKeySequence(Qt.CTRL + Qt.Key_2),
"expand-file-combo": QKeySequence(Qt.CTRL + Qt.Key_Tab)}
CUSTOM_SHORTCUTS = {}
###############################################################################
# FUNCTIONS
###############################################################################
def load_shortcuts():
"""
Loads the shortcuts from QSettings
"""
global SHORTCUTS, CUSTOM_SHORTCUTS
settings = QSettings(SETTINGS_PATH, QSettings.IniFormat)
for action in SHORTCUTS:
#default shortcut
default_action = SHORTCUTS[action].toString()
#get the custom shortcut or the default
shortcut_action = settings.value("shortcuts/%s" % action,
default_action)
#set the shortcut
CUSTOM_SHORTCUTS[action] = QKeySequence(shortcut_action)
def get_shortcut(shortcut_name):
"""
Returns the shortcut looking into CUSTOM_SHORTCUTS and
SHORTCUTS
"""
global SHORTCUTS, CUSTOM_SHORTCUTS
return CUSTOM_SHORTCUTS.get(shortcut_name, SHORTCUTS.get(shortcut_name))
def clean_custom_shortcuts():
"""
    Cleans CUSTOM_SHORTCUTS.
"""
global CUSTOM_SHORTCUTS
CUSTOM_SHORTCUTS = {}
def create_home_dir_structure():
"""
    Create the necessary directory structure for NINJA-IDE.
"""
for directory in (HOME_NINJA_PATH, EXTENSIONS_PATH, PLUGINS, EDITOR_SKINS,
LANGS, NINJA_THEME_DOWNLOAD, NINJA_KNOWLEDGE_PATH):
if not os.path.isdir(directory):
os.mkdir(directory)
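# Illustrative sketch (not part of the original module): typical lookup flow
# once the directory structure and settings exist.
def _example_resource_lookups():
    create_home_dir_structure()
    load_shortcuts()
    find_shortcut = get_shortcut("Find") # QKeySequence, custom or default
    background = get_color("EditorBackground") # "#1d1f21" unless a scheme overrides it
    return find_shortcut, background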
|
marrybird/flask-admin | refs/heads/master | examples/rediscli/app.py | 36 | from flask import Flask
from redis import Redis
import flask_admin as admin
from flask_admin.contrib import rediscli
# Create flask app
app = Flask(__name__)
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
if __name__ == '__main__':
# Create admin interface
admin = admin.Admin(app, name="Example: Redis")
admin.add_view(rediscli.RedisCli(Redis()))
# Start app
app.run(debug=True)
|
theshadowx/enigma2 | refs/heads/master | lib/python/Components/Renderer/FrontpanelLed.py | 30 | from Components.Element import Element
# this is not a GUI renderer.
class FrontpanelLed(Element):
def __init__(self, which = 0, patterns = [(20, 0, 0xffffffff),(20, 0x55555555, 0x84fc8c04)], boolean = True):
self.which = which
self.boolean = boolean
self.patterns = patterns
Element.__init__(self)
def changed(self, *args, **kwargs):
if self.boolean:
val = self.source.boolean and 0 or 1
else:
val = self.source.value
(speed, pattern, pattern_4bit) = self.patterns[val]
try:
open("/proc/stb/fp/led%d_pattern" % self.which, "w").write("%08x" % pattern)
except IOError:
pass
if self.which == 0:
try:
open("/proc/stb/fp/led_set_pattern", "w").write("%08x" % pattern_4bit)
open("/proc/stb/fp/led_set_speed", "w").write("%d" % speed)
except IOError:
pass
try:
open("/proc/stb/fp/led_pattern_speed", "w").write("%d" % speed)
except IOError:
pass
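# Hedged sketch (not part of the original file): each patterns entry is a
# (speed, pattern, pattern_4bit) tuple; patterns[0] applies when the boolean
# source is active, patterns[1] when it is not.
def _example_led():
    return FrontpanelLed(which=0, patterns=[(20, 0, 0xffffffff), (20, 0x55555555, 0x84fc8c04)])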
|
yrobla/nova | refs/heads/debian/unstable | nova/tests/integrated/test_xml.py | 14 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack import common
from nova.api.openstack import xmlutil
from nova.openstack.common import log as logging
from nova.tests.integrated import integrated_helpers
LOG = logging.getLogger(__name__)
class XmlTests(integrated_helpers._IntegratedTestBase):
""""Some basic XML sanity checks."""
def test_namespace_limits(self):
headers = {}
headers['Accept'] = 'application/xml'
response = self.api.api_request('/limits', headers=headers)
data = response.read()
LOG.debug("data: %s" % data)
root = etree.XML(data)
self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
def test_namespace_servers(self):
# /servers should have v1.1 namespace (has changed in 1.1).
headers = {}
headers['Accept'] = 'application/xml'
response = self.api.api_request('/servers', headers=headers)
data = response.read()
LOG.debug("data: %s" % data)
root = etree.XML(data)
self.assertEqual(root.nsmap.get(None), common.XML_NS_V11)
|
unisport/thumblr | refs/heads/master | thumblr/templatetags/thumblr_tags.py | 1 | from django import template
from django.contrib.contenttypes.models import ContentType
from django.forms import HiddenInput
from django.template.base import TemplateSyntaxError
from thumblr.services.url import get_image_instance_url
from thumblr.dto import ImageMetadata, ImageUrlSpec
from thumblr.forms import ImageSizeForm
from thumblr.models import ImageSize, Image
from thumblr.usecases import get_image_url
from thumblr.views import SizeTable
from .utils import parse_kwargs
register = template.Library()
def thumblr_tag_parser(parser, token):
try:
        split_content = token.split_contents()
        tag_name, file_name, params = split_content[0], split_content[1], split_content[2:]
except ValueError:
raise template.TemplateSyntaxError(u"%r tag requires at least file name and size" % token.contents.split()[0])
kwargs = parse_kwargs(params)
return ThumblrNode(file_name[1:-1], **kwargs)
register.tag("thumblr", thumblr_tag_parser)
class ThumblrNode(template.Node):
def __init__(self, file_name, size=None, site_id=None, content_type_name=None, main=True):
self.file_name = file_name
self.size = size
self.site_id = site_id
self.content_type_name = content_type_name
self.main = main
self._url = None
@property
def url(self):
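        # Resolve the CDN URL lazily on first access and cache it for
        # subsequent renders of this node.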
if self._url is None:
image_spec = ImageMetadata(
file_name=self.file_name,
size_slug=self.size,
site_id=self.site_id,
content_type_id=ContentType.objects.values('id').get(name=self.content_type_name)['id']
)
self._url = get_image_url(image_spec, ImageUrlSpec.CDN_URL)
return self._url
def render(self, context):
return self.url
class ImagesNode(template.Node):
def __init__(self, var_name='images', size='original',
site_id=None,
content_type_id=None,
content_type_name=None,
object_id=None):
self.var_name = var_name
self.size = size
self.site_id = site_id
self.content_type_id = content_type_id
self.content_type_name = content_type_name
self.object_id = object_id
    def render(self, context):
        """
        render updates the context of the template, adding a new variable
        named var_name that contains the image URLs
        """
        images = Image.objects.filter(
            site_id=self.site_id if self.site_id else context.get('site_id'),
            content_type__name=self.content_type_name if self.content_type_name else context.get(
                'content_type_name'),
            object_id=self.object_id if self.object_id else context.get('object_id'),
            size__name=self.size)
        urls = list(get_image_instance_url(i, ImageUrlSpec.CDN_URL) for i in images)
        context[self.var_name] = urls
        # Node.render must return a string; this node only populates the context
        return ''
@register.tag("thumblr_imgs")
def thumblr_imgs(parser, token):
'''
    Puts a list of image URLs into the template context under the given name,
    e.g. {% thumblr_imgs large as images %}
'''
try:
split_content = token.split_contents()
tag_name, kwargs_unparsed, _as, var_name = split_content[0], split_content[1:-2], split_content[-2], \
split_content[-1]
kwargs = parse_kwargs(kwargs_unparsed)
if _as != 'as':
raise TemplateSyntaxError(
"'as' wasn't found. Thumblr_imgs should be in the next format {% thumblr_imgs <size> as <var_name> %}")
    except TemplateSyntaxError:
        raise
    except Exception:
        raise TemplateSyntaxError("thumblr_imgs should be in the next format {% thumblr_imgs <size> as <var_name> %}")
else:
return ImagesNode(var_name, **kwargs)
class SizeAddingNode(template.Node):
def __init__(self, content_type_name=None):
self.content_type_name = content_type_name
if content_type_name:
try:
self.content_type_id = ContentType.objects.values('id').get(name=content_type_name)['id']
except ContentType.DoesNotExist:
raise ContentType.DoesNotExist('Content Type from template tag with name "{}" '
'does not exist'.format(content_type_name))
else:
self.content_type_id = None
def render(self, context):
context['form'] = ImageSizeForm(initial={'content_type': self.content_type_id})
context['form'].fields['content_type'].widget = HiddenInput()
context['sizes'] = ImageSize.objects.all()
context['model'] = self.content_type_name
t = template.loader.get_template('thumblr/sizes.html')
if self.content_type_id:
context['sizes'] = SizeTable(ImageSize.objects.filter(content_type__id=self.content_type_id))
else:
context['sizes'] = SizeTable(ImageSize.objects.all())
return t.render(context)
@register.tag("thumblr_add_sizes")
def thumblr_size_adding(parser, token):
"""
Tag that returns a form for adding new size with a list of existing sizes for given content type
{% thumblr_add_sizes content_type_name='Tile' %}
"""
try:
split_content = token.split_contents()
if len(split_content) <= 1:
return SizeAddingNode()
tag_name, content_type_name_unparsed = split_content[0], split_content[-1]
key, content_type_name = content_type_name_unparsed.split('=')
if key != 'content_type_name':
raise TemplateSyntaxError(
"content_type_name coudn't be found in template tag. Check the syntax (Example: "
"thumblr_add_sizes content_type_name='Tile')")
except IndexError:
raise TemplateSyntaxError("Only two arguments should be passes "
"(Example: thumblr_add_sizes content_type_name='Tile')")
return SizeAddingNode(content_type_name.replace('"', '').replace("'", ''))
|
buchwj/xvector | refs/heads/master | xVLib/xVLib/__init__.py | 1 | # xVector Engine Core Library
# Copyright (c) 2010 James Buchwald
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__all__ = ["async_subprocess", "BinaryStructs", "Directories", "Maps",
"Networking", "Packets", "Version"]
|
chouseknecht/ansible | refs/heads/devel | hacking/build_library/build_ansible/command_plugins/collection_meta.py | 26 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import pathlib
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
from ..jinja2.filters import documented_type, rst_ify # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = 'collections_galaxy_meta.rst.j2'
DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
def normalize_options(options):
"""Normalize the options to make for easy templating"""
for opt in options:
if isinstance(opt['description'], string_types):
opt['description'] = [opt['description']]
class DocumentCollectionMeta(Command):
name = 'collection-meta'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name, description='Generate collection galaxy.yml documentation from shared metadata')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE,
help="Jinja2 template to use for the config")
parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
default=DEFAULT_TEMPLATE_DIR,
help="directory containing Jinja2 templates")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
help="Output directory for rst files")
parser.add_argument("collection_defs", metavar="COLLECTION-OPTION-DEFINITIONS.yml", type=str,
help="Source for collection metadata option docs")
@staticmethod
def main(args):
output_dir = os.path.abspath(args.output_dir)
template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
template_file = os.path.basename(template_file_full_path)
template_dir = os.path.dirname(template_file_full_path)
with open(args.collection_defs) as f:
options = yaml.safe_load(f)
normalize_options(options)
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True)
env.filters['documented_type'] = documented_type
env.filters['rst_ify'] = rst_ify
template = env.get_template(template_file)
output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
temp_vars = {'options': options}
data = to_bytes(template.render(temp_vars))
update_file_if_different(output_name, data)
return 0
|
ytjiang/thefuck | refs/heads/master | tests/test_logs.py | 4 | from mock import Mock
from thefuck import logs
def test_color():
assert logs.color('red', Mock(no_colors=False)) == 'red'
assert logs.color('red', Mock(no_colors=True)) == ''
|
uahic/nest-simulator | refs/heads/master | examples/nest/plot_tsodyks_depr_fac.py | 13 | # -*- coding: utf-8 -*-
#
# plot_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.pylab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
    # matplotlib.mlab.load() no longer exists; numpy.loadtxt reads the
    # whitespace-separated voltmeter output the same way
    vm = np.loadtxt('voltmeter-4-0.dat')
figure(1)
clf()
plot(vm[:,0], vm[:,1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
|
bsmr-eve/Pyfa | refs/heads/master | eos/effects/shiparmorknresistanceaf1.py | 2 | # shipArmorKNResistanceAF1
#
# Used by:
# Ship: Astero
# Ship: Malice
# Ship: Punisher
type = "passive"
def handler(fit, ship, context):
fit.ship.boostItemAttr("armorKineticDamageResonance", ship.getModifiedItemAttr("shipBonusAF"),
skill="Amarr Frigate")
|
cassinius/mlhi-ass2-anonymization | refs/heads/master | test/rangeGenHierarchyTests.py | 1 | import unittest
import src.rangeGenHierarchy as RGH
class ExtendedTestCase(unittest.TestCase):
def assertRaisesWithMessage(self, msg, func, *args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as inst:
            self.assertEqual(str(inst), msg)
        else:
            # unittest has no assertFail(); fail() marks the missing exception
            self.fail('expected an exception with message: %r' % msg)
# test_ prefix for methods is needed in python unittest
class RangeGenHierarchyMethods(ExtendedTestCase):
def test_rangeGenAgeMinGreaterMax(self):
self.assertRaisesWithMessage('Range invalid. Min greater than max.',
RGH.RangeGenHierarchy,
'age', 10, -5)
def test_rangeGenAgeMinEqualsMax(self):
self.assertRaisesWithMessage('Range invalid. Min equals max.',
RGH.RangeGenHierarchy,
'age', 10, 10)
def test_rangeGenAgeGenToNegRange(self):
rgh = RGH.RangeGenHierarchy('age', 10, 90)
self.assertRaisesWithMessage('Cannot generalize to negative range.',
rgh.getCostOfRange,
40, 30)
def test_rangeGenAgeGenLowOutside(self):
rgh = RGH.RangeGenHierarchy('age', 10, 90)
self.assertRaisesWithMessage('Low parameter less than range minimum.',
rgh.getCostOfRange,
5, 30)
def test_rangeGenAgeGenHighOutside(self):
rgh = RGH.RangeGenHierarchy('age', 10, 90)
self.assertRaisesWithMessage('High parameter greater than range maximum.',
rgh.getCostOfRange,
50, 100)
def test_rangeGenAgeGenValid(self):
rgh = RGH.RangeGenHierarchy('age', 10, 90)
self.assertEqual(rgh.getCostOfRange(30, 50), 0.25)
if __name__ == '__main__':
unittest.main() |
r0balo/pelisalacarta | refs/heads/develop | python/main-classic/lib/sambatools/pyasn1/codec/cer/encoder.py | 10 | # CER encoder
from pyasn1 import error
from pyasn1.codec.ber import encoder
from pyasn1.compat.octets import int2oct, str2octs, null
from pyasn1.type import univ
from pyasn1.type import useful
class BooleanEncoder(encoder.IntegerEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
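        # CER, like DER, restricts BOOLEAN to a single content octet:
        # 0x00 for FALSE and 0xFF for TRUE (X.690 11.1).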
if client == 0:
substrate = int2oct(0)
else:
substrate = int2oct(255)
return substrate, 0
class BitStringEncoder(encoder.BitStringEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
return encoder.BitStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class OctetStringEncoder(encoder.OctetStringEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class RealEncoder(encoder.RealEncoder):
def _chooseEncBase(self, value):
m, b, e = value
return self._dropFloatingPoint(m, b, e)
# specialized GeneralizedTimeEncoder here
class GeneralizedTimeEncoder(OctetStringEncoder):
zchar = str2octs('Z')
pluschar = str2octs('+')
minuschar = str2octs('-')
zero = str2octs('0')
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
octets = client.asOctets()
# This breaks too many existing data items
# if '.' not in octets:
# raise error.PyAsn1Error('Format must include fraction of second: %r' % octets)
if len(octets) < 15:
raise error.PyAsn1Error('Bad UTC time length: %r' % octets)
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets[-1] != self.zchar[0]:
raise error.PyAsn1Error('Missing timezone specifier: %r' % octets)
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class UTCTimeEncoder(encoder.OctetStringEncoder):
zchar = str2octs('Z')
pluschar = str2octs('+')
minuschar = str2octs('-')
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
octets = client.asOctets()
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets and octets[-1] != self.zchar[0]:
client = client.clone(octets + self.zchar)
if len(client) != 13:
raise error.PyAsn1Error('Bad UTC time length: %r' % client)
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class SetOfEncoder(encoder.SequenceOfEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
if isinstance(client, univ.SequenceAndSetBase):
client.setDefaultComponents()
client.verifySizeSpec()
substrate = null; idx = len(client)
# This is certainly a hack but how else do I distinguish SetOf
# from Set if they have the same tags&constraints?
if isinstance(client, univ.SequenceAndSetBase):
# Set
comps = []
while idx > 0:
idx = idx - 1
if client[idx] is None: # Optional component
continue
if client.getDefaultComponentByPosition(idx) == client[idx]:
continue
comps.append(client[idx])
comps.sort(key=lambda x: isinstance(x, univ.Choice) and \
x.getMinTagSet() or x.getTagSet())
for c in comps:
substrate += encodeFun(c, defMode, maxChunkSize)
else:
# SetOf
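            # CER/DER require the component encodings of a SET OF to be
            # sorted in ascending order as octet strings (X.690 11.6).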
compSubs = []
while idx > 0:
idx = idx - 1
compSubs.append(
encodeFun(client[idx], defMode, maxChunkSize)
)
compSubs.sort() # perhaps padding's not needed
substrate = null
for compSub in compSubs:
substrate += compSub
return substrate, 1
tagMap = encoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Real.tagSet: RealEncoder(),
useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
useful.UTCTime.tagSet: UTCTimeEncoder(),
    univ.SetOf().tagSet: SetOfEncoder()  # conflicts with Set
})
typeMap = encoder.typeMap.copy()
typeMap.update({
univ.Set.typeId: SetOfEncoder(),
univ.SetOf.typeId: SetOfEncoder()
})
class Encoder(encoder.Encoder):
def __call__(self, client, defMode=False, maxChunkSize=0):
return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
encode = Encoder(tagMap, typeMap)
# EncoderFactory queries class instance and builds a map of tags -> encoders
|
chuntielin/Blog_Yeoman_Fullstack | refs/heads/master | node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
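  """Split a 'path/to/file.gyp:target#toolset' target string into its
  (filename, target, toolset suffix) parts."""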
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Dhivyap/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/iam_password_policy.py | 7 | #!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_password_policy
short_description: Update an IAM Password Policy
description:
- Module updates an IAM Password Policy on a given AWS account
version_added: "2.8"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
state:
description:
- Specifies the overall state of the password policy.
required: true
choices: ['present', 'absent']
min_pw_length:
description:
- Minimum password length.
default: 6
aliases: [minimum_password_length]
require_symbols:
description:
- Require symbols in password.
default: false
type: bool
require_numbers:
description:
- Require numbers in password.
default: false
type: bool
require_uppercase:
description:
- Require uppercase letters in password.
default: false
type: bool
require_lowercase:
description:
- Require lowercase letters in password.
default: false
type: bool
allow_pw_change:
description:
- Allow users to change their password.
default: false
type: bool
aliases: [allow_password_change]
pw_max_age:
description:
- Maximum age for a password in days. When this option is 0 then passwords
do not expire automatically.
default: 0
aliases: [password_max_age]
pw_reuse_prevent:
description:
- Prevent re-use of passwords.
default: 0
aliases: [password_reuse_prevent, prevent_reuse]
pw_expire:
description:
      - Prevents users from changing an expired password.
default: false
type: bool
aliases: [password_expire, expire]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Password policy for AWS account
iam_password_policy:
state: present
min_pw_length: 8
require_symbols: false
require_numbers: true
require_uppercase: true
require_lowercase: true
allow_pw_change: true
pw_max_age: 60
pw_reuse_prevent: 5
pw_expire: false
'''
RETURN = ''' # '''
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
class IAMConnection(object):
def __init__(self, module):
try:
self.connection = module.resource('iam')
self.module = module
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
def policy_to_dict(self, policy):
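        # Snapshot the boto3 AccountPasswordPolicy resource into a plain dict
        # so the before/after states can be compared for change detection.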
policy_attributes = [
'allow_users_to_change_password', 'expire_passwords', 'hard_expiry',
'max_password_age', 'minimum_password_length', 'password_reuse_prevention',
'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters'
]
ret = {}
for attr in policy_attributes:
ret[attr] = getattr(policy, attr)
return ret
def update_password_policy(self, module, policy):
min_pw_length = module.params.get('min_pw_length')
require_symbols = module.params.get('require_symbols')
require_numbers = module.params.get('require_numbers')
require_uppercase = module.params.get('require_uppercase')
require_lowercase = module.params.get('require_lowercase')
allow_pw_change = module.params.get('allow_pw_change')
pw_max_age = module.params.get('pw_max_age')
pw_reuse_prevent = module.params.get('pw_reuse_prevent')
pw_expire = module.params.get('pw_expire')
update_parameters = dict(
MinimumPasswordLength=min_pw_length,
RequireSymbols=require_symbols,
RequireNumbers=require_numbers,
RequireUppercaseCharacters=require_uppercase,
RequireLowercaseCharacters=require_lowercase,
AllowUsersToChangePassword=allow_pw_change,
HardExpiry=pw_expire
)
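        # A value of 0 disables these options; the IAM API only accepts
        # positive values, so include them only when set.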
if pw_reuse_prevent:
update_parameters.update(PasswordReusePrevention=pw_reuse_prevent)
if pw_max_age:
update_parameters.update(MaxPasswordAge=pw_max_age)
try:
original_policy = self.policy_to_dict(policy)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
original_policy = {}
try:
results = policy.update(**update_parameters)
policy.reload()
updated_policy = self.policy_to_dict(policy)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy")
changed = (original_policy != updated_policy)
return (changed, updated_policy, camel_dict_to_snake_dict(results))
def delete_password_policy(self, policy):
try:
results = policy.delete()
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"})
else:
self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy")
return camel_dict_to_snake_dict(results)
def main():
module = AnsibleAWSModule(
argument_spec={
'state': dict(choices=['present', 'absent'], required=True),
'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6),
'require_symbols': dict(type='bool', default=False),
'require_numbers': dict(type='bool', default=False),
'require_uppercase': dict(type='bool', default=False),
'require_lowercase': dict(type='bool', default=False),
'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False),
'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0),
'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0),
'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False),
},
supports_check_mode=True,
)
resource = IAMConnection(module)
policy = resource.connection.AccountPasswordPolicy()
state = module.params.get('state')
if state == 'present':
(changed, new_policy, update_result) = resource.update_password_policy(module, policy)
module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy)
if state == 'absent':
delete_result = resource.delete_password_policy(policy)
module.exit_json(changed=True, task_status={'IAM': delete_result})
if __name__ == '__main__':
main()
|
yahman72/robotframework | refs/heads/master | src/robot/htmldata/htmlfilewriter.py | 27 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import re
from robot.utils import HtmlWriter
from robot.version import get_full_version
from .template import HtmlTemplate
class HtmlFileWriter(object):
def __init__(self, output, model_writer):
self._output = output
self._model_writer = model_writer
def write(self, template):
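        # Each template line is handled by the first writer that claims it;
        # LineWriter accepts everything and acts as the fallback.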
writers = self._get_writers(os.path.dirname(template))
for line in HtmlTemplate(template):
for writer in writers:
if writer.handles(line):
writer.write(line)
break
def _get_writers(self, base_dir):
html_writer = HtmlWriter(self._output)
return (self._model_writer,
JsFileWriter(html_writer, base_dir),
CssFileWriter(html_writer, base_dir),
GeneratorWriter(html_writer),
LineWriter(self._output))
class _Writer(object):
_handles_line = None
def handles(self, line):
return line.startswith(self._handles_line)
def write(self, line):
raise NotImplementedError
class ModelWriter(_Writer):
_handles_line = '<!-- JS MODEL -->'
class LineWriter(_Writer):
def __init__(self, output):
self._output = output
def handles(self, line):
return True
def write(self, line):
self._output.write(line + '\n')
class GeneratorWriter(_Writer):
_handles_line = '<meta name="Generator" content='
def __init__(self, html_writer):
self._html_writer = html_writer
def write(self, line):
version = get_full_version('Robot Framework')
self._html_writer.start('meta', {'name': 'Generator', 'content': version})
class _InliningWriter(_Writer):
def __init__(self, html_writer, base_dir):
self._html_writer = html_writer
self._base_dir = base_dir
def _inline_file(self, filename, tag, attrs):
self._html_writer.start(tag, attrs)
for line in HtmlTemplate(os.path.join(self._base_dir, filename)):
self._html_writer.content(line, escape=False, newline=True)
self._html_writer.end(tag)
class JsFileWriter(_InliningWriter):
_handles_line = '<script type="text/javascript" src='
_source_file = re.compile('src=\"([^\"]+)\"')
def write(self, line):
name = self._source_file.search(line).group(1)
self._inline_file(name, 'script', {'type': 'text/javascript'})
class CssFileWriter(_InliningWriter):
_handles_line = '<link rel="stylesheet"'
_source_file = re.compile('href=\"([^\"]+)\"')
_media_type = re.compile('media=\"([^\"]+)\"')
def write(self, line):
name = self._source_file.search(line).group(1)
media = self._media_type.search(line).group(1)
self._inline_file(name, 'style', {'type': 'text/css', 'media': media})
|
cbrunet/fibermodes | refs/heads/master | tests/fiber/material/test_sio2geo2.py | 2 | # This file is part of FiberModes.
#
# FiberModes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FiberModes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FiberModes. If not, see <http://www.gnu.org/licenses/>.
"""Test suite for fibermodes.fiber.materia.sio2geo2 module"""
import unittest
import warnings
from fibermodes import Wavelength
from fibermodes.fiber.material import Silica, Germania, SiO2GeO2
class TestSiO2GeO2(unittest.TestCase):
"""Test suite for SiO2GeO2 material."""
def testConcentrationZero(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertEqual(Silica.n(Wavelength(0.5876e-6)),
SiO2GeO2.n(Wavelength(0.5876e-6), 0))
self.assertEqual(Silica.n(Wavelength(1.55e-6)),
SiO2GeO2.n(Wavelength(1.55e-6), 0))
def testConcentrationOne(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertEqual(Germania.n(Wavelength(0.5876e-6)),
SiO2GeO2.n(Wavelength(0.5876e-6), 1))
self.assertEqual(Germania.n(Wavelength(1.55e-6)),
SiO2GeO2.n(Wavelength(1.55e-6), 1))
def testDopedSilica(self):
"""Warning: test values based on results! It only ensures
that function works and that results stay the same.
Please find official tables to compare with.
"""
self.assertAlmostEqual(SiO2GeO2.n(Wavelength(1.55e-6), 0.05),
1.451526777142772)
self.assertAlmostEqual(SiO2GeO2.n(Wavelength(1.55e-6), 0.1),
1.4589885105632852)
self.assertAlmostEqual(SiO2GeO2.n(Wavelength(1.55e-6), 0.2),
1.473791249750968)
def testXFromN(self):
self.assertAlmostEqual(
SiO2GeO2.xFromN(Wavelength(1.55e-6), 1.451526777142772),
0.05)
if __name__ == "__main__":
unittest.main()
|
krkhan/azure-linux-extensions | refs/heads/dev | RDMAUpdate/main/patch/SuSEPatching.py | 13 | #!/usr/bin/python
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import sys
import imp
import base64
import re
import json
import platform
import shutil
import time
import traceback
import datetime
import subprocess
from AbstractPatching import AbstractPatching
from Common import *
from CommandExecuter import CommandExecuter
from RdmaException import RdmaException
from SecondStageMarkConfig import SecondStageMarkConfig
class SuSEPatching(AbstractPatching):
def __init__(self,logger,distro_info):
super(SuSEPatching,self).__init__(distro_info)
self.logger = logger
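        # SLES 11 and SLES 12 ship several core utilities under different
        # paths, so resolve the binary locations per release.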
if(distro_info[1] == "11"):
self.base64_path = '/usr/bin/base64'
self.bash_path = '/bin/bash'
self.blkid_path = '/sbin/blkid'
self.cryptsetup_path = '/sbin/cryptsetup'
self.cat_path = '/bin/cat'
self.dd_path = '/bin/dd'
self.e2fsck_path = '/sbin/e2fsck'
self.echo_path = '/bin/echo'
self.lsblk_path = '/bin/lsblk'
self.lsscsi_path = '/usr/bin/lsscsi'
self.mkdir_path = '/bin/mkdir'
self.modprobe_path = '/usr/bin/modprobe'
self.mount_path = '/bin/mount'
self.openssl_path = '/usr/bin/openssl'
self.ps_path = '/bin/ps'
self.resize2fs_path = '/sbin/resize2fs'
self.reboot_path = '/sbin/reboot'
self.rmmod_path = '/sbin/rmmod'
self.service_path='/usr/sbin/service'
self.umount_path = '/bin/umount'
self.zypper_path = '/usr/bin/zypper'
else:
self.base64_path = '/usr/bin/base64'
self.bash_path = '/bin/bash'
self.blkid_path = '/usr/bin/blkid'
self.cat_path = '/bin/cat'
self.cryptsetup_path = '/usr/sbin/cryptsetup'
self.dd_path = '/usr/bin/dd'
self.e2fsck_path = '/sbin/e2fsck'
self.echo_path = '/usr/bin/echo'
self.lsblk_path = '/usr/bin/lsblk'
self.lsscsi_path = '/usr/bin/lsscsi'
self.mkdir_path = '/usr/bin/mkdir'
self.modprobe_path = '/usr/sbin/modprobe'
self.mount_path = '/usr/bin/mount'
self.openssl_path = '/usr/bin/openssl'
self.ps_path = '/usr/bin/ps'
self.resize2fs_path = '/sbin/resize2fs'
self.reboot_path = '/sbin/reboot'
self.rmmod_path = '/usr/sbin/rmmod'
self.service_path = '/usr/sbin/service'
self.umount_path = '/usr/bin/umount'
self.zypper_path = '/usr/bin/zypper'
def rdmaupdate(self):
check_install_result = self.check_install_hv_utils()
if(check_install_result == CommonVariables.process_success):
time.sleep(40)
check_result = self.check_rdma()
if(check_result == CommonVariables.UpToDate):
return
elif(check_result == CommonVariables.OutOfDate):
nd_driver_version = self.get_nd_driver_version()
rdma_package_installed_version = self.get_rdma_package_version()
update_rdma_driver_result = self.update_rdma_driver(nd_driver_version, rdma_package_installed_version)
elif(check_result == CommonVariables.DriverVersionNotFound):
raise RdmaException(CommonVariables.driver_version_not_found)
elif(check_result == CommonVariables.Unknown):
raise RdmaException(CommonVariables.unknown_error)
else:
raise RdmaException(CommonVariables.install_hv_utils_failed)
def check_rdma(self):
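        # Compare the host ND driver version (from the KVP pool) with the
        # installed RDMA package version; returns one of the CommonVariables
        # states: UpToDate, OutOfDate, DriverVersionNotFound or Unknown.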
nd_driver_version = self.get_nd_driver_version()
if(nd_driver_version is None or nd_driver_version == ""):
return CommonVariables.DriverVersionNotFound
package_version = self.get_rdma_package_version()
if(package_version is None or package_version == ""):
return CommonVariables.OutOfDate
else:
# package_version would be like this :20150707_k3.12.28_4-3.1
# nd_driver_version 140.0
self.logger.log("nd_driver_version is " + str(nd_driver_version) + " package_version is " + str(package_version))
if(nd_driver_version is not None):
r = re.match(".+(%s)$" % nd_driver_version, package_version)# NdDriverVersion should be at the end of package version
                if not r:  # package version does not end with the host ND version, so an update is needed
return CommonVariables.OutOfDate
else:
return CommonVariables.UpToDate
return CommonVariables.Unknown
def reload_hv_utils(self):
commandExecuter = CommandExecuter(self.logger)
        # reload hv_utils as a workaround for the hv_kvp_daemon service not restarting cleanly
        error,output = commandExecuter.RunGetOutput(self.rmmod_path + " hv_utils")
self.logger.log("rmmod hv_utils return code: " + str(error) + " output:" + str(output))
if(error != CommonVariables.process_success):
return CommonVariables.common_failed
        error,output = commandExecuter.RunGetOutput(self.modprobe_path + " hv_utils")
self.logger.log("modprobe hv_utils return code: " + str(error) + " output:" + str(output))
if(error != CommonVariables.process_success):
return CommonVariables.common_failed
return CommonVariables.process_success
def restart_hv_kvp_daemon(self):
commandExecuter = CommandExecuter(self.logger)
reload_result = self.reload_hv_utils()
if(reload_result == CommonVariables.process_success):
            # remove the stale /run/hv_kvp_daemon directory so the service can start again
            if(os.path.exists('/run/hv_kvp_daemon')):
                os.rmdir('/run/hv_kvp_daemon')
            error,output = commandExecuter.RunGetOutput(self.service_path + " hv_kvp_daemon start")
self.logger.log("service hv_kvp_daemon start return code: " + str(error) + " output:" + str(output))
if(error != CommonVariables.process_success):
return CommonVariables.common_failed
return CommonVariables.process_success
else:
return CommonVariables.common_failed
def check_install_hv_utils(self):
commandExecuter = CommandExecuter(self.logger)
error, output = commandExecuter.RunGetOutput(self.ps_path + " -ef")
if(error != CommonVariables.process_success):
return CommonVariables.common_failed
else:
r = re.search("hv_kvp_daemon", output)
if r is None :
self.logger.log("KVP deamon is not running, install it")
error,output = commandExecuter.RunGetOutput(self.zypper_path + " -n install --force hyper-v")
self.logger.log("install hyper-v return code: " + str(error) + " output:" + str(output))
if(error != CommonVariables.process_success):
return CommonVariables.common_failed
secondStageMarkConfig = SecondStageMarkConfig()
secondStageMarkConfig.MarkIt()
self.reboot_machine()
return CommonVariables.process_success
else :
self.logger.log("KVP deamon is running")
return CommonVariables.process_success
def get_nd_driver_version(self):
"""
        Read the host's ND driver version from KVP pool 0; raise a RdmaException if an error happens.
"""
try:
with open("/var/lib/hyperv/.kvp_pool_0", "r") as f:
lines = f.read()
r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines)
if r is not None:
NdDriverVersion = r.groups()[0]
return NdDriverVersion #e.g. NdDriverVersion = 142.0
else :
self.logger.log("Error: NdDriverVersion not found.")
return None
except Exception as e:
errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log("Can't update status: " + errMsg)
raise RdmaException(CommonVariables.nd_driver_detect_error)
def get_rdma_package_version(self):
"""
"""
commandExecuter = CommandExecuter(self.logger)
error, output = commandExecuter.RunGetOutput(self.zypper_path + " info msft-lis-rdma-kmp-default")
if(error == CommonVariables.process_success):
r = re.search("Version: (\S+)", output)
if r is not None:
package_version = r.groups()[0]# e.g. package_version is "20150707_k3.12.28_4-3.1.140.0"
return package_version
else:
return None
else:
return None
def update_rdma_driver(self, host_version, rdma_package_installed_version):
"""
"""
commandExecuter = CommandExecuter(self.logger)
error, output = commandExecuter.RunGetOutput(self.zypper_path + " lr -u")
rdma_pack_result = re.search("msft-rdma-pack", output)
if rdma_pack_result is None :
self.logger.log("rdma_pack_result is None")
error, output = commandExecuter.RunGetOutput(self.zypper_path + " ar https://drivers.suse.com/microsoft/Microsoft-LIS-RDMA/sle-12/updates msft-rdma-pack")
            # wait for the repository cache to be built
time.sleep(20)
self.logger.log("error result is " + str(error) + " output is : " + str(output))
else:
self.logger.log("output is: "+str(output))
self.logger.log("msft-rdma-pack found")
returnCode,message = commandExecuter.RunGetOutput(self.zypper_path + " --no-gpg-checks refresh")
self.logger.log("refresh repro return code is " + str(returnCode) + " output is: " + str(message))
#install the wrapper package, that will put the driver RPM packages under /opt/microsoft/rdma
returnCode,message = commandExecuter.RunGetOutput(self.zypper_path + " -n remove " + CommonVariables.wrapper_package_name)
self.logger.log("remove wrapper package return code is " + str(returnCode) + " output is: " + str(message))
returnCode,message = commandExecuter.RunGetOutput(self.zypper_path + " --non-interactive install --force " + CommonVariables.wrapper_package_name)
self.logger.log("install wrapper package return code is " + str(returnCode) + " output is: " + str(message))
r = os.listdir("/opt/microsoft/rdma")
if r is not None :
for filename in r :
if re.match("msft-lis-rdma-kmp-default-\d{8}\.(%s).+" % host_version, filename) :
error,output = commandExecuter.RunGetOutput(self.zypper_path + " --non-interactive remove msft-lis-rdma-kmp-default")
self.logger.log("remove msft-lis-rdma-kmp-default result is " + str(error) + " output is: " + str(output))
self.logger.log("Installing RPM /opt/microsoft/rdma/" + filename)
error,output = commandExecuter.RunGetOutput(self.zypper_path + " --non-interactive install --force /opt/microsoft/rdma/%s" % filename)
self.logger.log("Install msft-lis-rdma-kmp-default result is " + str(error) + " output is: " + str(output))
if(error == CommonVariables.process_success):
self.reboot_machine()
else:
raise RdmaException(CommonVariables.package_install_failed)
else:
self.logger.log("RDMA drivers not found in /opt/microsoft/rdma")
raise RdmaException(CommonVariables.package_not_found)
def reboot_machine(self):
self.logger.log("rebooting machine")
commandExecuter = CommandExecuter(self.logger)
commandExecuter.RunGetOutput(self.reboot_path)
|
maelnor/nova | refs/heads/master | nova/api/openstack/compute/plugins/v3/hosts.py | 15 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from nova.api.openstack.compute.schemas.v3 import hosts
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = 'os-hosts'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute.HostAPI()
super(HostController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
"""Returns a dict in the format
| {'hosts': [{'host_name': 'some.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.other.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.celly.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'console1.host.com',
| 'service': 'consoleauth',
| 'zone': 'internal'},
| {'host_name': 'network1.host.com',
| 'service': 'network',
| 'zone': 'internal'},
        | {'host_name': 'network2.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'compute1.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'compute2.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'sched1.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'sched2.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'vol1.host.com',
        | 'service': 'volume',
        | 'zone': 'internal'}]}
"""
context = req.environ['nova.context']
authorize(context)
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
service = req.GET.get('service')
if service:
filters['topic'] = service
services = self.api.service_get_all(context, filters=filters,
set_zones=True)
hosts = []
for service in services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
@extensions.expected_errors((400, 404, 501))
@validation.schema(hosts.update)
def update(self, req, id, body):
""":param body: example format {'status': 'enable',
'maintenance_mode': 'enable'}
           :returns: a dict with the host name and the updated status values
"""
def read_enabled(orig_val):
""":param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
:returns: True for 'enabled' and False for 'disabled'
"""
val = orig_val.strip().lower()
return val == "enable"
context = req.environ['nova.context']
authorize(context)
# See what the user wants to 'update'
status = body.get('status')
maint_mode = body.get('maintenance_mode')
if status is not None:
status = read_enabled(status)
if maint_mode is not None:
maint_mode = read_enabled(maint_mode)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id,
maint_mode)
return result
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
LOG.audit(_("Putting host %(host_name)s in maintenance mode "
"%(mode)s."),
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host.
"""
if enabled:
LOG.audit(_("Enabling host %s."), host_name)
else:
LOG.audit(_("Disabling host %s."), host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": host_name, "power_action": result}
@extensions.expected_errors((400, 404, 501))
def startup(self, req, id):
return self._host_power_action(req, host_name=id, action="startup")
@extensions.expected_errors((400, 404, 501))
def shutdown(self, req, id):
return self._host_power_action(req, host_name=id, action="shutdown")
@extensions.expected_errors((400, 404, 501))
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Getting usage resource per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
@extensions.expected_errors((403, 404))
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
authorize(context)
host_name = id
try:
service = self.api.service_get_by_compute_host(context, host_name)
except exception.ComputeHostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.AdminRequired:
# TODO(Alex Xu): The authorization is done by policy,
# db layer checking is needless. The db layer checking should
# be removed
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
compute_node = service['compute_node']
instances = self.api.instance_get_all_by_host(context, host_name)
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in by_proj_resources.itervalues():
resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.V3APIExtensionBase):
"""Admin-only host administration."""
name = "Hosts"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
def get_controller_extensions(self):
return []
|
mlperf/inference_results_v0.7 | refs/heads/master | closed/Nettrix/code/rnnt/tensorrt/preprocessing/convert_rnnt_data.py | 12 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Script to preprocess .wav files and convert them to .npy format
RNNT harness reads in .npy files
Example command line:
python3 convert_rnnt_data.py --batch_size 1 --output_dir <path> --cudnn_benchmark --dataset_dir <path> --val_manifest <path>/<name>-wav.json --model_toml configs/rnnt.toml
'''
import argparse
import itertools
import os
import torch
import numpy as np
import torchvision
from tqdm import tqdm
import math
import random
import toml
import sys
sys.path.insert(0, os.path.dirname(__file__))
from helpers import Optimization, print_dict, add_blank_label
from dataset import AudioToTextDataLayer
from preprocessing import AudioPreprocessing
def parse_args():
parser = argparse.ArgumentParser(description='Jasper')
parser.add_argument("--dataset_dir", type=str, help='absolute path to dataset folder')
parser.add_argument("--output_dir", type=str, help='absolute path for generated .npy files folder')
parser.add_argument("--val_manifest", type=str, help='relative path to evaluation dataset manifest file')
parser.add_argument("--batch_size", default=1, type=int, help='data batch size')
parser.add_argument("--fp16", action='store_true', help='use half precision')
parser.add_argument("--fixed_seq_length", default=512, type=int, help="produce .npy files with fixed sequence length")
parser.add_argument("--generate_wav_npy", default=True, type=str, help="produce wav .npy files with MAX length")
parser.add_argument("--fixed_wav_file_length", default=240000, type=int, help="produce wav .npy files with MAX length")
parser.add_argument("--seed", default=42, type=int, help='seed')
parser.add_argument("--model_toml", type=str, help='relative model configuration path given dataset folder')
parser.add_argument("--max_duration", default=None, type=float, help='maximum duration of sequences. if None uses attribute from model configuration file')
parser.add_argument("--pad_to", default=None, type=int, help="default is pad to value as specified in model configurations. if -1 pad to maximum duration. If > 0 pad batch to next multiple of value")
return parser.parse_args()
def eval(
data_layer,
audio_processor,
args):
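    # Walk the manifest, dumping per-utterance features as .npy files
    # (fp16/fp32 features, int32 lengths and, optionally, raw wav tensors
    # padded or truncated to a fixed length) plus a val_map listing.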
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if not os.path.exists(args.output_dir + 'fp16'):
os.makedirs(args.output_dir + "fp16")
if not os.path.exists(args.output_dir + 'fp32'):
os.makedirs(args.output_dir + "fp32")
if not os.path.exists(args.output_dir + 'int32'):
os.makedirs(args.output_dir + "int32")
if(args.generate_wav_npy):
if not os.path.exists(args.output_dir + 'wav_files'):
os.makedirs(args.output_dir + "wav_files")
if not os.path.exists(args.output_dir + 'wav_files' + '/int32'):
os.makedirs(args.output_dir + 'wav_files' + '/int32')
if not os.path.exists(args.output_dir + 'wav_files' + '/fp32'):
os.makedirs(args.output_dir + 'wav_files' + '/fp32')
if not os.path.exists(args.output_dir + 'wav_files' + '/fp16'):
os.makedirs(args.output_dir + 'wav_files' + '/fp16')
fixed_seq_length = args.fixed_seq_length
val_map_filename = args.output_dir + "val_map_" + str(fixed_seq_length) + ".txt"
file_handle = open(val_map_filename, "w")
max_seq_length = 0
for it, data in enumerate(tqdm(data_layer.data_iterator)):
tensors = []
for d in data:
tensors.append(d)
file_handle.write("RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + "\n")
if(args.generate_wav_npy):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = tensors
print("Audio signal = {} dtype = {} shape {} ".format(t_audio_signal_e, t_audio_signal_e.dtype, torch.numel(t_audio_signal_e)))
print("{} Audio signal length = {}".format(it, t_a_sig_length_e))
t_audio_signal_e_fp16 = t_audio_signal_e.to(torch.float16)
if t_a_sig_length_e <= args.fixed_wav_file_length:
target = torch.zeros(args.fixed_wav_file_length, dtype=torch.float32)
target[:t_a_sig_length_e] = t_audio_signal_e
#print("Target = {}".format(target))
#print("Target num elements = {}".format(torch.numel(target)))
target_np = target.cpu().numpy()
file_name = args.output_dir + "wav_files/fp32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
target = torch.zeros(args.fixed_wav_file_length, dtype=torch.float16)
target[:t_a_sig_length_e] = t_audio_signal_e_fp16
#print("Target = {}".format(target))
#print("Target num elements = {}".format(torch.numel(target)))
target_np = target.cpu().numpy()
file_name = args.output_dir + "wav_files/fp16/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
t_a_sig_length_e_int32 = t_a_sig_length_e.to(torch.int32)
t_a_sig_length_e_int32_np = t_a_sig_length_e_int32.cpu().numpy()
print("Length tensor = {}".format(t_a_sig_length_e_int32_np))
file_name = args.output_dir + "wav_files/int32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, t_a_sig_length_e_int32_np)
else:
                # signal longer than the fixed length: truncate and save both precisions
                target = t_audio_signal_e[:args.fixed_wav_file_length]
                target_np = target.cpu().numpy()
                file_name = args.output_dir + "wav_files/fp32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
                np.save(file_name, target_np)
                target = t_audio_signal_e_fp16[:args.fixed_wav_file_length]
                target_np = target.cpu().numpy()
                file_name = args.output_dir + "wav_files/fp16/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
                np.save(file_name, target_np)
length_tensor = torch.Tensor([args.fixed_wav_file_length])
#print("Length_tensor = {}".format(length_tensor))
t_a_sig_length_e_int32 = length_tensor.to(torch.int32)
t_a_sig_length_e_int32_np = t_a_sig_length_e_int32.cpu().numpy()
print("Length tensor = {}".format(t_a_sig_length_e_int32_np))
file_name = args.output_dir + "wav_files/int32/" + "RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, t_a_sig_length_e_int32_np)
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
seq_length, batch_size, num_features = t_audio_signal_e.size()
print("Seq length = {} Batch size = {} Features = {}".format(seq_length, batch_size, num_features))
if seq_length > max_seq_length:
max_seq_length = seq_length
t_audio_signal_e_fp16 = t_audio_signal_e.to(torch.float16)
t_audio_signal_e_fp16 = t_audio_signal_e_fp16.reshape(seq_length, num_features)
t_audio_signal_e_fp16_np = t_audio_signal_e_fp16.cpu().numpy()
t_audio_signal_e = t_audio_signal_e.reshape(seq_length, num_features)
t_audio_signal_e_np = t_audio_signal_e.cpu().numpy()
t_a_sig_length_e_int32 = t_a_sig_length_e.to(torch.int32)
t_a_sig_length_e_int32_np = t_a_sig_length_e_int32.cpu().numpy()
target_np = t_a_sig_length_e_int32_np
file_name = args.output_dir + "int32/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
# Generating Fixed size seq_length
if seq_length <= fixed_seq_length:
target = torch.zeros(fixed_seq_length, 240, dtype=torch.float16)
target[:seq_length,:] = t_audio_signal_e_fp16
target_np = target.cpu().numpy()
file_name = args.output_dir + "fp16/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
target = torch.zeros(fixed_seq_length, 240, dtype=torch.float32)
target[:seq_length,:] = t_audio_signal_e
target_np = target.cpu().numpy()
file_name = args.output_dir + "fp32/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
else:
            target = t_audio_signal_e_fp16[:fixed_seq_length,:]
target_np = target.cpu().numpy()
file_name = args.output_dir + "fp16/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
            target = t_audio_signal_e[:fixed_seq_length,:]
target_np = target.cpu().numpy()
file_name = args.output_dir + "fp32/RNNT_input_" + str(fixed_seq_length) + "_" + str(it) + ".npy"
np.save(file_name, target_np)
print("Max seq length {}".format(max_seq_length))
file_handle.close()
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.fp16:
optim_level = Optimization.mxprO3
else:
optim_level = Optimization.mxprO0
model_definition = toml.load(args.model_toml)
dataset_vocab = model_definition['labels']['labels']
ctc_vocab = add_blank_label(dataset_vocab)
val_manifest = args.val_manifest
featurizer_config = model_definition['input_eval']
featurizer_config["optimization_level"] = optim_level
if args.max_duration is not None:
featurizer_config['max_duration'] = args.max_duration
if args.pad_to is not None:
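        # A negative pad_to is shorthand for "pad every batch to the longest
        # utterance" (see the pad_to_max flag passed to the data layer below).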
featurizer_config['pad_to'] = args.pad_to if args.pad_to >= 0 else "max"
data_layer = AudioToTextDataLayer(
dataset_dir=args.dataset_dir,
featurizer_config=featurizer_config,
manifest_filepath=val_manifest,
labels=dataset_vocab,
batch_size=args.batch_size,
pad_to_max=featurizer_config['pad_to'] == "max",
shuffle=False,
multi_gpu=False)
audio_preprocessor = AudioPreprocessing(**featurizer_config)
audio_preprocessor.eval()
eval_transforms = torchvision.transforms.Compose([
lambda xs: [*audio_preprocessor(xs[0:2]), *xs[2:]],
lambda xs: [xs[0].permute(2, 0, 1), *xs[1:]],
])
eval(
data_layer=data_layer,
audio_processor=eval_transforms,
args=args)
if __name__=="__main__":
args = parse_args()
print_dict(vars(args))
main(args) |
ventilooo/nit | refs/heads/master | contrib/neo_doxygen/tests/python-def/src/foo.py | 16 | # This file is part of NIT ( http://www.nitlanguage.org ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## A `bar` function in the `foo` namespace.
def bar():
"""By default, Doxygen recognizes anything in the docstrings as verbatim
detailed description."""
pass
|
gnufede/results | refs/heads/master | results/goals/urls.py | 1 | from django.conf.urls import patterns, url
urlpatterns = patterns('goals.views',
url(r'^users/me$', 'myself'),
url(r'^categories$', 'category_list'),
url(r'^wins$', 'win_list'),
url(r'^wins/(?P<pk>[0-9]+)$', 'win_detail'),
url(r'^wins/tag/$', 'win_tag'),
url(r'^goals$', 'goal_list'),
url(r'^goals/tag/$', 'goal_tag'),
url(r'^goals/(?P<pk>[0-9]+)$', 'goal_detail'),
# url(r'^wins/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d+)/$', 'win_list'),
# url(r'^wins/weekly$', 'win_list', kwargs={'weekly':True,}),
# url(r'^wins/weekly/(?P<pk>[0-9]+)$', 'win_detail'),
# url(r'^wins/weekly/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d+)/$','win_list', kwargs={'weekly':True,}),
# url(r'^goals/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d+)/$', 'goal_list'),
# url(r'^goals(?:/(?P<weekly>\d+))?$', 'goal_list'),
# url(r'^goals/weekly$', 'goal_list', kwargs={'weekly':True,}),
# url(r'^goals/weekly/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d+)/$','goal_list', kwargs={'weekly':True,}),
# url(r'^goals/weekly/(?P<pk>[0-9]+)$', 'goal_detail'),
)
|
scalable-networks/ext | refs/heads/master | gnuradio-3.7.0.1/gnuradio-runtime/python/gnuradio/gru/os_read_exactly.py | 78 | #
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import os
def os_read_exactly(file_descriptor, nbytes):
"""
Replacement for os.read that blocks until it reads exactly nbytes.
"""
s = ''
while nbytes > 0:
sbuf = os.read(file_descriptor, nbytes)
        if not sbuf:
return ''
nbytes -= len(sbuf)
s = s + sbuf
return s
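
# A minimal usage sketch (not part of the original module): the pipe and
# payload below are illustrative assumptions, showing that the helper keeps
# reading until the requested byte count is met.
if __name__ == '__main__':
    read_fd, write_fd = os.pipe()
    os.write(write_fd, 'hello world')
    os.close(write_fd)
    # Reads exactly 5 bytes even if the OS delivers them in smaller chunks.
    assert os_read_exactly(read_fd, 5) == 'hello'
    os.close(read_fd)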
|
chrish42/pylearn | refs/heads/master | pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py | 44 | import unittest
import numpy
import theano
from theano.tests.unittest_tools import verify_grad
from .unshared_conv import FilterActs
from .unshared_conv import WeightActs
from .unshared_conv import ImgActs
def rand(shp, dtype):
return numpy.random.rand(*shp).astype(dtype)
def assert_linear(f, pt, mode=None):
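    """Numerically verify that `f` is linear at `pt`: scaling the input by t
    scales the output by t, and f applied to a sum equals the sum of f applied
    to each term (checked against a random point of the same shape, at t=3.6).
    """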
t = theano.tensor.scalar(dtype=pt.dtype)
ptlike = theano.shared(rand(
pt.get_value(borrow=True).shape,
dtype=pt.dtype))
out = f(pt)
out2 = f(pt * t)
out3 = f(ptlike) + out
out4 = f(pt + ptlike)
f = theano.function([t], [out * t, out2, out3, out4],
allow_input_downcast=True,
mode=mode)
outval, out2val, out3val, out4val = f(3.6)
assert numpy.allclose(outval, out2val)
assert numpy.allclose(out3val, out4val)
class TestFilterActs(unittest.TestCase):
# 2 4x4 greyscale images
ishape = (1, 1, 4, 4, 2)
# 5 3x3 filters at each location in a 2x2 grid
fshape = (2, 2, 1, 3, 3, 1, 5)
module_stride = 1
dtype = 'float64'
# step size for numeric gradient, None is the default
eps = None
mode = theano.compile.get_default_mode()
def function(self, inputs, outputs):
return theano.function(inputs, outputs, mode=self.mode)
def setUp(self):
self.op = FilterActs(self.module_stride)
self.s_images = theano.shared(rand(self.ishape, self.dtype),
name = 's_images')
self.s_filters = theano.shared(
rand(self.fshape, self.dtype),
name = 's_filters')
def test_type(self):
out = self.op(self.s_images, self.s_filters)
assert out.dtype == self.dtype
assert out.ndim == 5
f = self.function([], out)
outval = f()
assert len(outval.shape) == len(self.ishape)
assert outval.dtype == self.s_images.get_value(borrow=True).dtype
def test_linearity_images(self):
assert_linear(
lambda imgs: self.op(imgs, self.s_filters),
self.s_images,
mode=self.mode)
def test_linearity_filters(self):
assert_linear(
lambda fts: self.op(self.s_images, fts),
self.s_filters,
mode=self.mode)
def test_shape(self):
out = self.op(self.s_images, self.s_filters)
f = self.function([], out)
outval = f()
assert outval.shape == (self.fshape[-2],
self.fshape[-1],
self.fshape[0], self.fshape[1],
self.ishape[-1])
def test_grad_left(self):
# test only the left so that the right can be a shared variable,
# and then TestGpuFilterActs can use a gpu-allocated shared var
# instead.
def left_op(imgs):
return self.op(imgs, self.s_filters)
verify_grad(left_op, [self.s_images.get_value()],
mode=self.mode, eps=self.eps)
def test_grad_right(self):
# test only the right so that the left can be a shared variable,
# and then TestGpuFilterActs can use a gpu-allocated shared var
# instead.
def right_op(filters):
rval = self.op(self.s_images, filters)
rval.name = 'right_op(%s, %s)' % (self.s_images.name,
filters.name)
assert rval.dtype == filters.dtype
return rval
verify_grad(right_op, [self.s_filters.get_value()],
mode=self.mode, eps=self.eps)
def test_dtype_mismatch(self):
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_images, 'float32'),
theano.tensor.cast(self.s_filters, 'float64'))
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_images, 'float64'),
theano.tensor.cast(self.s_filters, 'float32'))
def test_op_eq(self):
assert FilterActs(1) == FilterActs(1)
assert not (FilterActs(1) != FilterActs(1))
assert (FilterActs(2) != FilterActs(1))
assert FilterActs(1) != None
class TestFilterActsF32(TestFilterActs):
dtype = 'float32'
eps = 1e-3
class TestWeightActs(unittest.TestCase):
    # 1 5x5 image (6 groups of 3 channels)
ishape = (6, 3, 5, 5, 1)
hshape = (6, 4, 2, 2, 1)
fshape = (2, 2, 3, 2, 2, 6, 4)
module_stride = 2
dtype = 'float64'
# step size for numeric gradient, None is the default
eps = None
frows = property(lambda s: s.fshape[3])
fcols = property(lambda s: s.fshape[4])
def setUp(self):
self.op = WeightActs(self.module_stride)
self.s_images = theano.shared(rand(self.ishape, self.dtype))
self.s_hidacts = theano.shared(rand(self.hshape, self.dtype))
def test_type(self):
out = self.op(self.s_images, self.s_hidacts, self.frows, self.fcols)
assert out.dtype == self.dtype
assert out.ndim == 7
f = theano.function([], out)
outval = f()
assert outval.shape == self.fshape
assert outval.dtype == self.dtype
def test_linearity_images(self):
def f(images):
return self.op(images, self.s_hidacts, self.frows, self.fcols)
assert_linear(f, self.s_images)
def test_linearity_hidacts(self):
def f(hidacts):
return self.op(self.s_images, hidacts, self.frows, self.fcols)
assert_linear(f, self.s_hidacts)
def test_grad(self):
def op2(imgs, hids):
return self.op(imgs, hids, self.frows, self.fcols)
verify_grad(op2,
[self.s_images.get_value(),
self.s_hidacts.get_value()],
eps=self.eps)
def test_dtype_mismatch(self):
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_images, 'float32'),
theano.tensor.cast(self.s_hidacts, 'float64'),
self.frows, self.fcols)
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_images, 'float64'),
theano.tensor.cast(self.s_hidacts, 'float32'),
self.frows, self.fcols)
class TestImgActs(unittest.TestCase):
    # 2 5x5 images (6 groups of 3 channels)
ishape = (6, 3, 5, 5, 2)
hshape = (6, 4, 3, 3, 2)
fshape = (3, 3, 3, 2, 2, 6, 4)
module_stride = 1
dtype = 'float64'
# step size for numeric gradient, None is the default
eps = None
#frows = property(lambda s: s.fshape[3])
#fcols = property(lambda s: s.fshape[4])
irows = property(lambda s: s.ishape[2])
icols = property(lambda s: s.ishape[3])
def setUp(self):
self.op = ImgActs(module_stride=self.module_stride)
self.s_filters = theano.shared(rand(self.fshape, self.dtype))
self.s_hidacts = theano.shared(rand(self.hshape, self.dtype))
def test_type(self):
out = self.op(self.s_filters, self.s_hidacts, self.irows, self.icols)
assert out.dtype == self.dtype
assert out.ndim == 5
f = theano.function([], out)
outval = f()
assert outval.shape == self.ishape
assert outval.dtype == self.dtype
def test_linearity_filters(self):
def f(filts):
return self.op(filts, self.s_hidacts, self.irows, self.icols)
assert_linear(f, self.s_filters)
def test_linearity_hidacts(self):
def f(hidacts):
return self.op(self.s_filters, hidacts, self.irows, self.icols)
assert_linear(f, self.s_hidacts)
def test_grad(self):
def op2(imgs, hids):
return self.op(imgs, hids, self.irows, self.icols)
verify_grad(op2,
[self.s_filters.get_value(),
self.s_hidacts.get_value()],
eps=self.eps)
def test_dtype_mismatch(self):
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_filters, 'float32'),
theano.tensor.cast(self.s_hidacts, 'float64'),
self.irows, self.icols)
self.assertRaises(TypeError,
self.op,
theano.tensor.cast(self.s_filters, 'float64'),
theano.tensor.cast(self.s_hidacts, 'float32'),
self.irows, self.icols)
|
demonchild2112/travis-test | refs/heads/master | grr/client/grr_response_client/client_actions/file_fingerprint.py | 2 | #!/usr/bin/env python
"""Action to fingerprint files on the client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
from grr_response_client import vfs
from grr_response_client.client_actions import standard
from grr_response_core.lib import fingerprint
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
class Fingerprinter(fingerprint.Fingerprinter):
"""A fingerprinter with heartbeat."""
def __init__(self, progress_cb, file_obj):
super(Fingerprinter, self).__init__(file_obj)
self.progress_cb = progress_cb
def _GetNextInterval(self):
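    """Heartbeat, then fall through to the parent's interval logic."""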
self.progress_cb()
return super(Fingerprinter, self)._GetNextInterval()
class FingerprintFile(standard.ReadBuffer):
"""Apply a set of fingerprinting methods to a file."""
in_rdfvalue = rdf_client_action.FingerprintRequest
out_rdfvalues = [rdf_client_action.FingerprintResponse]
_hash_types = {
rdf_client_action.FingerprintTuple.HashType.MD5: hashlib.md5,
rdf_client_action.FingerprintTuple.HashType.SHA1: hashlib.sha1,
rdf_client_action.FingerprintTuple.HashType.SHA256: hashlib.sha256,
}
_fingerprint_types = {
rdf_client_action.FingerprintTuple.Type.FPT_GENERIC: (
fingerprint.Fingerprinter.EvalGeneric),
rdf_client_action.FingerprintTuple.Type.FPT_PE_COFF: (
fingerprint.Fingerprinter.EvalPecoff),
}
def Run(self, args):
"""Fingerprint a file."""
with vfs.VFSOpen(
args.pathspec, progress_callback=self.Progress) as file_obj:
fingerprinter = Fingerprinter(self.Progress, file_obj)
response = rdf_client_action.FingerprintResponse()
response.pathspec = file_obj.pathspec
if args.tuples:
tuples = args.tuples
else:
# There are none selected -- we will cover everything
tuples = list()
for k in self._fingerprint_types:
tuples.append(rdf_client_action.FingerprintTuple(fp_type=k))
for finger in tuples:
hashers = [self._hash_types[h] for h in finger.hashers] or None
if finger.fp_type in self._fingerprint_types:
invoke = self._fingerprint_types[finger.fp_type]
res = invoke(fingerprinter, hashers)
if res:
response.matching_types.append(finger.fp_type)
else:
raise RuntimeError(
"Encountered unknown fingerprint type. %s" % finger.fp_type)
# Structure of the results is a list of dicts, each containing the
# name of the hashing method, hashes for enabled hash algorithms,
      # and auxiliary data where present (e.g. signature blobs).
# Also see Fingerprint:HashIt()
response.results = fingerprinter.HashIt()
# We now return data in a more structured form.
for result in response.results:
if result.GetItem("name") == "generic":
for hash_type in ["md5", "sha1", "sha256"]:
value = result.GetItem(hash_type)
if value is not None:
setattr(response.hash, hash_type, value)
if result["name"] == "pecoff":
for hash_type in ["md5", "sha1", "sha256"]:
value = result.GetItem(hash_type)
if value:
setattr(response.hash, "pecoff_" + hash_type, value)
signed_data = result.GetItem("SignedData", [])
for data in signed_data:
response.hash.signed_data.Append(
revision=data[0], cert_type=data[1], certificate=data[2])
self.SendReply(response)
|
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/mobiles/dynamicgroups/tatooine_desert_demon.py | 2 | # Spawn Group file created with PSWG Planetary Spawn Tool
import sys
from java.util import Vector
from services.spawn import DynamicSpawnGroup
from services.spawn import MobileTemplate
def addDynamicGroup(core):
dynamicGroup = DynamicSpawnGroup()
mobileTemplates = Vector()
mobileTemplates.add('desert_demon')
mobileTemplates.add('desert_demon_bodyguard')
mobileTemplates.add('desert_demon_brawler')
mobileTemplates.add('desert_demon_leader')
mobileTemplates.add('desert_demon_marksman')
dynamicGroup.setMobiles(mobileTemplates)
dynamicGroup.setGroupMembersNumber(5)
dynamicGroup.setName('tatooine_desert_demon')
dynamicGroup.setMaxSpawns(-1)
dynamicGroup.setMinSpawnDistance(150)
core.spawnService.addDynamicGroup('tatooine_desert_demon', dynamicGroup)
return
|
Samuel-Ferreira/django-pizza | refs/heads/master | pizzaria/pizza/views.py | 1 | from django.http import HttpResponse
from django.shortcuts import render
from .models import Pizza
def menu(request):
#import ipdb; ipdb.set_trace()
pizzas = Pizza.objects.all()
context = {'pizzas': pizzas}
return render(request, 'pizza/menu.html', context)
|
aYukiSekiguchi/ACCESS-Chromium | refs/heads/master | third_party/libxml/src/regressions.py | 360 | #!/usr/bin/python -u
import glob, os, string, sys, thread, time
# import difflib
import libxml2
###
#
# This is a "Work in Progress" attempt at a python script to run the
# various regression tests. The rationale for this is that it should be
# possible to run this on most major platforms, including those (such as
# Windows) which don't support gnu Make.
#
# The script is driven by a parameter file which defines the various tests
# to be run, together with the unique settings for each of these tests. A
# script for Linux is included (regressions.xml), with comments indicating
# the significance of the various parameters. To run the tests under Windows,
# edit regressions.xml and remove the comment around the default parameter
# "<execpath>" (i.e. make it point to the location of the binary executables).
#
# Note that this current version requires the Python bindings for libxml2 to
# have been previously installed and accessible
#
# See Copyright for the status of this software.
# William Brack ([email protected])
#
###
defaultParams = {} # will be used as a dictionary to hold the parsed params
# This routine is used for comparing the expected stdout / stdin with the results.
# The expected data has already been read in; the result is a file descriptor.
# Within the two sets of data, lines may begin with a path string. If so, the
# code "relativises" it by removing the path component. The first argument is a
# list already read in by a separate thread; the second is a file descriptor.
# The two 'base' arguments are to let me "relativise" the results files, allowing
# the script to be run from any directory.
def compFiles(res, expected, base1, base2):
l1 = len(base1)
exp = expected.readlines()
expected.close()
# the "relativisation" is done here
for i in range(len(res)):
j = string.find(res[i],base1)
if (j == 0) or ((j == 2) and (res[i][0:2] == './')):
col = string.find(res[i],':')
if col > 0:
start = string.rfind(res[i][:col], '/')
if start > 0:
res[i] = res[i][start+1:]
for i in range(len(exp)):
j = string.find(exp[i],base2)
if (j == 0) or ((j == 2) and (exp[i][0:2] == './')):
col = string.find(exp[i],':')
if col > 0:
start = string.rfind(exp[i][:col], '/')
if start > 0:
exp[i] = exp[i][start+1:]
ret = 0
# ideally we would like to use difflib functions here to do a
# nice comparison of the two sets. Unfortunately, during testing
# (using python 2.3.3 and 2.3.4) the following code went into
# a dead loop under windows. I'll pursue this later.
# diff = difflib.ndiff(res, exp)
# diff = list(diff)
# for line in diff:
# if line[:2] != ' ':
# print string.strip(line)
# ret = -1
# the following simple compare is fine for when the two data sets
# (actual result vs. expected result) are equal, which should be true for
# us. Unfortunately, if the test fails it's not nice at all.
rl = len(res)
el = len(exp)
if el != rl:
print 'Length of expected is %d, result is %d' % (el, rl)
ret = -1
for i in range(min(el, rl)):
if string.strip(res[i]) != string.strip(exp[i]):
print '+:%s-:%s' % (res[i], exp[i])
ret = -1
if el > rl:
for i in range(rl, el):
print '-:%s' % exp[i]
ret = -1
elif rl > el:
for i in range (el, rl):
print '+:%s' % res[i]
ret = -1
return ret
# Separate threads to handle stdout and stderr are created to run this function
def readPfile(file, list, flag):
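    """Read all lines of `file` into `list`, then append to `flag` so the
    caller can poll for thread completion."""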
data = file.readlines() # no call by reference, so I cheat
for l in data:
list.append(l)
file.close()
flag.append('ok')
# This routine runs the test program (e.g. xmllint)
def runOneTest(testDescription, filename, inbase, errbase):
if 'execpath' in testDescription:
dir = testDescription['execpath'] + '/'
else:
dir = ''
cmd = os.path.abspath(dir + testDescription['testprog'])
if 'flag' in testDescription:
for f in string.split(testDescription['flag']):
cmd += ' ' + f
if 'stdin' not in testDescription:
cmd += ' ' + inbase + filename
if 'extarg' in testDescription:
cmd += ' ' + testDescription['extarg']
noResult = 0
expout = None
if 'resext' in testDescription:
if testDescription['resext'] == 'None':
noResult = 1
else:
ext = '.' + testDescription['resext']
else:
ext = ''
if not noResult:
try:
fname = errbase + filename + ext
expout = open(fname, 'rt')
except:
print "Can't open result file %s - bypassing test" % fname
return
noErrors = 0
if 'reserrext' in testDescription:
if testDescription['reserrext'] == 'None':
noErrors = 1
else:
if len(testDescription['reserrext'])>0:
ext = '.' + testDescription['reserrext']
else:
ext = ''
else:
ext = ''
if not noErrors:
try:
fname = errbase + filename + ext
experr = open(fname, 'rt')
except:
experr = None
else:
experr = None
pin, pout, perr = os.popen3(cmd)
if 'stdin' in testDescription:
infile = open(inbase + filename, 'rt')
pin.writelines(infile.readlines())
infile.close()
pin.close()
# popen is great fun, but can lead to the old "deadly embrace", because
# synchronizing the writing (by the task being run) of stdout and stderr
# with respect to the reading (by this task) is basically impossible. I
# tried several ways to cheat, but the only way I have found which works
# is to do a *very* elementary multi-threading approach. We can only hope
# that Python threads are implemented on the target system (it's okay for
# Linux and Windows)
th1Flag = [] # flags to show when threads finish
th2Flag = []
outfile = [] # lists to contain the pipe data
errfile = []
th1 = thread.start_new_thread(readPfile, (pout, outfile, th1Flag))
th2 = thread.start_new_thread(readPfile, (perr, errfile, th2Flag))
while (len(th1Flag)==0) or (len(th2Flag)==0):
time.sleep(0.001)
if not noResult:
ret = compFiles(outfile, expout, inbase, 'test/')
if ret != 0:
print 'trouble with %s' % cmd
else:
if len(outfile) != 0:
for l in outfile:
print l
print 'trouble with %s' % cmd
if experr != None:
ret = compFiles(errfile, experr, inbase, 'test/')
if ret != 0:
print 'trouble with %s' % cmd
else:
if not noErrors:
if len(errfile) != 0:
for l in errfile:
print l
print 'trouble with %s' % cmd
if 'stdin' not in testDescription:
pin.close()
# This routine is called by the parameter decoding routine whenever the end of a
# 'test' section is encountered. Depending upon file globbing, a large number of
# individual tests may be run.
def runTest(description):
testDescription = defaultParams.copy() # set defaults
testDescription.update(description) # override with current ent
if 'testname' in testDescription:
print "## %s" % testDescription['testname']
if not 'file' in testDescription:
print "No file specified - can't run this test!"
return
# Set up the source and results directory paths from the decoded params
dir = ''
if 'srcdir' in testDescription:
dir += testDescription['srcdir'] + '/'
if 'srcsub' in testDescription:
dir += testDescription['srcsub'] + '/'
rdir = ''
if 'resdir' in testDescription:
rdir += testDescription['resdir'] + '/'
if 'ressub' in testDescription:
rdir += testDescription['ressub'] + '/'
testFiles = glob.glob(os.path.abspath(dir + testDescription['file']))
if testFiles == []:
print "No files result from '%s'" % testDescription['file']
return
# Some test programs just don't work (yet). For now we exclude them.
count = 0
excl = []
if 'exclfile' in testDescription:
for f in string.split(testDescription['exclfile']):
glb = glob.glob(dir + f)
for g in glb:
excl.append(os.path.abspath(g))
# Run the specified test program
for f in testFiles:
if not os.path.isdir(f):
if f not in excl:
count = count + 1
runOneTest(testDescription, os.path.basename(f), dir, rdir)
#
# The following classes are used with the xmlreader interface to interpret the
# parameter file. Once a test section has been identified, runTest is called
# with a dictionary containing the parsed results of the interpretation.
#
class testDefaults:
curText = '' # accumulates text content of parameter
def addToDict(self, key):
txt = string.strip(self.curText)
# if txt == '':
# return
if key not in defaultParams:
defaultParams[key] = txt
else:
defaultParams[key] += ' ' + txt
def processNode(self, reader, curClass):
if reader.Depth() == 2:
if reader.NodeType() == 1:
self.curText = '' # clear the working variable
elif reader.NodeType() == 15:
if (reader.Name() != '#text') and (reader.Name() != '#comment'):
self.addToDict(reader.Name())
elif reader.Depth() == 3:
if reader.Name() == '#text':
self.curText += reader.Value()
elif reader.NodeType() == 15: # end of element
print "Defaults have been set to:"
for k in defaultParams.keys():
print " %s : '%s'" % (k, defaultParams[k])
curClass = rootClass()
return curClass
class testClass:
def __init__(self):
self.testParams = {} # start with an empty set of params
self.curText = '' # and empty text
def addToDict(self, key):
data = string.strip(self.curText)
if key not in self.testParams:
self.testParams[key] = data
else:
if self.testParams[key] != '':
data = ' ' + data
self.testParams[key] += data
def processNode(self, reader, curClass):
if reader.Depth() == 2:
if reader.NodeType() == 1:
self.curText = '' # clear the working variable
if reader.Name() not in self.testParams:
self.testParams[reader.Name()] = ''
elif reader.NodeType() == 15:
if (reader.Name() != '#text') and (reader.Name() != '#comment'):
self.addToDict(reader.Name())
elif reader.Depth() == 3:
if reader.Name() == '#text':
self.curText += reader.Value()
elif reader.NodeType() == 15: # end of element
runTest(self.testParams)
curClass = rootClass()
return curClass
class rootClass:
def processNode(self, reader, curClass):
if reader.Depth() == 0:
return curClass
if reader.Depth() != 1:
print "Unexpected junk: Level %d, type %d, name %s" % (
reader.Depth(), reader.NodeType(), reader.Name())
return curClass
if reader.Name() == 'test':
curClass = testClass()
curClass.testParams = {}
elif reader.Name() == 'defaults':
curClass = testDefaults()
return curClass
def streamFile(filename):
try:
reader = libxml2.newTextReaderFilename(filename)
except:
print "unable to open %s" % (filename)
return
curClass = rootClass()
ret = reader.Read()
while ret == 1:
curClass = curClass.processNode(reader, curClass)
ret = reader.Read()
if ret != 0:
print "%s : failed to parse" % (filename)
# OK, we're finished with all the routines. Now for the main program:-
if len(sys.argv) != 2:
print "Usage: maketest {filename}"
sys.exit(-1)
streamFile(sys.argv[1])
|
sankha93/servo | refs/heads/master | python/servo/devenv_commands.py | 5 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
from os import path, getcwd, listdir
import subprocess
import sys
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd, call
@CommandProvider
class MachCommands(CommandBase):
@Command('cargo',
description='Run Cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to Cargo")
def cargo(self, params):
if not params:
params = []
if self.context.topdir == getcwd():
with cd(path.join('components', 'servo')):
return call(["cargo"] + params, env=self.build_env())
return call(['cargo'] + params, env=self.build_env())
@Command('cargo-update',
description='Same as update-cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages')
def cargo_update(self, params=None, package=None, all_packages=None):
self.update_cargo(params, package, all_packages)
@Command('update-cargo',
description='Update Cargo dependencies',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates the selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages. NOTE! This is very likely to break your ' +
'working copy, making it impossible to build servo. Only do ' +
'this if you really know what you are doing.')
def update_cargo(self, params=None, package=None, all_packages=None):
if not params:
params = []
if package:
params += ["-p", package]
elif all_packages:
params = []
else:
print("Please choose package to update with the --package (-p) ")
print("flag or update all packages with --all-packages (-a) flag")
sys.exit(1)
cargo_paths = [path.join('components', 'servo'),
path.join('ports', 'cef'),
path.join('ports', 'geckolib')]
for cargo_path in cargo_paths:
with cd(cargo_path):
print(cargo_path)
call(["cargo", "update"] + params,
env=self.build_env())
@Command('clippy',
description='Run Clippy',
category='devenv')
@CommandArgument(
'--package', '-p', default=None,
help='Updates the selected package')
@CommandArgument(
'--json', '-j', action="store_true",
        help='Output rustc errors in JSON format')
def clippy(self, package=None, json=False):
params = ["--features=script/plugins/clippy"]
if package:
params += ["-p", package]
if json:
params += ["--", "-Zunstable-options", "--error-format", "json"]
with cd(path.join(self.context.topdir, "components", "servo")):
return subprocess.call(["cargo", "rustc", "-v"] + params,
env=self.build_env())
@Command('rustc',
description='Run the Rust compiler',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to rustc")
def rustc(self, params):
if params is None:
params = []
return call(["rustc"] + params, env=self.build_env())
@Command('rust-root',
description='Print the path to the root of the Rust compiler',
category='devenv')
def rust_root(self):
print(self.config["tools"]["rust-root"])
@Command('grep',
description='`git grep` for selected directories.',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to `git grep`")
def grep(self, params):
if not params:
params = []
# get all directories under tests/
tests_dirs = listdir('tests')
# Directories to be excluded under tests/
excluded_tests_dirs = ['wpt', 'jquery']
tests_dirs = filter(lambda dir: dir not in excluded_tests_dirs, tests_dirs)
# Set of directories in project root
root_dirs = ['components', 'ports', 'python', 'etc', 'resources']
# Generate absolute paths for directories in tests/ and project-root/
tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs]
root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs]
# Absolute paths for all directories to be considered
grep_paths = root_dirs_abs + tests_dirs_abs
return call(
["git"] + ["grep"] + params + ['--'] + grep_paths + [':(exclude)*.min.js'],
env=self.build_env())
@Command('wpt-upgrade',
description='upgrade wptrunner.',
category='devenv')
def upgrade_wpt_runner(self):
with cd(path.join(self.context.topdir, 'tests', 'wpt', 'harness')):
code = call(["git", "init"], env=self.build_env())
if code:
return code
code = call(
["git", "remote", "add", "upstream", "https://github.com/w3c/wptrunner.git"], env=self.build_env())
if code:
return code
code = call(["git", "fetch", "upstream"], env=self.build_env())
if code:
return code
code = call(["git", "reset", "--hard", "remotes/upstream/master"], env=self.build_env())
if code:
return code
code = call(["rm", "-rf", ".git"], env=self.build_env())
if code:
return code
return 0
|
yousseb/django_pytds | refs/heads/master | tests/mssql_regress/tests.py | 1 | from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.db import connection
from django.test import TestCase
from .models import AutoPkPlusOne, PkPlusOne, TextPkPlusOne
class ConnectionStringTestCase(TestCase):
def assertInString(self, conn_string, pattern):
"""
Asserts that the pattern is found in the string.
"""
found = conn_string.find(pattern) != -1
self.assertTrue(found,
"pattern \"%s\" was not found in connection string \"%s\"" % (pattern, conn_string))
def assertNotInString(self, conn_string, pattern):
"""
Asserts that the pattern is found in the string.
"""
found = conn_string.find(pattern) != -1
self.assertFalse(found,
"pattern \"%s\" was found in connection string \"%s\"" % (pattern, conn_string))
def get_conn_string(self, data={}):
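        """
        Builds a connection string from baseline test settings, with any
        entries in ``data`` overriding the defaults.
        """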
db_settings = {
'NAME': 'db_name',
'ENGINE': 'sqlserver_ado',
'HOST': 'myhost',
'PORT': '',
'USER': '',
'PASSWORD': '',
'OPTIONS' : {
'provider': 'SQLOLEDB',
'use_mars': True,
},
}
db_settings.update(data)
from sqlserver_ado.base import make_connection_string
return make_connection_string(db_settings)
def test_default(self):
conn_string = self.get_conn_string()
self.assertInString(conn_string, 'Initial Catalog=db_name')
self.assertInString(conn_string, '=myhost;')
self.assertInString(conn_string, 'Integrated Security=SSPI')
self.assertInString(conn_string, 'PROVIDER=SQLOLEDB')
self.assertNotInString(conn_string, 'UID=')
self.assertNotInString(conn_string, 'PWD=')
self.assertInString(conn_string, 'MARS Connection=True')
def test_require_database_name(self):
"""Database NAME setting is required"""
self.assertRaises(ImproperlyConfigured, self.get_conn_string, {'NAME': ''})
def test_user_pass(self):
"""Validate username and password in connection string"""
conn_string = self.get_conn_string({'USER': 'myuser', 'PASSWORD': 'mypass'})
self.assertInString(conn_string, 'UID=myuser;')
self.assertInString(conn_string, 'PWD=mypass;')
self.assertNotInString(conn_string, 'Integrated Security=SSPI')
def test_port_with_host(self):
"""Test the PORT setting to make sure it properly updates the connection string"""
self.assertRaises(ImproperlyConfigured, self.get_conn_string,
{'HOST': 'myhost', 'PORT': 1433})
self.assertRaises(ImproperlyConfigured, self.get_conn_string, {'HOST': 'myhost', 'PORT': 'a'})
conn_string = self.get_conn_string({'HOST': '127.0.0.1', 'PORT': 1433})
self.assertInString(conn_string, '=127.0.0.1,1433;')
def test_extra_params(self):
"""Test extra_params OPTIONS"""
extras = 'Some=Extra;Stuff Goes=here'
conn_string = self.get_conn_string({'OPTIONS': {'extra_params': extras}})
self.assertInString(conn_string, extras)
def test_host_fqdn_with_port(self):
"""
Issue 21 - FQDN crashed on IP address detection.
"""
with self.assertRaisesRegexp(ImproperlyConfigured, 'DATABASE HOST must be an IP address'):
self.get_conn_string(data={
'HOST': 'my.fqdn.com',
'PORT': '1433',
})
class ReturnIdOnInsertWithTriggersTestCase(TestCase):
def create_trigger(self, model):
"""Create a trigger for the provided model"""
qn = connection.ops.quote_name
table_name = qn(model._meta.db_table)
trigger_name = qn('test_trigger_%s' % model._meta.db_table)
with connection.cursor() as cur:
# drop trigger if it exists
drop_sql = """
IF OBJECT_ID(N'[dbo].{trigger}') IS NOT NULL
DROP TRIGGER [dbo].{trigger}
""".format(trigger=trigger_name)
create_sql = """
CREATE TRIGGER [dbo].{trigger} ON {tbl} FOR INSERT
AS UPDATE {tbl} set [a] = 100""".format(
trigger=trigger_name,
tbl=table_name,
)
cur.execute(drop_sql)
cur.execute(create_sql)
def test_pk(self):
self.create_trigger(PkPlusOne)
id = 1
obj = PkPlusOne.objects.create(id=id)
self.assertEqual(obj.pk, id)
self.assertEqual(PkPlusOne.objects.get(pk=id).a, 100)
def test_auto_pk(self):
self.create_trigger(AutoPkPlusOne)
id = 1
obj = AutoPkPlusOne.objects.create()
self.assertEqual(obj.pk, id)
self.assertEqual(AutoPkPlusOne.objects.get(pk=id).a, 100)
def test_text_pk(self):
self.create_trigger(TextPkPlusOne)
id = 'asdf'
obj = TextPkPlusOne.objects.create(id=id)
self.assertEqual(obj.pk, id)
self.assertEqual(TextPkPlusOne.objects.get(pk=id).a, 100)
|
titienmiami/mmc.repository | refs/heads/master | plugin.video.SportsDevil/lib/utils/github/Tag.py | 7 | # -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# [email protected]
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import Commit
class Tag(GithubObject.BasicGithubObject):
@property
def commit(self):
return self._NoneIfNotSet(self._commit)
@property
def name(self):
return self._NoneIfNotSet(self._name)
@property
def tarball_url(self):
return self._NoneIfNotSet(self._tarball_url)
@property
def zipball_url(self):
return self._NoneIfNotSet(self._zipball_url)
def _initAttributes(self):
self._commit = GithubObject.NotSet
self._name = GithubObject.NotSet
self._tarball_url = GithubObject.NotSet
self._zipball_url = GithubObject.NotSet
def _useAttributes(self, attributes):
if "commit" in attributes: # pragma no branch
assert attributes["commit"] is None or isinstance(attributes["commit"], dict), attributes["commit"]
self._commit = None if attributes["commit"] is None else Commit.Commit(self._requester, attributes["commit"], completed=False)
if "name" in attributes: # pragma no branch
assert attributes["name"] is None or isinstance(attributes["name"], (str, unicode)), attributes["name"]
self._name = attributes["name"]
if "tarball_url" in attributes: # pragma no branch
assert attributes["tarball_url"] is None or isinstance(attributes["tarball_url"], (str, unicode)), attributes["tarball_url"]
self._tarball_url = attributes["tarball_url"]
if "zipball_url" in attributes: # pragma no branch
assert attributes["zipball_url"] is None or isinstance(attributes["zipball_url"], (str, unicode)), attributes["zipball_url"]
self._zipball_url = attributes["zipball_url"]
|
usc-isi/horizon-old | refs/heads/hpc-horizon | horizon/horizon/dashboards/nova/containers/urls.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
OBJECTS = r'^(?P<container_name>[^/]+)/%s$'
# Swift containers and objects.
urlpatterns = patterns('horizon.dashboards.nova.containers.views',
url(r'^$', 'index', name='index'),
url(r'^create/$', 'create', name='create'),
url(OBJECTS % r'$', 'object_index', name='object_index'),
url(OBJECTS % r'upload$', 'object_upload', name='object_upload'),
url(OBJECTS % r'(?P<object_name>[^/]+)/copy$',
'object_copy', name='object_copy'),
url(OBJECTS % r'(?P<object_name>[^/]+)/download$',
'object_download', name='object_download'))
|
bigbadhacker/google-diff-match-patch | refs/heads/master | python2/diff_match_patch_test.py | 319 | #!/usr/bin/python2.4
"""Test harness for diff_match_patch.py
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import time
import unittest
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the tests
# without leaving the Python interpreter.
reload(dmp_module)
class DiffMatchPatchTest(unittest.TestCase):
def setUp(self):
"Test harness for dmp_module."
self.dmp = dmp_module.diff_match_patch()
def diff_rebuildtexts(self, diffs):
# Construct the two texts which made up the diff originally.
text1 = ""
text2 = ""
for x in range(0, len(diffs)):
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_INSERT:
text1 += diffs[x][1]
if diffs[x][0] != dmp_module.diff_match_patch.DIFF_DELETE:
text2 += diffs[x][1]
return (text1, text2)
class DiffTest(DiffMatchPatchTest):
"""DIFF TEST FUNCTIONS"""
def testDiffCommonPrefix(self):
# Detect any common prefix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonPrefix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234abcdef", "1234xyz"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonPrefix("1234", "1234xyz"))
def testDiffCommonSuffix(self):
# Detect any common suffix.
# Null case.
self.assertEquals(0, self.dmp.diff_commonSuffix("abc", "xyz"))
# Non-null case.
self.assertEquals(4, self.dmp.diff_commonSuffix("abcdef1234", "xyz1234"))
# Whole case.
self.assertEquals(4, self.dmp.diff_commonSuffix("1234", "xyz1234"))
def testDiffCommonOverlap(self):
# Null case.
self.assertEquals(0, self.dmp.diff_commonOverlap("", "abcd"))
# Whole case.
self.assertEquals(3, self.dmp.diff_commonOverlap("abc", "abcd"))
# No overlap.
self.assertEquals(0, self.dmp.diff_commonOverlap("123456", "abcd"))
# Overlap.
self.assertEquals(3, self.dmp.diff_commonOverlap("123456xxx", "xxxabcd"))
# Unicode.
# Some overly clever languages (C#) may treat ligatures as equal to their
# component letters. E.g. U+FB01 == 'fi'
self.assertEquals(0, self.dmp.diff_commonOverlap("fi", u"\ufb01i"))
def testDiffHalfMatch(self):
# Detect a halfmatch.
self.dmp.Diff_Timeout = 1
# No match.
self.assertEquals(None, self.dmp.diff_halfMatch("1234567890", "abcdef"))
self.assertEquals(None, self.dmp.diff_halfMatch("12345", "23"))
# Single Match.
self.assertEquals(("12", "90", "a", "z", "345678"), self.dmp.diff_halfMatch("1234567890", "a345678z"))
self.assertEquals(("a", "z", "12", "90", "345678"), self.dmp.diff_halfMatch("a345678z", "1234567890"))
self.assertEquals(("abc", "z", "1234", "0", "56789"), self.dmp.diff_halfMatch("abc56789z", "1234567890"))
self.assertEquals(("a", "xyz", "1", "7890", "23456"), self.dmp.diff_halfMatch("a23456xyz", "1234567890"))
# Multiple Matches.
self.assertEquals(("12123", "123121", "a", "z", "1234123451234"), self.dmp.diff_halfMatch("121231234123451234123121", "a1234123451234z"))
self.assertEquals(("", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="), self.dmp.diff_halfMatch("x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-="))
self.assertEquals(("-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"), self.dmp.diff_halfMatch("-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy"))
# Non-optimal halfmatch.
# Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy
self.assertEquals(("qHillo", "w", "x", "Hulloy", "HelloHe"), self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
# Optimal no halfmatch.
self.dmp.Diff_Timeout = 0
self.assertEquals(None, self.dmp.diff_halfMatch("qHilloHelloHew", "xHelloHeHulloy"))
def testDiffLinesToChars(self):
# Convert lines down to characters.
self.assertEquals(("\x01\x02\x01", "\x02\x01\x02", ["", "alpha\n", "beta\n"]), self.dmp.diff_linesToChars("alpha\nbeta\nalpha\n", "beta\nalpha\nbeta\n"))
self.assertEquals(("", "\x01\x02\x03\x03", ["", "alpha\r\n", "beta\r\n", "\r\n"]), self.dmp.diff_linesToChars("", "alpha\r\nbeta\r\n\r\n\r\n"))
self.assertEquals(("\x01", "\x02", ["", "a", "b"]), self.dmp.diff_linesToChars("a", "b"))
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
self.assertEquals((chars, "", lineList), self.dmp.diff_linesToChars(lines, ""))
def testDiffCharsToLines(self):
# Convert chars up to lines.
diffs = [(self.dmp.DIFF_EQUAL, "\x01\x02\x01"), (self.dmp.DIFF_INSERT, "\x02\x01\x02")]
self.dmp.diff_charsToLines(diffs, ["", "alpha\n", "beta\n"])
self.assertEquals([(self.dmp.DIFF_EQUAL, "alpha\nbeta\nalpha\n"), (self.dmp.DIFF_INSERT, "beta\nalpha\nbeta\n")], diffs)
# More than 256 to reveal any 8-bit limitations.
n = 300
lineList = []
charList = []
for x in range(1, n + 1):
lineList.append(str(x) + "\n")
charList.append(unichr(x))
self.assertEquals(n, len(lineList))
lines = "".join(lineList)
chars = "".join(charList)
self.assertEquals(n, len(chars))
lineList.insert(0, "")
diffs = [(self.dmp.DIFF_DELETE, chars)]
self.dmp.diff_charsToLines(diffs, lineList)
self.assertEquals([(self.dmp.DIFF_DELETE, lines)], diffs)
def testDiffCleanupMerge(self):
# Cleanup a messy diff.
# Null case.
diffs = []
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([], diffs)
# No change case.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_INSERT, "c")], diffs)
# Merge equalities.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], diffs)
# Merge deletions.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc")], diffs)
# Merge insertions.
diffs = [(self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_INSERT, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "abc")], diffs)
# Merge interweave.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "d"), (self.dmp.DIFF_EQUAL, "e"), (self.dmp.DIFF_EQUAL, "f")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_INSERT, "bd"), (self.dmp.DIFF_EQUAL, "ef")], diffs)
# Prefix and suffix detection.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "c")], diffs)
# Prefix and suffix detection with equalities.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "abc"), (self.dmp.DIFF_DELETE, "dc"), (self.dmp.DIFF_EQUAL, "y")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "d"), (self.dmp.DIFF_INSERT, "b"), (self.dmp.DIFF_EQUAL, "cy")], diffs)
# Slide edit left.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "ba"), (self.dmp.DIFF_EQUAL, "c")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "ac")], diffs)
# Slide edit right.
diffs = [(self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_INSERT, "ab"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "ca"), (self.dmp.DIFF_INSERT, "ba")], diffs)
# Slide edit left recursive.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "ac"), (self.dmp.DIFF_EQUAL, "x")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "acx")], diffs)
# Slide edit right recursive.
diffs = [(self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "ca"), (self.dmp.DIFF_EQUAL, "c"), (self.dmp.DIFF_DELETE, "b"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupMerge(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xca"), (self.dmp.DIFF_DELETE, "cba")], diffs)
def testDiffCleanupSemanticLossless(self):
# Slide diffs to match logical boundaries.
# Null case.
diffs = []
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([], diffs)
# Blank lines.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\n\r\nBBB"), (self.dmp.DIFF_INSERT, "\r\nDDD\r\n\r\nBBB"), (self.dmp.DIFF_EQUAL, "\r\nEEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n\r\n"), (self.dmp.DIFF_INSERT, "BBB\r\nDDD\r\n\r\n"), (self.dmp.DIFF_EQUAL, "BBB\r\nEEE")], diffs)
# Line boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "AAA\r\nBBB"), (self.dmp.DIFF_INSERT, " DDD\r\nBBB"), (self.dmp.DIFF_EQUAL, " EEE")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "AAA\r\n"), (self.dmp.DIFF_INSERT, "BBB DDD\r\n"), (self.dmp.DIFF_EQUAL, "BBB EEE")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_INSERT, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_INSERT, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Alphanumeric boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The-c"), (self.dmp.DIFF_INSERT, "ow-and-the-c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The-"), (self.dmp.DIFF_INSERT, "cow-and-the-"), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# Hitting the start.
diffs = [(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "ax")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "aax")], diffs)
# Hitting the end.
diffs = [(self.dmp.DIFF_EQUAL, "xa"), (self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "a")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "xaa"), (self.dmp.DIFF_DELETE, "a")], diffs)
# Sentence boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The xxx. The "), (self.dmp.DIFF_INSERT, "zzz. The "), (self.dmp.DIFF_EQUAL, "yyy.")]
self.dmp.diff_cleanupSemanticLossless(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The xxx."), (self.dmp.DIFF_INSERT, " The zzz."), (self.dmp.DIFF_EQUAL, " The yyy.")], diffs)
def testDiffCleanupSemantic(self):
# Cleanup semantically trivial equalities.
# Null case.
diffs = []
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([], diffs)
# No elimination #1.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "cd"), (self.dmp.DIFF_EQUAL, "12"), (self.dmp.DIFF_DELETE, "e")], diffs)
# No elimination #2.
diffs = [(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "ABC"), (self.dmp.DIFF_EQUAL, "1234"), (self.dmp.DIFF_DELETE, "wxyz")], diffs)
# Simple elimination.
diffs = [(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "c")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "b")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_EQUAL, "cd"), (self.dmp.DIFF_DELETE, "e"), (self.dmp.DIFF_EQUAL, "f"), (self.dmp.DIFF_INSERT, "g")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcdef"), (self.dmp.DIFF_INSERT, "cdfg")], diffs)
# Multiple eliminations.
diffs = [(self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2"), (self.dmp.DIFF_EQUAL, "_"), (self.dmp.DIFF_INSERT, "1"), (self.dmp.DIFF_EQUAL, "A"), (self.dmp.DIFF_DELETE, "B"), (self.dmp.DIFF_INSERT, "2")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "AB_AB"), (self.dmp.DIFF_INSERT, "1A2_1A2")], diffs)
# Word boundaries.
diffs = [(self.dmp.DIFF_EQUAL, "The c"), (self.dmp.DIFF_DELETE, "ow and the c"), (self.dmp.DIFF_EQUAL, "at.")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_EQUAL, "The "), (self.dmp.DIFF_DELETE, "cow and the "), (self.dmp.DIFF_EQUAL, "cat.")], diffs)
# No overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcxx"), (self.dmp.DIFF_INSERT, "xxdef")], diffs)
# Overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "abcxxx"), (self.dmp.DIFF_INSERT, "xxxdef")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_INSERT, "def")], diffs)
# Reverse overlap elimination.
diffs = [(self.dmp.DIFF_DELETE, "xxxabc"), (self.dmp.DIFF_INSERT, "defxxx")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_INSERT, "def"), (self.dmp.DIFF_EQUAL, "xxx"), (self.dmp.DIFF_DELETE, "abc")], diffs)
# Two overlap eliminations.
diffs = [(self.dmp.DIFF_DELETE, "abcd1212"), (self.dmp.DIFF_INSERT, "1212efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A3"), (self.dmp.DIFF_INSERT, "3BC")]
self.dmp.diff_cleanupSemantic(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abcd"), (self.dmp.DIFF_EQUAL, "1212"), (self.dmp.DIFF_INSERT, "efghi"), (self.dmp.DIFF_EQUAL, "----"), (self.dmp.DIFF_DELETE, "A"), (self.dmp.DIFF_EQUAL, "3"), (self.dmp.DIFF_INSERT, "BC")], diffs)
def testDiffCleanupEfficiency(self):
# Cleanup operationally trivial equalities.
self.dmp.Diff_EditCost = 4
# Null case.
diffs = []
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([], diffs)
# No elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")], diffs)
# Four-edit elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xyz34")], diffs)
# Three-edit elimination.
diffs = [(self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "xcd"), (self.dmp.DIFF_INSERT, "12x34")], diffs)
# Backpass elimination.
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "xy"), (self.dmp.DIFF_INSERT, "34"), (self.dmp.DIFF_EQUAL, "z"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "56")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abxyzcd"), (self.dmp.DIFF_INSERT, "12xy34z56")], diffs)
# High cost elimination.
self.dmp.Diff_EditCost = 5
diffs = [(self.dmp.DIFF_DELETE, "ab"), (self.dmp.DIFF_INSERT, "12"), (self.dmp.DIFF_EQUAL, "wxyz"), (self.dmp.DIFF_DELETE, "cd"), (self.dmp.DIFF_INSERT, "34")]
self.dmp.diff_cleanupEfficiency(diffs)
self.assertEquals([(self.dmp.DIFF_DELETE, "abwxyzcd"), (self.dmp.DIFF_INSERT, "12wxyz34")], diffs)
self.dmp.Diff_EditCost = 4
def testDiffPrettyHtml(self):
# Pretty print.
diffs = [(self.dmp.DIFF_EQUAL, "a\n"), (self.dmp.DIFF_DELETE, "<B>b</B>"), (self.dmp.DIFF_INSERT, "c&d")]
self.assertEquals("<span>a¶<br></span><del style=\"background:#ffe6e6;\"><B>b</B></del><ins style=\"background:#e6ffe6;\">c&d</ins>", self.dmp.diff_prettyHtml(diffs))
def testDiffText(self):
# Compute the source and destination texts.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy")]
self.assertEquals("jumps over the lazy", self.dmp.diff_text1(diffs))
self.assertEquals("jumped over a lazy", self.dmp.diff_text2(diffs))
def testDiffDelta(self):
# Convert a diff into delta string.
diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, " lazy"), (self.dmp.DIFF_INSERT, "old dog")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals("jumps over the lazy", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Generates error (19 != 20).
try:
self.dmp.diff_fromDelta(text1 + "x", delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (19 != 18).
try:
self.dmp.diff_fromDelta(text1[1:], delta)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Generates error (%c3%xy invalid Unicode).
try:
self.dmp.diff_fromDelta("", "+%c3xy")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
# Test deltas with special characters.
diffs = [(self.dmp.DIFF_EQUAL, u"\u0680 \x00 \t %"), (self.dmp.DIFF_DELETE, u"\u0681 \x01 \n ^"), (self.dmp.DIFF_INSERT, u"\u0682 \x02 \\ |")]
text1 = self.dmp.diff_text1(diffs)
self.assertEquals(u"\u0680 \x00 \t %\u0681 \x01 \n ^", text1)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("=7\t-7\t+%DA%82 %02 %5C %7C", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta(text1, delta))
# Verify pool of unchanged characters.
diffs = [(self.dmp.DIFF_INSERT, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ")]
text2 = self.dmp.diff_text2(diffs)
self.assertEquals("A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", text2)
delta = self.dmp.diff_toDelta(diffs)
self.assertEquals("+A-Z a-z 0-9 - _ . ! ~ * \' ( ) ; / ? : @ & = + $ , # ", delta)
# Convert delta string into a diff.
self.assertEquals(diffs, self.dmp.diff_fromDelta("", delta))
def testDiffXIndex(self):
# Translate a location in text1 to text2.
self.assertEquals(5, self.dmp.diff_xIndex([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 2))
# Translation on deletion.
self.assertEquals(1, self.dmp.diff_xIndex([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "1234"), (self.dmp.DIFF_EQUAL, "xyz")], 3))
def testDiffLevenshtein(self):
# Levenshtein with trailing equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234"), (self.dmp.DIFF_EQUAL, "xyz")]))
# Levenshtein with leading equality.
self.assertEquals(4, self.dmp.diff_levenshtein([(self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_INSERT, "1234")]))
# Levenshtein with middle equality.
self.assertEquals(7, self.dmp.diff_levenshtein([(self.dmp.DIFF_DELETE, "abc"), (self.dmp.DIFF_EQUAL, "xyz"), (self.dmp.DIFF_INSERT, "1234")]))
def testDiffBisect(self):
# Normal.
a = "cat"
b = "map"
# Since the resulting diff hasn't been normalized, it would be ok if
# the insertion and deletion pairs are swapped.
# If the order changes, tweak this test as required.
self.assertEquals([(self.dmp.DIFF_DELETE, "c"), (self.dmp.DIFF_INSERT, "m"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "t"), (self.dmp.DIFF_INSERT, "p")], self.dmp.diff_bisect(a, b, sys.maxint))
# Timeout.
self.assertEquals([(self.dmp.DIFF_DELETE, "cat"), (self.dmp.DIFF_INSERT, "map")], self.dmp.diff_bisect(a, b, 0))
def testDiffMain(self):
# Perform a trivial diff.
# Null case.
self.assertEquals([], self.dmp.diff_main("", "", False))
# Equality.
self.assertEquals([(self.dmp.DIFF_EQUAL, "abc")], self.dmp.diff_main("abc", "abc", False))
# Simple insertion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "ab"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "ab123c", False))
# Simple deletion.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "bc")], self.dmp.diff_main("a123bc", "abc", False))
# Two insertions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_INSERT, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_INSERT, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("abc", "a123b456c", False))
# Two deletions.
self.assertEquals([(self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "123"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "456"), (self.dmp.DIFF_EQUAL, "c")], self.dmp.diff_main("a123b456c", "abc", False))
# Perform a real diff.
# Switch off the timeout.
self.dmp.Diff_Timeout = 0
# Simple cases.
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, "b")], self.dmp.diff_main("a", "b", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "Apple"), (self.dmp.DIFF_INSERT, "Banana"), (self.dmp.DIFF_EQUAL, "s are a"), (self.dmp.DIFF_INSERT, "lso"), (self.dmp.DIFF_EQUAL, " fruit.")], self.dmp.diff_main("Apples are a fruit.", "Bananas are also fruit.", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "a"), (self.dmp.DIFF_INSERT, u"\u0680"), (self.dmp.DIFF_EQUAL, "x"), (self.dmp.DIFF_DELETE, "\t"), (self.dmp.DIFF_INSERT, "\x00")], self.dmp.diff_main("ax\t", u"\u0680x\x00", False))
# Overlaps.
self.assertEquals([(self.dmp.DIFF_DELETE, "1"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "y"), (self.dmp.DIFF_EQUAL, "b"), (self.dmp.DIFF_DELETE, "2"), (self.dmp.DIFF_INSERT, "xab")], self.dmp.diff_main("1ayb2", "abxab", False))
self.assertEquals([(self.dmp.DIFF_INSERT, "xaxcx"), (self.dmp.DIFF_EQUAL, "abc"), (self.dmp.DIFF_DELETE, "y")], self.dmp.diff_main("abcy", "xaxcxabc", False))
self.assertEquals([(self.dmp.DIFF_DELETE, "ABCD"), (self.dmp.DIFF_EQUAL, "a"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "bcd"), (self.dmp.DIFF_DELETE, "="), (self.dmp.DIFF_INSERT, "-"), (self.dmp.DIFF_EQUAL, "efghijklmnopqrs"), (self.dmp.DIFF_DELETE, "EFGHIJKLMNOefg")], self.dmp.diff_main("ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", "a-bcd-efghijklmnopqrs", False))
# Large equality.
self.assertEquals([(self.dmp.DIFF_INSERT, " "), (self.dmp.DIFF_EQUAL,"a"), (self.dmp.DIFF_INSERT,"nd"), (self.dmp.DIFF_EQUAL," [[Pennsylvania]]"), (self.dmp.DIFF_DELETE," and [[New")], self.dmp.diff_main("a [[Pennsylvania]] and [[New", " and [[Pennsylvania]]", False))
# Timeout.
self.dmp.Diff_Timeout = 0.1 # 100ms
a = "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
b = "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n"
# Increase the text lengths by 1024 times to ensure a timeout.
for x in range(10):
a = a + a
b = b + b
startTime = time.time()
self.dmp.diff_main(a, b)
endTime = time.time()
# Test that we took at least the timeout period.
self.assertTrue(self.dmp.Diff_Timeout <= endTime - startTime)
# Test that we didn't take forever (be forgiving).
# Theoretically this test could fail very occasionally if the
# OS task swaps or locks up for a second at the wrong moment.
self.assertTrue(self.dmp.Diff_Timeout * 2 > endTime - startTime)
self.dmp.Diff_Timeout = 0
# Test the linemode speedup.
# Must be long to pass the 100 char cutoff.
# Simple line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Single line-mode.
a = "1234567890" * 13
b = "abcdefghij" * 13
self.assertEquals(self.dmp.diff_main(a, b, False), self.dmp.diff_main(a, b, True))
# Overlap line-mode.
a = "1234567890\n" * 13
b = "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n"
texts_linemode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, True))
texts_textmode = self.diff_rebuildtexts(self.dmp.diff_main(a, b, False))
self.assertEquals(texts_textmode, texts_linemode)
# Test null inputs.
try:
self.dmp.diff_main(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class MatchTest(DiffMatchPatchTest):
"""MATCH TEST FUNCTIONS"""
def testMatchAlphabet(self):
# Initialise the bitmasks for Bitap.
self.assertEquals({"a":4, "b":2, "c":1}, self.dmp.match_alphabet("abc"))
self.assertEquals({"a":37, "b":18, "c":8}, self.dmp.match_alphabet("abcaba"))
def testMatchBitap(self):
self.dmp.Match_Distance = 100
self.dmp.Match_Threshold = 0.5
# Exact matches.
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 5))
self.assertEquals(5, self.dmp.match_bitap("abcdefghijk", "fgh", 0))
# Fuzzy matches.
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxhi", 0))
self.assertEquals(2, self.dmp.match_bitap("abcdefghijk", "cdefxyhijk", 5))
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "bxy", 1))
# Overflow.
self.assertEquals(2, self.dmp.match_bitap("123456789xx0", "3456789x0", 2))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xxabc", 4))
self.assertEquals(3, self.dmp.match_bitap("abcdef", "defyy", 4))
self.assertEquals(0, self.dmp.match_bitap("abcdef", "xabcdefy", 0))
# Threshold test.
self.dmp.Match_Threshold = 0.4
self.assertEquals(4, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.3
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijk", "efxyhi", 1))
self.dmp.Match_Threshold = 0.0
self.assertEquals(1, self.dmp.match_bitap("abcdefghijk", "bcdef", 1))
self.dmp.Match_Threshold = 0.5
# Multiple select.
self.assertEquals(0, self.dmp.match_bitap("abcdexyzabcde", "abccde", 3))
self.assertEquals(8, self.dmp.match_bitap("abcdexyzabcde", "abccde", 5))
# Distance test.
self.dmp.Match_Distance = 10 # Strict location.
self.assertEquals(-1, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1))
self.dmp.Match_Distance = 1000 # Loose location.
self.assertEquals(0, self.dmp.match_bitap("abcdefghijklmnopqrstuvwxyz", "abcdefg", 24))
def testMatchMain(self):
# Full match.
# Shortcut matches.
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdef", 1000))
self.assertEquals(-1, self.dmp.match_main("", "abcdef", 1))
self.assertEquals(3, self.dmp.match_main("abcdef", "", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "de", 3))
self.assertEquals(3, self.dmp.match_main("abcdef", "defy", 4))
self.assertEquals(0, self.dmp.match_main("abcdef", "abcdefy", 0))
# Complex match.
self.dmp.Match_Threshold = 0.7
self.assertEquals(4, self.dmp.match_main("I am the very model of a modern major general.", " that berry ", 5))
self.dmp.Match_Threshold = 0.5
# Test null inputs.
try:
self.dmp.match_main(None, None, 0)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
class PatchTest(DiffMatchPatchTest):
"""PATCH TEST FUNCTIONS"""
def testPatchObj(self):
# Patch Object.
p = dmp_module.patch_obj()
p.start1 = 20
p.start2 = 21
p.length1 = 18
p.length2 = 17
p.diffs = [(self.dmp.DIFF_EQUAL, "jump"), (self.dmp.DIFF_DELETE, "s"), (self.dmp.DIFF_INSERT, "ed"), (self.dmp.DIFF_EQUAL, " over "), (self.dmp.DIFF_DELETE, "the"), (self.dmp.DIFF_INSERT, "a"), (self.dmp.DIFF_EQUAL, "\nlaz")]
strp = str(p)
self.assertEquals("@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", strp)
def testPatchFromText(self):
self.assertEquals([], self.dmp.patch_fromText(""))
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n"
self.assertEquals(strp, str(self.dmp.patch_fromText(strp)[0]))
self.assertEquals("@@ -1 +1 @@\n-a\n+b\n", str(self.dmp.patch_fromText("@@ -1 +1 @@\n-a\n+b\n")[0]))
self.assertEquals("@@ -1,3 +0,0 @@\n-abc\n", str(self.dmp.patch_fromText("@@ -1,3 +0,0 @@\n-abc\n")[0]))
self.assertEquals("@@ -0,0 +1,3 @@\n+abc\n", str(self.dmp.patch_fromText("@@ -0,0 +1,3 @@\n+abc\n")[0]))
# Generates error.
try:
self.dmp.patch_fromText("Bad\nPatch\n")
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchToText(self):
strp = "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
strp = "@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"
p = self.dmp.patch_fromText(strp)
self.assertEquals(strp, self.dmp.patch_toText(p))
def testPatchAddContext(self):
self.dmp.Patch_Margin = 4
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps over the lazy dog.")
self.assertEquals("@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n", str(p))
# Same, but not enough trailing context.
p = self.dmp.patch_fromText("@@ -21,4 +21,10 @@\n-jump\n+somersault\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n", str(p))
# Same, but not enough leading context.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps.")
self.assertEquals("@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n", str(p))
# Same, but with ambiguity.
p = self.dmp.patch_fromText("@@ -3 +3,2 @@\n-e\n+at\n")[0]
self.dmp.patch_addContext(p, "The quick brown fox jumps. The quick brown fox crashes.")
self.assertEquals("@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. \n", str(p))
def testPatchMake(self):
# Null case.
patches = self.dmp.patch_make("", "")
self.assertEquals("", self.dmp.patch_toText(patches))
text1 = "The quick brown fox jumps over the lazy dog."
text2 = "That quick brown fox jumped over a lazy dog."
# Text2+Text1 inputs.
expectedPatch = "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"
# The second patch must be "-21,17 +21,18", not "-22,17 +21,18" due to rolling context.
patches = self.dmp.patch_make(text2, text1)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2 inputs.
expectedPatch = "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Diff input.
diffs = self.dmp.diff_main(text1, text2, False)
patches = self.dmp.patch_make(diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Diff inputs.
patches = self.dmp.patch_make(text1, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Text1+Text2+Diff inputs (deprecated).
patches = self.dmp.patch_make(text1, text2, diffs)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Character encoding.
patches = self.dmp.patch_make("`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?")
self.assertEquals("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n", self.dmp.patch_toText(patches))
# Character decoding.
diffs = [(self.dmp.DIFF_DELETE, "`1234567890-=[]\\;',./"), (self.dmp.DIFF_INSERT, "~!@#$%^&*()_+{}|:\"<>?")]
self.assertEquals(diffs, self.dmp.patch_fromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n")[0].diffs)
# Long string with repeats.
text1 = ""
for x in range(100):
text1 += "abcdef"
text2 = text1 + "123"
expectedPatch = "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"
patches = self.dmp.patch_make(text1, text2)
self.assertEquals(expectedPatch, self.dmp.patch_toText(patches))
# Test null inputs.
try:
self.dmp.patch_make(None, None)
self.assertFalse(True)
except ValueError:
# Exception expected.
pass
def testPatchSplitMax(self):
# Assumes that Match_MaxBits is 32.
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz")
oldToText = self.dmp.patch_toText(patches)
self.dmp.patch_splitMax(patches)
self.assertEquals(oldToText, self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("1234567890123456789012345678901234567890123456789012345678901234567890", "abc")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n", self.dmp.patch_toText(patches))
patches = self.dmp.patch_make("abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1")
self.dmp.patch_splitMax(patches)
self.assertEquals("@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n", self.dmp.patch_toText(patches))
def testPatchAddPadding(self):
# Both edges full.
patches = self.dmp.patch_make("", "test")
self.assertEquals("@@ -0,0 +1,4 @@\n+test\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n", self.dmp.patch_toText(patches))
# Both edges partial.
patches = self.dmp.patch_make("XY", "XtestY")
self.assertEquals("@@ -1,2 +1,6 @@\n X\n+test\n Y\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n", self.dmp.patch_toText(patches))
# Both edges none.
patches = self.dmp.patch_make("XXXXYYYY", "XXXXtestYYYY")
self.assertEquals("@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
self.dmp.patch_addPadding(patches)
self.assertEquals("@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n", self.dmp.patch_toText(patches))
def testPatchApply(self):
self.dmp.Match_Distance = 1000
self.dmp.Match_Threshold = 0.5
self.dmp.Patch_DeleteThreshold = 0.5
# Null case.
patches = self.dmp.patch_make("", "")
results = self.dmp.patch_apply(patches, "Hello world.")
self.assertEquals(("Hello world.", []), results)
# Exact match.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.")
results = self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(("That quick brown fox jumped over a lazy dog.", [True, True]), results)
# Partial match.
results = self.dmp.patch_apply(patches, "The quick red rabbit jumps over the tired tiger.")
self.assertEquals(("That quick red rabbit jumped over a tired tiger.", [True, True]), results)
# Failed match.
results = self.dmp.patch_apply(patches, "I am the very model of a modern major general.")
self.assertEquals(("I am the very model of a modern major general.", [False, False]), results)
# Big delete, small change.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
# Big delete, big change 1.
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", [False, True]), results)
# Big delete, big change 2.
self.dmp.Patch_DeleteThreshold = 0.6
patches = self.dmp.patch_make("x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy")
results = self.dmp.patch_apply(patches, "x12345678901234567890---------------++++++++++---------------12345678901234567890y")
self.assertEquals(("xabcy", [True, True]), results)
self.dmp.Patch_DeleteThreshold = 0.5
# Compensate for failed patch.
self.dmp.Match_Threshold = 0.0
self.dmp.Match_Distance = 0
patches = self.dmp.patch_make("abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890")
results = self.dmp.patch_apply(patches, "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890")
self.assertEquals(("ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", [False, True]), results)
self.dmp.Match_Threshold = 0.5
self.dmp.Match_Distance = 1000
# No side effects.
patches = self.dmp.patch_make("", "test")
patchstr = self.dmp.patch_toText(patches)
results = self.dmp.patch_apply(patches, "")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# No side effects with major delete.
patches = self.dmp.patch_make("The quick brown fox jumps over the lazy dog.", "Woof")
patchstr = self.dmp.patch_toText(patches)
self.dmp.patch_apply(patches, "The quick brown fox jumps over the lazy dog.")
self.assertEquals(patchstr, self.dmp.patch_toText(patches))
# Edge exact match.
patches = self.dmp.patch_make("", "test")
self.dmp.patch_apply(patches, "")
self.assertEquals(("test", [True]), results)
# Near edge exact match.
patches = self.dmp.patch_make("XY", "XtestY")
results = self.dmp.patch_apply(patches, "XY")
self.assertEquals(("XtestY", [True]), results)
# Edge partial match.
patches = self.dmp.patch_make("y", "y123")
results = self.dmp.patch_apply(patches, "x")
self.assertEquals(("x123", [True]), results)
if __name__ == "__main__":
unittest.main()
|
matthiasdiener/spack | refs/heads/develop | var/spack/repos/builtin/packages/ftgl/package.py | 2 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
import os
class Ftgl(AutotoolsPackage):
"""Library to use arbitrary fonts in OpenGL applications."""
homepage = "http://ftgl.sourceforge.net/docs/html/"
url = "https://sourceforge.net/projects/ftgl/files/FTGL%20Source/2.1.2/ftgl-2.1.2.tar.gz/download"
list_url = "https://sourceforge.net/projects/ftgl/files/FTGL%20Source/"
list_depth = 1
version('2.1.2', 'f81c0a7128192ba11e036186f9a968f2')
# Ftgl does not come with a configure script
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('pkgconfig', type='build')
depends_on('gl')
depends_on('glu')
depends_on('[email protected]:')
@property
@when('@2.1.2')
def configure_directory(self):
subdir = 'unix'
if sys.platform == 'darwin':
subdir = 'mac'
return os.path.join(self.stage.source_path, subdir)
|
artdent/mingus-python3 | refs/heads/python3 | unittest/test_notes.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path = ['../'] + sys.path
import mingus.core.notes as notes
from mingus.core.mt_exceptions import RangeError
import unittest
class test_notes(unittest.TestCase):
def setUp(self):
self.base_notes = [
'C',
'D',
'E',
'F',
'G',
'A',
'B',
]
self.sharps = [x + '#' for x in self.base_notes]
self.flats = [x + 'b' for x in self.base_notes]
self.exotic = [x + 'b###b#' for x in self.base_notes]
def test_base_note_validity(self):
list(map(lambda x: self.assert_(notes.is_valid_note(x), 'Base notes A-G'),
self.base_notes))
def test_sharp_note_validity(self):
list(map(lambda x: self.assert_(notes.is_valid_note(x), 'Sharp notes A#-G#'
), self.sharps))
def test_flat_note_validity(self):
list(map(lambda x: self.assert_(notes.is_valid_note(x), 'Flat notes Ab-Gb'),
self.flats))
def test_exotic_note_validity(self):
list(map(lambda x: self.assert_(notes.is_valid_note(x),
'Exotic notes Ab##b#-Gb###b#'), self.exotic))
def test_faulty_note_invalidity(self):
list(map(lambda x: self.assertEqual(False, notes.is_valid_note(x),
'Faulty notes'), ['asdasd', 'C###f', 'c', 'd', 'E*']))
def test_valid_int_to_note(self):
n = [
'C',
'C#',
'D',
'D#',
'E',
'F',
'F#',
'G',
'G#',
'A',
'A#',
'B',
]
list(map(lambda x: self.assertEqual(n[x], notes.int_to_note(x),
'Int to note mapping %d-%s failed.' % (x, n[x])), list(range(0, 12))))
def test_invalid_int_to_note(self):
faulty = [-1, 12, 13, 123123, -123]
list(map(lambda x: self.assertRaises(RangeError, notes.int_to_note, x),
faulty))
def test_to_minor(self):
known = {
'C': 'A',
'E': 'C#',
'B': 'G#',
'G': 'E',
'F': 'D',
}
list(map(lambda x: self.assertEqual(known[x], notes.to_minor(x),
'The minor of %s is not %s, expecting %s' % (x, notes.to_minor(x),
known[x])), list(known.keys())))
def test_to_major(self):
known = {
'C': 'Eb',
'A': 'C',
'E': 'G',
'F': 'Ab',
'D': 'F',
'B': 'D',
'B#': 'D#',
}
list(map(lambda x: self.assertEqual(known[x], notes.to_major(x),
'The major of %s is not %s, expecting %s' % (x, notes.to_major(x),
known[x])), list(known.keys())))
def test_augment(self):
known = {
'C': 'C#',
'C#': 'C##',
'Cb': 'C',
'Cbb': 'Cb',
}
list(map(lambda x: self.assertEqual(known[x], notes.augment(x),
'The augmented note of %s is not %s, expecting %s' % (x,
notes.augment(x), known[x])), list(known.keys())))
def test_diminish(self):
known = {
'C': 'Cb',
'C#': 'C',
'C##': 'C#',
'Cb': 'Cbb',
}
list(map(lambda x: self.assertEqual(known[x], notes.diminish(x),
'The diminished note of %s is not %s, expecting %s' % (x,
notes.diminish(x), known[x])), list(known.keys())))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test_notes)
|
shaunbrady/boto | refs/heads/develop | boto/mashups/server.py | 153 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 server
"""
import boto
import boto.utils
from boto.compat import StringIO
from boto.mashups.iobject import IObject
from boto.pyami.config import Config, BotoConfigPath
from boto.mashups.interactive import interactive_shell
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty
import os
class ServerSet(list):
def __getattr__(self, name):
results = []
is_callable = False
for server in self:
try:
val = getattr(server, name)
if callable(val):
is_callable = True
results.append(val)
except:
results.append(None)
if is_callable:
self.map_list = results
return self.map
return results
def map(self, *args):
results = []
for fn in self.map_list:
results.append(fn(*args))
return results
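# Fan-out behaviour of ServerSet (a sketch, not from the original source):
# 'servers.status' collects each server's status attribute, while
# 'servers.reboot()' resolves to a list of bound methods and invokes them all
# through map() above.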
class Server(Model):
@property
def ec2(self):
if self._ec2 is None:
self._ec2 = boto.connect_ec2()
return self._ec2
@classmethod
def Inventory(cls):
"""
Returns a list of Server instances, one for each Server object
persisted in the db
"""
l = ServerSet()
rs = cls.find()
for server in rs:
l.append(server)
return l
@classmethod
def Register(cls, name, instance_id, description=''):
s = cls()
s.name = name
s.instance_id = instance_id
s.description = description
s.save()
return s
def __init__(self, id=None, **kw):
super(Server, self).__init__(id, **kw)
self._reservation = None
self._instance = None
self._ssh_client = None
self._pkey = None
self._config = None
self._ec2 = None
name = StringProperty(unique=True, verbose_name="Name")
instance_id = StringProperty(verbose_name="Instance ID")
config_uri = StringProperty()
ami_id = StringProperty(verbose_name="AMI ID")
zone = StringProperty(verbose_name="Availability Zone")
security_group = StringProperty(verbose_name="Security Group", default="default")
key_name = StringProperty(verbose_name="Key Name")
elastic_ip = StringProperty(verbose_name="Elastic IP")
instance_type = StringProperty(verbose_name="Instance Type")
description = StringProperty(verbose_name="Description")
log = StringProperty()
def setReadOnly(self, value):
raise AttributeError
def getInstance(self):
if not self._instance:
if self.instance_id:
try:
rs = self.ec2.get_all_reservations([self.instance_id])
except:
return None
if len(rs) > 0:
self._reservation = rs[0]
self._instance = self._reservation.instances[0]
return self._instance
instance = property(getInstance, setReadOnly, None, 'The Instance for the server')
def getAMI(self):
if self.instance:
return self.instance.image_id
ami = property(getAMI, setReadOnly, None, 'The AMI for the server')
def getStatus(self):
if self.instance:
self.instance.update()
return self.instance.state
status = property(getStatus, setReadOnly, None,
'The status of the server')
def getHostname(self):
if self.instance:
return self.instance.public_dns_name
hostname = property(getHostname, setReadOnly, None,
'The public DNS name of the server')
def getPrivateHostname(self):
if self.instance:
return self.instance.private_dns_name
private_hostname = property(getPrivateHostname, setReadOnly, None,
'The private DNS name of the server')
def getLaunchTime(self):
if self.instance:
return self.instance.launch_time
launch_time = property(getLaunchTime, setReadOnly, None,
'The time the Server was started')
def getConsoleOutput(self):
if self.instance:
return self.instance.get_console_output()
console_output = property(getConsoleOutput, setReadOnly, None,
'Retrieve the console output for server')
def getGroups(self):
if self._reservation:
return self._reservation.groups
else:
return None
groups = property(getGroups, setReadOnly, None,
'The Security Groups controlling access to this server')
def getConfig(self):
if not self._config:
remote_file = BotoConfigPath
local_file = '%s.ini' % self.instance.id
self.get_file(remote_file, local_file)
self._config = Config(local_file)
return self._config
def setConfig(self, config):
local_file = '%s.ini' % self.instance.id
        fp = open(local_file, 'w')
config.write(fp)
fp.close()
self.put_file(local_file, BotoConfigPath)
self._config = config
config = property(getConfig, setConfig, None,
'The instance data for this server')
def set_config(self, config):
"""
Set SDB based config
"""
self._config = config
self._config.dump_to_sdb("botoConfigs", self.id)
def load_config(self):
self._config = Config(do_load=False)
self._config.load_from_sdb("botoConfigs", self.id)
def stop(self):
if self.instance:
self.instance.stop()
def start(self):
self.stop()
ec2 = boto.connect_ec2()
ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0]
groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)])
if not self._config:
self.load_config()
if not self._config.has_section("Credentials"):
self._config.add_section("Credentials")
self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id)
self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key)
if not self._config.has_section("Pyami"):
self._config.add_section("Pyami")
if self._manager.domain:
self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name)
self._config.set("Pyami", 'server_sdb_name', self.name)
cfg = StringIO()
self._config.write(cfg)
cfg = cfg.getvalue()
r = ami.run(min_count=1,
max_count=1,
key_name=self.key_name,
security_groups = groups,
instance_type = self.instance_type,
placement = self.zone,
user_data = cfg)
i = r.instances[0]
self.instance_id = i.id
self.put()
if self.elastic_ip:
ec2.associate_address(self.instance_id, self.elastic_ip)
def reboot(self):
if self.instance:
self.instance.reboot()
def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts',
uname='root'):
import paramiko
if not self.instance:
print('No instance yet!')
return
if not self._ssh_client:
if not key_file:
iobject = IObject()
key_file = iobject.get_filename('Path to OpenSSH Key file')
self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._ssh_client.connect(self.instance.public_dns_name,
username=uname, pkey=self._pkey)
return self._ssh_client
def get_file(self, remotepath, localpath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
sftp_client.get(remotepath, localpath)
def put_file(self, localpath, remotepath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
sftp_client.put(localpath, remotepath)
def listdir(self, remotepath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
return sftp_client.listdir(remotepath)
def shell(self, key_file=None):
ssh_client = self.get_ssh_client(key_file)
channel = ssh_client.invoke_shell()
interactive_shell(channel)
def bundle_image(self, prefix, key_file, cert_file, size):
print('bundling image...')
print('\tcopying cert and pk over to /mnt directory on server')
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
path, name = os.path.split(key_file)
remote_key_file = '/mnt/%s' % name
self.put_file(key_file, remote_key_file)
path, name = os.path.split(cert_file)
remote_cert_file = '/mnt/%s' % name
self.put_file(cert_file, remote_cert_file)
print('\tdeleting %s' % BotoConfigPath)
# delete the metadata.ini file if it exists
try:
sftp_client.remove(BotoConfigPath)
except:
pass
command = 'sudo ec2-bundle-vol '
command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
command += '-u %s ' % self._reservation.owner_id
command += '-p %s ' % prefix
command += '-s %d ' % size
command += '-d /mnt '
        if self.instance.instance_type in ('m1.small', 'c1.medium'):
command += '-r i386'
else:
command += '-r x86_64'
print('\t%s' % command)
t = ssh_client.exec_command(command)
response = t[1].read()
print('\t%s' % response)
print('\t%s' % t[2].read())
print('...complete!')
def upload_bundle(self, bucket, prefix):
print('uploading bundle...')
command = 'ec2-upload-bundle '
command += '-m /mnt/%s.manifest.xml ' % prefix
command += '-b %s ' % bucket
command += '-a %s ' % self.ec2.aws_access_key_id
command += '-s %s ' % self.ec2.aws_secret_access_key
print('\t%s' % command)
ssh_client = self.get_ssh_client()
t = ssh_client.exec_command(command)
response = t[1].read()
print('\t%s' % response)
print('\t%s' % t[2].read())
print('...complete!')
def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None):
iobject = IObject()
if not bucket:
bucket = iobject.get_string('Name of S3 bucket')
if not prefix:
prefix = iobject.get_string('Prefix for AMI file')
if not key_file:
key_file = iobject.get_filename('Path to RSA private key file')
if not cert_file:
cert_file = iobject.get_filename('Path to RSA public cert file')
if not size:
size = iobject.get_int('Size (in MB) of bundled image')
self.bundle_image(prefix, key_file, cert_file, size)
self.upload_bundle(bucket, prefix)
print('registering image...')
self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix))
return self.image_id
def attach_volume(self, volume, device="/dev/sdp"):
"""
Attach an EBS volume to this server
:param volume: EBS Volume to attach
:type volume: boto.ec2.volume.Volume
:param device: Device to attach to (default to /dev/sdp)
:type device: string
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device)
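    # Hedged example call ('vol-0123abcd' is a hypothetical volume id):
    #   server.attach_volume('vol-0123abcd', device='/dev/sdf')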
def detach_volume(self, volume):
"""
Detach an EBS volume from this server
:param volume: EBS Volume to detach
:type volume: boto.ec2.volume.Volume
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)
def install_package(self, package_name):
print('installing %s...' % package_name)
command = 'yum -y install %s' % package_name
print('\t%s' % command)
ssh_client = self.get_ssh_client()
t = ssh_client.exec_command(command)
response = t[1].read()
print('\t%s' % response)
print('\t%s' % t[2].read())
print('...complete!')
|
stackforge/solum-dashboard | refs/heads/master | solumdashboard/languagepacks/views.py | 1 | # Copyright (c) 2014 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
import json
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import views
from solumclient.v1 import languagepack as cli_lp
from solumdashboard.api.client import client as solumclient
from solumdashboard.languagepacks import forms as lp_forms
from solumdashboard.languagepacks import tables as lp_tables
class IndexView(tables.DataTableView):
table_class = lp_tables.LanguagepacksTable
template_name = 'languagepacks/index.html'
page_title = _("Languagepacks")
def get_data(self):
try:
solum = solumclient(self.request)
languagepacks = solum.languagepacks.list()
except Exception as e:
languagepacks = []
exceptions.handle(
self.request,
_('Unable to retrieve languagepacks: %s') % e)
return languagepacks
class DetailView(views.HorizonTemplateView):
template_name = 'languagepacks/detail.html'
page_title = "{{ languagepack.name }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
languagepack, loglist = self.get_data()
context["languagepack"] = languagepack
context["loglist"] = loglist
table = lp_tables.LanguagepacksTable(self.request)
context["actions"] = table.render_row_actions(languagepack)
return context
def get_data(self):
lp_id = self.kwargs['languagepack_id']
solum = solumclient(self.request)
languagepack = None
loglist = []
try:
languagepack = solum.languagepacks.find(name_or_id=lp_id)
loglist = cli_lp.LanguagePackManager(solum).logs(
lp_id=lp_id)
except Exception as e:
INDEX_URL = 'horizon:solum:languagepacks:index'
exceptions.handle(
self.request,
_('Unable to retrieve languagepack details: %s') % e,
redirect=reverse(INDEX_URL))
for log in loglist:
strategy_info = json.loads(log.strategy_info)
if log.strategy == 'local':
log.local_storage = log.location
elif log.strategy == 'swift':
log.swift_container = strategy_info['container']
log.swift_path = log.location
return languagepack, loglist
class CreateView(forms.ModalFormView):
form_class = lp_forms.CreateForm
template_name = 'languagepacks/create.html'
modal_header = _("Create Languagepack")
page_title = _("Create Languagepack")
submit_url = reverse_lazy("horizon:solum:languagepacks:create")
success_url = reverse_lazy("horizon:solum:languagepacks:index")
|
drnextgis/QGIS | refs/heads/master | python/plugins/processing/script/snippets.py | 20 | ##Iterate over the features of a layer.
feats = processing.features(layer)
n = len(feats)
for i, feat in enumerate(feats):
progress.setPercentage(int(100 * i / n))
#do something with 'feat'
##Create a new layer from another one, with an extra field
fields = processing.fields(layer)
# int, float and bool can be used as well as types
fields.append(('NEW_FIELD', str))
writer = processing.VectorWriter(output_file, None, fields,
processing.geomtype(layer), layer.crs()
)
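# Hedged continuation (not part of the original snippet): copy every feature
# into the new layer and fill the extra field, assuming the classic Processing
# script API where the writer accepts QgsFeature objects; str(feat.id()) is
# only a placeholder value for NEW_FIELD.
for feat in processing.features(layer):
    attrs = feat.attributes()
    attrs.append(str(feat.id()))
    feat.setAttributes(attrs)
    writer.addFeature(feat)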
##Create a new table
writer = processing.TableWriter(output_file, None, ['field1', 'field2'])
|
ammarkhann/FinalSeniorCode | refs/heads/master | lib/python2.7/site-packages/IPython/utils/tests/test_tokenutil.py | 15 | """Tests for tokenutil"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import nose.tools as nt
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
def expect_token(expected, cell, cursor_pos):
token = token_at_cursor(cell, cursor_pos)
offset = 0
for line in cell.splitlines():
if offset + len(line) >= cursor_pos:
break
else:
offset += len(line)+1
column = cursor_pos - offset
line_with_cursor = '%s|%s' % (line[:column], line[column:])
nt.assert_equal(token, expected,
"Expected %r, got %r in: %r (pos %i)" % (
expected, token, line_with_cursor, cursor_pos)
)
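# For example, with cell "a = 5\nb = 6" and cursor_pos=7 the loop lands on the
# second line (offset 6), so a failure would be reported against "b| = 6".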
def test_simple():
cell = "foo"
for i in range(len(cell)):
expect_token("foo", cell, i)
def test_function():
cell = "foo(a=5, b='10')"
expected = 'foo'
# up to `foo(|a=`
for i in range(cell.find('a=') + 1):
expect_token("foo", cell, i)
# find foo after `=`
for i in [cell.find('=') + 1, cell.rfind('=') + 1]:
expect_token("foo", cell, i)
# in between `5,|` and `|b=`
for i in range(cell.find(','), cell.find('b=')):
expect_token("foo", cell, i)
def test_multiline():
cell = '\n'.join([
'a = 5',
'b = hello("string", there)'
])
expected = 'hello'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
    expected = 'there'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
def test_multiline_token():
cell = '\n'.join([
'"""\n\nxxxxxxxxxx\n\n"""',
'5, """',
'docstring',
'multiline token',
'""", [',
'2, 3, "complicated"]',
'b = hello("string", there)'
])
expected = 'hello'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
    expected = 'there'
start = cell.index(expected) + 1
for i in range(start, start + len(expected)):
expect_token(expected, cell, i)
def test_nested_call():
cell = "foo(bar(a=5), b=10)"
expected = 'foo'
start = cell.index('bar') + 1
for i in range(start, start + 3):
expect_token(expected, cell, i)
expected = 'bar'
start = cell.index('a=')
for i in range(start, start + 3):
expect_token(expected, cell, i)
expected = 'foo'
start = cell.index(')') + 1
for i in range(start, len(cell)-1):
expect_token(expected, cell, i)
def test_attrs():
cell = "a = obj.attr.subattr"
expected = 'obj'
idx = cell.find('obj') + 1
for i in range(idx, idx + 3):
expect_token(expected, cell, i)
idx = cell.find('.attr') + 2
expected = 'obj.attr'
for i in range(idx, idx + 4):
expect_token(expected, cell, i)
idx = cell.find('.subattr') + 2
expected = 'obj.attr.subattr'
for i in range(idx, len(cell)):
expect_token(expected, cell, i)
def test_line_at_cursor():
cell = ""
(line, offset) = line_at_cursor(cell, cursor_pos=11)
assert line == "", ("Expected '', got %r" % line)
    assert offset == 0, ("Expected 0, got %r" % offset)
def test_multiline_statement():
    cell = """a = (1,
    3)

int()
map()
"""
for c in range(16, 22):
yield lambda: expect_token("int", cell, c)
for c in range(22, 28):
yield lambda: expect_token("map", cell, c)
|