Dataset schema (one record per source file; ⌀ marks nullable columns):

| Column | Type | Length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
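If this schema comes from a Hugging Face-style code dataset, a minimal sketch like the following could stream a few records and inspect these columns. This is an assumption about the source; the dataset identifier "some-org/some-code-dataset" is a placeholder, not taken from this page.

from datasets import load_dataset  # pip install datasets

# Placeholder dataset id -- substitute the real identifier for this dump.
ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for i, record in enumerate(ds):
    # Each record carries the file content plus the repo metadata described above.
    print(record["max_stars_repo_path"], record["size"], record["max_stars_count"])
    if i == 2:
        break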
6b269d5b99914d3425678d7f3582d07b2c34d9e3 | 495 | py | Python | mayan/apps/document_states/queues.py | Syunkolee9891/Mayan-EDMS | head 3759a9503a264a180b74cc8518388f15ca66ac1a | licenses ["Apache-2.0"] | stars: 1 (2021-06-17) | issues: 7 (2020-06-06 to 2022-01-13) | forks: null | content:
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from mayan.apps.task_manager.classes import CeleryQueue
from mayan.apps.task_manager.workers import worker_slow
queue_document_states = CeleryQueue(
name='document_states', label=_('Document states'), worker=worker_slow
)
queue_document_states.add_task_type(
dotted_path='mayan.apps.document_states.tasks.task_launch_all_workflows',
label=_('Launch all workflows')
)
avg_line_length: 33 | max_line_length: 77 | alphanum_fraction: 0.824242
84011e68b18b2661a55f6a6460760c092fc28194 | 2,715 | py | Python | test/test_build.py | FindDefinition/ccimport | head 2be66fe4cdeb4daa915d2dfc75f2363c0c0bfb75 | licenses ["MIT"] | stars: 1 (2021-11-23) | issues: null | forks: 1 (2021-11-23) | content:
import subprocess
from pathlib import Path
import ccimport
from ccimport import compat
from ccimport.utils import tempdir
import os
import sys
def test_cpp_build():
source = ccimport.autoimport([Path(__file__).parent / "source.cc"],
Path(__file__).parent / "source")
assert source.sub(2, 1) == 1
obj = source.TestClass(5)
assert obj.add(3) == 8
def test_cpp_exec_build():
with tempdir() as tempd:
sources = [
Path(__file__).parent / "executable.cc",
Path(__file__).parent / "source.cc"
]
p2s = {Path(__file__).parent / "some_pch.h": sources}
pch_to_include = {Path(__file__).parent / "some_pch.h": "some_pch.h"}
source = ccimport.ccimport(sources,
tempd / "executable",
includes=[Path(__file__).parent],
shared=False,
load_library=False,
pch_to_sources=p2s,
pch_to_include=pch_to_include,
verbose=False,
objects_folder="objects")
output = subprocess.check_output([str(source)])
assert output.decode("utf-8").strip() == "hello ccimport!"
def _test_gcc_crosscompile_build():
# currently no CI/CD available, so disable this test.
if compat.InWindows:
return
# aarch64-linux-gnu-g++
with tempdir() as tempd:
py_ver = (sys.version_info[0], sys.version_info[1])
os.environ["SETUPTOOLS_EXT_SUFFIX"] = compat.get_extension_suffix_linux_custom(py_ver, "aarch64")
sources = [
Path(__file__).parent / "executable.cc",
Path(__file__).parent / "source.cc"
]
p2s = {Path(__file__).parent / "some_pch.h": sources}
pch_to_include = {Path(__file__).parent / "some_pch.h": "some_pch.h"}
source = ccimport.ccimport(sources,
tempd / "executable",
includes=[Path(__file__).parent],
shared=True,
load_library=False,
pch_to_sources=p2s,
pch_to_include=pch_to_include,
verbose=True,
objects_folder="objects")
print(input("hold"), tempd)
output = subprocess.check_output([str(source)])
assert output.decode("utf-8").strip() == "hello ccimport!"
if __name__ == "__main__":
_test_gcc_crosscompile_build()
avg_line_length: 38.239437 | max_line_length: 105 | alphanum_fraction: 0.523757
3df3c764ed6e853ac00744315e025f63df6e9ea2 | 1,628 | py | Python | loss/loss_functions.py | RyanDsilva/nn-from-scratch | head ef2bc5794e2d88c948d62762a415c306dda4101f | licenses ["MIT"] | stars: 23 (2020-04-22 to 2021-12-14) | issues: 5 (2020-05-09 to 2020-10-09) | forks: 5 (2020-05-22 to 2021-10-01) | content:
import numpy as np
from math import log2  # needed by kl_divergence, entropy and cross_entropy below
def MSE(y, yhat):
return np.mean(np.power(y-yhat, 2))
def dMSE(y, yhat):
return 2*(yhat-y)/y.size
def MAE(y, yhat):
return np.sum(np.abs(y-yhat))
def dMAE(y, yhat):
    return np.sign(yhat - y)  # elementwise (sub)gradient of |y - yhat| w.r.t. yhat
def kl_divergence(y, yhat):
"""
measures the difference between two probability distributions
over the same variable.
Parameters:
- y : Numpy array
- yhat : Numpy array
Returns:
    the KL divergence of the two distributions (in bits, since base-2 logs are used).
    As implemented here, it is the sum, over events, of the probability of each
    event under y multiplied by the log of that probability over the probability
    of the same event under yhat.
"""
return sum(y[i] * log2(y[i]/yhat[i]) for i in range(len(y)))
def entropy(y,factor=1e-15):
"""
    measures the average uncertainty (in bits) of a probability distribution
    whose entries are probabilities between 0 and 1
    Parameters:
    - y: Numpy array
    - factor: Optional (to avoid taking the log of 0).
    Returns:
    the entropy of the distribution in bits (a non-negative float)
"""
return -sum([y[i] * log2(y[i]+factor) for i in range(len(y))])
def cross_entropy(y,yhat,mode=None,factor=1e-15):
"""
    calculates the cross-entropy loss between two probability vectors.
    Parameters:
    - y: Numpy array
    - yhat: Numpy array
    - mode: Optional (if mode == 'Kl_diversion', compute the cross-entropy as
      entropy(y) plus the KL divergence between y and yhat)
    - factor: Optional (to avoid taking the log of 0).
    Returns:
    the cross-entropy in bits (a non-negative float)
"""
if(mode=='Kl_diversion'):
return entropy(y) + kl_divergence(y, yhat)
return -sum([y[i]*log2(yhat[i]+factor) for i in range(len(y))])
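# Added usage sketch (not part of the original file): evaluates the losses above
# on two small discrete probability distributions; requires only numpy and math.log2.
if __name__ == "__main__":
    p = np.array([0.10, 0.40, 0.50])
    q = np.array([0.80, 0.15, 0.05])
    print("MSE:", MSE(p, q))
    print("MAE:", MAE(p, q))
    print("KL(p || q) in bits:", kl_divergence(p, q))
    print("Entropy(p) in bits:", entropy(p))
    print("Cross-entropy(p, q) in bits:", cross_entropy(p, q))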
avg_line_length: 23.257143 | max_line_length: 92 | alphanum_fraction: 0.648649
ba556a1972df6326643ee40a4f3063dca9a1f881 | 2,276 | py | Python | test/test_geolines.py | PlusWayne/pyecharts | head 881771378e5fe1d0f55a13f2cf63c7d181eb1894 | licenses ["MIT"] | stars: null | issues: null | forks: null | content:
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals
from nose.tools import assert_raises
from pyecharts import GeoLines, Style
from pyecharts.datasets.coordinates import search_coordinates_by_keyword
style = Style(
title_top="#fff",
title_pos="center",
width=1200,
height=600,
background_color="#404a59",
)
style_geo = style.add(
is_label_show=True,
line_curve=0.2,
line_opacity=0.6,
legend_text_color="#eee",
legend_pos="right",
geo_effect_symbol="plane",
geo_effect_symbolsize=15,
label_color=["#a6c84c", "#ffa022", "#46bee9"],
label_pos="right",
label_formatter="{b}",
label_text_color="#eee",
legend_selectedmode="single",
)
def test_geolines():
data_guangzhou = [
["广州", "上海"],
["广州", "北京"],
["广州", "南京"],
["广州", "重庆"],
["广州", "兰州"],
["广州", "杭州"],
]
data_beijing = [
["北京", "上海"],
["北京", "广州"],
["北京", "南京"],
["北京", "重庆"],
["北京", "兰州"],
["北京", "杭州"],
]
lines = GeoLines("GeoLines 示例", **style.init_style)
lines.add("从广州出发", data_guangzhou, **style_geo)
lines.add("从北京出发", data_beijing, **style_geo)
lines.print_echarts_options()
lines.render()
def test_with_custom_coordinates():
data_guangzhou = [
["广州", "上海"],
["广州", "北京"],
["广州", "南京"],
["广州", "重庆"],
["广州", "兰州"],
["广州", "A市"],
]
lines = GeoLines("GeoLines 示例", **style.init_style)
coordinate = lines.get_coordinate("广州")
assert 2 == len(coordinate)
with assert_raises(ValueError):
lines.get_coordinate("A市", raise_exception=True)
lines.add(
"从广州出发",
data_guangzhou,
geo_cities_coords={"A市": (119.3, 26.08)},
**style_geo
)
lines.render()
def test_with_full_example():
line_data = [["广州", "上海"], ["广州", "北京"], ["广州", "南京"], ["广州", "A市"]]
lines = GeoLines("GeoLines 示例", **style.init_style)
with assert_raises(ValueError):
lines.add("从广州出发", line_data, **style_geo)
assert 0 == len(search_coordinates_by_keyword("A市"))
lines.add_coordinate("A市", 119.3, 26.08)
lines.add("从广州出发", line_data, **style_geo)
lines.render()
avg_line_length: 23.957895 | max_line_length: 72 | alphanum_fraction: 0.572935
51f5b067821c1beaa67545340764652bc6441c5a | 9,543 | py | Python | Lib/test/test_sort.py | raychorn/svn_Python-2.5.1 | head 425005b1b489ba44ec0bb989e077297e8953d9be | licenses ["PSF-2.0"] | stars: null | issues: null | forks: null | content:
from test import test_support
import random
import sys
import unittest
verbose = test_support.verbose
nerrors = 0
def check(tag, expected, raw, compare=None):
global nerrors
if verbose:
print " checking", tag
orig = raw[:] # save input in case of error
if compare:
raw.sort(compare)
else:
raw.sort()
if len(expected) != len(raw):
print "error in", tag
print "length mismatch;", len(expected), len(raw)
print expected
print orig
print raw
nerrors += 1
return
for i, good in enumerate(expected):
maybe = raw[i]
if good is not maybe:
print "error in", tag
print "out of order at index", i, good, maybe
print expected
print orig
print raw
nerrors += 1
return
class TestBase(unittest.TestCase):
def testStressfully(self):
# Try a variety of sizes at and around powers of 2, and at powers of 10.
sizes = [0]
for power in range(1, 10):
n = 2 ** power
sizes.extend(range(n-1, n+2))
sizes.extend([10, 100, 1000])
class Complains(object):
maybe_complain = True
def __init__(self, i):
self.i = i
def __lt__(self, other):
if Complains.maybe_complain and random.random() < 0.001:
if verbose:
print " complaining at", self, other
raise RuntimeError
return self.i < other.i
def __repr__(self):
return "Complains(%d)" % self.i
class Stable(object):
def __init__(self, key, i):
self.key = key
self.index = i
def __cmp__(self, other):
return cmp(self.key, other.key)
def __repr__(self):
return "Stable(%d, %d)" % (self.key, self.index)
for n in sizes:
x = range(n)
if verbose:
print "Testing size", n
s = x[:]
check("identity", x, s)
s = x[:]
s.reverse()
check("reversed", x, s)
s = x[:]
random.shuffle(s)
check("random permutation", x, s)
y = x[:]
y.reverse()
s = x[:]
check("reversed via function", y, s, lambda a, b: cmp(b, a))
if verbose:
print " Checking against an insane comparison function."
print " If the implementation isn't careful, this may segfault."
s = x[:]
s.sort(lambda a, b: int(random.random() * 3) - 1)
check("an insane function left some permutation", x, s)
x = [Complains(i) for i in x]
s = x[:]
random.shuffle(s)
Complains.maybe_complain = True
it_complained = False
try:
s.sort()
except RuntimeError:
it_complained = True
if it_complained:
Complains.maybe_complain = False
check("exception during sort left some permutation", x, s)
s = [Stable(random.randrange(10), i) for i in xrange(n)]
augmented = [(e, e.index) for e in s]
augmented.sort() # forced stable because ties broken by index
x = [e for e, i in augmented] # a stable sort of s
check("stability", x, s)
#==============================================================================
class TestBugs(unittest.TestCase):
def test_bug453523(self):
# bug 453523 -- list.sort() crasher.
# If this fails, the most likely outcome is a core dump.
# Mutations during a list sort should raise a ValueError.
class C:
def __lt__(self, other):
if L and random.random() < 0.75:
L.pop()
else:
L.append(3)
return random.random() < 0.5
L = [C() for i in range(50)]
self.assertRaises(ValueError, L.sort)
def test_cmpNone(self):
# Testing None as a comparison function.
L = range(50)
random.shuffle(L)
L.sort(None)
self.assertEqual(L, range(50))
def test_undetected_mutation(self):
# Python 2.4a1 did not always detect mutation
memorywaster = []
for i in range(20):
def mutating_cmp(x, y):
L.append(3)
L.pop()
return cmp(x, y)
L = [1,2]
self.assertRaises(ValueError, L.sort, mutating_cmp)
def mutating_cmp(x, y):
L.append(3)
del L[:]
return cmp(x, y)
self.assertRaises(ValueError, L.sort, mutating_cmp)
memorywaster = [memorywaster]
#==============================================================================
class TestDecorateSortUndecorate(unittest.TestCase):
def test_decorated(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
copy = data[:]
random.shuffle(data)
data.sort(key=str.lower)
copy.sort(cmp=lambda x,y: cmp(x.lower(), y.lower()))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, data.sort, None, lambda x,y: 0)
def test_stability(self):
data = [(random.randrange(100), i) for i in xrange(200)]
copy = data[:]
data.sort(key=lambda (x,y): x) # sort on the random first field
copy.sort() # sort using both fields
self.assertEqual(data, copy) # should get the same result
def test_cmp_and_key_combination(self):
# Verify that the wrapper has been removed
def compare(x, y):
self.assertEqual(type(x), str)
self.assertEqual(type(x), str)
return cmp(x, y)
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
data.sort(cmp=compare, key=str.lower)
def test_badcmp_with_key(self):
# Verify that the wrapper has been removed
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, data.sort, "bad", str.lower)
def test_key_with_exception(self):
# Verify that the wrapper has been removed
data = range(-2,2)
dup = data[:]
self.assertRaises(ZeroDivisionError, data.sort, None, lambda x: 1/x)
self.assertEqual(data, dup)
def test_key_with_mutation(self):
data = range(10)
def k(x):
del data[:]
data[:] = range(20)
return x
self.assertRaises(ValueError, data.sort, key=k)
def test_key_with_mutating_del(self):
data = range(10)
class SortKiller(object):
def __init__(self, x):
pass
def __del__(self):
del data[:]
data[:] = range(20)
self.assertRaises(ValueError, data.sort, key=SortKiller)
def test_key_with_mutating_del_and_exception(self):
data = range(10)
## dup = data[:]
class SortKiller(object):
def __init__(self, x):
if x > 2:
raise RuntimeError
def __del__(self):
del data[:]
data[:] = range(20)
self.assertRaises(RuntimeError, data.sort, key=SortKiller)
## major honking subtlety: we *can't* do:
##
## self.assertEqual(data, dup)
##
## because there is a reference to a SortKiller in the
## traceback and by the time it dies we're outside the call to
## .sort() and so the list protection gimmicks are out of
## date (this cost some brain cells to figure out...).
def test_reverse(self):
data = range(100)
random.shuffle(data)
data.sort(reverse=True)
self.assertEqual(data, range(99,-1,-1))
self.assertRaises(TypeError, data.sort, "wrong type")
def test_reverse_stability(self):
data = [(random.randrange(100), i) for i in xrange(200)]
copy1 = data[:]
copy2 = data[:]
data.sort(cmp=lambda x,y: cmp(x[0],y[0]), reverse=True)
copy1.sort(cmp=lambda x,y: cmp(y[0],x[0]))
self.assertEqual(data, copy1)
copy2.sort(key=lambda x: x[0], reverse=True)
self.assertEqual(data, copy2)
#==============================================================================
def test_main(verbose=None):
test_classes = (
TestBase,
TestDecorateSortUndecorate,
TestBugs,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
avg_line_length: 32.906897 | max_line_length: 88 | alphanum_fraction: 0.507178
2c6f82ee65522f6fc19cc60e8669ae68f74028b2 | 13,128 | py | Python | timm_new/models/selecsls.py | Yuki-Tanaka-33937424/pytorch-image-models | head 6c1da622dcb2a0421aeb6cdcadd03cc366331f66 | licenses ["Apache-2.0"] | stars: null | issues: null | forks: null | content:
"""PyTorch SelecSLS Net example for ImageNet Classification
License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode)
Author: Dushyant Mehta (@mehtadushy)
SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D
Human Pose Estimation with a Single RGB Camera, Mehta et al."
https://arxiv.org/abs/1907.00837
Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models
and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm_new.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import create_classifier
from .registry import register_model
__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = {
'selecsls42': _cfg(
url='',
interpolation='bicubic'),
'selecsls42b': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth',
interpolation='bicubic'),
'selecsls60': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth',
interpolation='bicubic'),
'selecsls60b': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth',
interpolation='bicubic'),
'selecsls84': _cfg(
url='',
interpolation='bicubic'),
}
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
class SelectSeq(nn.Module):
def __init__(self, mode='index', index=0):
super(SelectSeq, self).__init__()
self.mode = mode
self.index = index
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor]) -> (torch.Tensor)
pass
def forward(self, x) -> torch.Tensor:
if self.mode == 'index':
return x[self.index]
else:
return torch.cat(x, dim=1)
def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1):
if padding is None:
padding = ((stride - 1) + dilation * (k - 1)) // 2
return nn.Sequential(
nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_chs),
nn.ReLU(inplace=True)
)
class SelecSLSBlock(nn.Module):
def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1):
super(SelecSLSBlock, self).__init__()
self.stride = stride
self.is_first = is_first
assert stride in [1, 2]
# Process input with 4 conv blocks with the same number of input and output channels
self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation)
self.conv2 = conv_bn(mid_chs, mid_chs, 1)
self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1)
self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3)
self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1)
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if not isinstance(x, list):
x = [x]
assert len(x) in [1, 2]
d1 = self.conv1(x[0])
d2 = self.conv3(self.conv2(d1))
d3 = self.conv5(self.conv4(d2))
if self.is_first:
out = self.conv6(torch.cat([d1, d2, d3], 1))
return [out, out]
else:
return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]]
class SelecSLS(nn.Module):
"""SelecSLS42 / SelecSLS60 / SelecSLS84
Parameters
----------
cfg : network config dictionary specifying block type, feature, and head args
num_classes : int, default 1000
Number of classification classes.
in_chans : int, default 3
Number of input (color) channels.
drop_rate : float, default 0.
Dropout probability before classifier, for training
global_pool : str, default 'avg'
Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
"""
def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
self.num_classes = num_classes
self.drop_rate = drop_rate
super(SelecSLS, self).__init__()
self.stem = conv_bn(in_chans, 32, stride=2)
self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']])
self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way
self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']])
self.num_features = cfg['num_features']
self.feature_info = cfg['feature_info']
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
def get_classifier(self):
return self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.features(x)
x = self.head(self.from_seq(x))
return x
def forward(self, x):
x = self.forward_features(x)
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.fc(x)
return x
def _create_selecsls(variant, pretrained, **kwargs):
cfg = {}
feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
if variant.startswith('selecsls42'):
cfg['block'] = SelecSLSBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 144, 144, True, 2),
(144, 144, 144, 288, False, 1),
(288, 0, 304, 304, True, 2),
(304, 304, 304, 480, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.3'),
dict(num_chs=480, reduction=16, module='features.5'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls42b':
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(480, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant.startswith('selecsls60'):
cfg['block'] = SelecSLSBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 128, False, 1),
(128, 0, 128, 128, True, 2),
(128, 128, 128, 128, False, 1),
(128, 128, 128, 288, False, 1),
(288, 0, 288, 288, True, 2),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 288, False, 1),
(288, 288, 288, 416, False, 1),
]
feature_info.extend([
dict(num_chs=128, reduction=4, module='features.1'),
dict(num_chs=288, reduction=8, module='features.4'),
dict(num_chs=416, reduction=16, module='features.8'),
])
# Head can be replaced with alternative configurations depending on the problem
feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
if variant == 'selecsls60b':
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1280, 3, 2),
(1280, 1024, 1, 1),
]
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
cfg['num_features'] = 1024
else:
cfg['head'] = [
(416, 756, 3, 2),
(756, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 1, 1),
]
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
cfg['num_features'] = 1280
elif variant == 'selecsls84':
cfg['block'] = SelecSLSBlock
# Define configuration of the network after the initial neck
cfg['features'] = [
# in_chs, skip_chs, mid_chs, out_chs, is_first, stride
(32, 0, 64, 64, True, 2),
(64, 64, 64, 144, False, 1),
(144, 0, 144, 144, True, 2),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 144, False, 1),
(144, 144, 144, 304, False, 1),
(304, 0, 304, 304, True, 2),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 304, False, 1),
(304, 304, 304, 512, False, 1),
]
feature_info.extend([
dict(num_chs=144, reduction=4, module='features.1'),
dict(num_chs=304, reduction=8, module='features.6'),
dict(num_chs=512, reduction=16, module='features.12'),
])
# Head can be replaced with alternative configurations depending on the problem
cfg['head'] = [
(512, 960, 3, 2),
(960, 1024, 3, 1),
(1024, 1024, 3, 2),
(1024, 1280, 3, 1),
]
cfg['num_features'] = 1280
feature_info.extend([
dict(num_chs=1024, reduction=32, module='head.1'),
dict(num_chs=1280, reduction=64, module='head.3')
])
else:
raise ValueError('Invalid net configuration ' + variant + ' !!!')
cfg['feature_info'] = feature_info
# this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises?
return build_model_with_cfg(
SelecSLS, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=cfg,
feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True),
**kwargs)
@register_model
def selecsls42(pretrained=False, **kwargs):
"""Constructs a SelecSLS42 model.
"""
return _create_selecsls('selecsls42', pretrained, **kwargs)
@register_model
def selecsls42b(pretrained=False, **kwargs):
"""Constructs a SelecSLS42_B model.
"""
return _create_selecsls('selecsls42b', pretrained, **kwargs)
@register_model
def selecsls60(pretrained=False, **kwargs):
"""Constructs a SelecSLS60 model.
"""
return _create_selecsls('selecsls60', pretrained, **kwargs)
@register_model
def selecsls60b(pretrained=False, **kwargs):
"""Constructs a SelecSLS60_B model.
"""
return _create_selecsls('selecsls60b', pretrained, **kwargs)
@register_model
def selecsls84(pretrained=False, **kwargs):
"""Constructs a SelecSLS84 model.
"""
return _create_selecsls('selecsls84', pretrained, **kwargs)
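# Added usage sketch (not part of the original file): builds one of the registered
# variants above and runs a dummy forward pass. It assumes the module is imported in
# its package context (the relative imports above need the timm_new package) and that
# PyTorch is installed, e.g. run as `python -m timm_new.models.selecsls`.
if __name__ == "__main__":
    model = selecsls42b(pretrained=False, num_classes=10)
    dummy = torch.randn(1, 3, 224, 224)
    logits = model(dummy)
    print("output shape:", logits.shape)  # [1, 10] given num_classes=10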
avg_line_length: 36.165289 | max_line_length: 121 | alphanum_fraction: 0.587218
a5cab280478e1f01c8f252ac7758efa3cd252a3b | 762 | py | Python | ALPHABETS/SMALL_ALPHABETS/e.py | charansaim1819/Python_Patterns | head 02e636855003346ec84c3d69f2be174dc9e9e3cb | licenses ["MIT"] | stars: null | issues: null | forks: null | content:
#Shape of small e:
def for_e():
"""printing small 'e' using for loop"""
for row in range(7):
for col in range(4):
if col==0 and row not in(0,6) or col in(1,2) and row%3==0 or col==3 and row in(1,2,5):
print("*",end=" ")
else:
print(" ",end= " ")
print()
def while_e():
"""printing small 'e' using while loop"""
i=0
while i<7:
j=0
while j<4:
if j==0 and i not in(0,6) or i==0 and j in(1,2) or j==3 and i in(1,2,5) or j==1 and i in(0,3,6) or j==2 and i in(0,3,6):
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
print()
i+=1
avg_line_length: 25.4 | max_line_length: 133 | alphanum_fraction: 0.39895
44e5b512bed9545444a35d0d99af33c016f1b0c2 | 323 | py | Python | sms_proxy/log.py | jmcparland/sms-proxy | head 1e59ab9877be9dca5dea02f04f9404e6eb70edb2 | licenses ["MIT"] | stars: 12 (2016-06-02 to 2021-11-28) | issues: 2 (2019-04-10 to 2020-06-13) for sms_poll/log.py in caseymacphee/sms-polling (head 04aeb4114c46018cd94b50c57c461136a4dae5ff) | forks: 8 (2016-06-02 to 2020-06-13, same fork repo) | content:
import sys
import os
import logging
from pythonjsonlogger import jsonlogger
log = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
formatter = jsonlogger.JsonFormatter()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(int(os.environ.get('LOG_LEVEL', 20))) # Default to INFO log level
avg_line_length: 24.846154 | max_line_length: 79 | alphanum_fraction: 0.801858
575b3b1d27fc964715de0e9c302160aaffb4c772 | 807 | py | Python | FastAPI-Project-Template/settings/config.py | shikanon/privatecode | head 85ffa80ef2815ff1af799e38d033d9c8a7a1cad1 | licenses ["MIT"] | stars: null | issues: 2 (2022-02-13 to 2022-02-27) | forks: null | content:
# coding=utf-8
'''
# Author: shikanon ([email protected])
# File Created Time: 2020-03-31 11:04:51
#
# Project: settings
# File: config.py
# Description:
#
'''
import configparser
class Config:
    '''Configuration.
    '''
def __init__(self):
self.config = configparser.ConfigParser()
self.mysqldb = ""
self.redis = ""
def parse(self, path):
self.config.read(path)
if "db" not in self.config.sections():
raise ValueError("config file can not find db section")
host = self.config.get("db", "host")
port = self.config.get("db", "port")
username = self.config.get("db", "username")
passwd = self.config.get("db", "passwd")
        self.mysqldb = "mysql+pymysql://%s:%s@%s:%s/ovision"%(username,passwd,host,port)
avg_line_length: 26.032258 | max_line_length: 88 | alphanum_fraction: 0.592317
eeeb2fbfcc9befc64b80483380b12d3fcd88b229 | 10,047 | py | Python | prep_test_data.py | samirak93/NBA_Hackathon | head 035c0cb87114e14ad033fa1b6928d6954ab47024 | licenses ["MIT"] | stars: 1 (2019-07-24) | issues: null | forks: null | content:
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
#prepare columns for test data
df=pd.read_csv('test_set.csv')
data=pd.DataFrame(df)
mapping = {'CLE': 1, 'POR': 2,'GSW': 3,'ORL': 4,'IND': 5,'BOS': 6,'TOR': 7,'MIL': 8,'MEM': 9,
'PHI': 10,'PHX': 11,'LAL': 12,'ATL': 13,'CHI': 14,'SAC': 15,'BKN': 16,'DET': 17,'OKC': 18,
'MIA': 19,'UTA': 20,'NOP': 21,'NYK': 22,'SAS': 23,'DEN': 24,'LAC': 25,'HOU': 26,'MIN': 27,'WAS': 28,'CHA': 29,'DAL': 30}
updated=data.replace({'Home_Team': mapping,'Away_Team':mapping})
updated['home_team_score']=0
updated['away_team_score']=0
updated['wins_home']=0
updated['wins_away']=0
updated['loss_home']=0
updated['loss_away']=0
updated['largest_lead_home']=0
updated['largest_lead_away']=0
updated['result_win']=0
updated['ASG_Count']=0
updated['day']= pd.to_datetime(updated['Game_Date']).dt.dayofweek
array=[]
for x,y in zip(updated.Home_Team,updated.Away_Team):
if ((x==1)&(y==3)) | ((x==3)&(y==1)):
(array.append(int('1')))
elif ((x==6)&(y==12)) | ((x==12)&(y==6)):
array.append(int('1'))
elif ((x == 17) & (y == 12)) | ((x == 12) & (y == 17)):
array.append(int('1'))
elif ((x == 10) & (y == 6)) | ((x == 6) & (y == 10)):
array.append(int('2'))
elif ((x == 6) & (y == 22)) | ((x == 22) & (y == 6)):
array.append(int('2'))
elif ((x == 16) & (y == 22)) | ((x == 22) & (y == 16)):
array.append(int('2'))
elif ((x == 17) & (y == 14)) | ((x == 14) & (y == 17)):
array.append(int('2'))
elif ((x == 1) & (y == 14)) | ((x == 14) & (y == 1)):
array.append(int('2'))
elif ((x == 19) & (y == 14)) | ((x == 14) & (y == 19)):
array.append(int('2'))
elif ((x == 22) & (y == 14)) | ((x == 14) & (y == 22)):
array.append(int('2'))
elif ((x == 6) & (y == 17)) | ((x == 17) & (y == 6)):
array.append(int('2'))
elif ((x == 22) & (y == 19)) | ((x == 19) & (y == 22)):
array.append(int('2'))
elif ((x == 22) & (y == 5)) | ((x == 5) & (y == 22)):
array.append(int('2'))
elif ((x == 12) & (y == 25)) | ((x == 25) & (y == 12)):
array.append(int('3'))
elif ((x == 30) & (y == 26)) | ((x == 26) & (y == 30)):
array.append(int('3'))
elif ((x == 23) & (y == 26)) | ((x == 26) & (y == 23)):
array.append(int('3'))
elif ((x == 20) & (y == 26)) | ((x == 26) & (y == 20)):
array.append(int('3'))
elif ((x == 12) & (y == 23)) | ((x == 23) & (y == 12)):
array.append(int('3'))
elif ((x == 11) & (y == 23)) | ((x == 23) & (y == 11)):
array.append(int('3'))
else:
array.append(int(0))
updated["rivalry"]=array
home_team_rank=[]
twitter_followers_home=[]
for x in (updated.Home_Team):
if ((x==1)):
home_team_rank.append(int(11))
twitter_followers_home.append(int(2100000))
elif (x==2):
home_team_rank.append(int(17))
twitter_followers_home.append(int(823000))
elif (x==3):
home_team_rank.append(int(3))
twitter_followers_home.append(int(3500000))
elif (x==4):
home_team_rank.append(int(19))
twitter_followers_home.append(int(1500000))
elif (x==5):
home_team_rank.append(int(24))
twitter_followers_home.append(int(930000))
elif (x==6):
home_team_rank.append(int(5))
twitter_followers_home.append(int(2300000))
elif (x==7):
home_team_rank.append(int(13))
twitter_followers_home.append(int(1400000))
elif (x==8):
home_team_rank.append(int(27))
twitter_followers_home.append(int(695000))
elif (x==9):
home_team_rank.append(int(26))
twitter_followers_home.append(int(766000))
elif (x==10):
home_team_rank.append(int(25))
twitter_followers_home.append(int(925000))
elif (x==11):
home_team_rank.append(int(14))
twitter_followers_home.append(int(753000))
elif (x==12):
home_team_rank.append(int(2))
twitter_followers_home.append(int(6170000))
elif (x==13):
home_team_rank.append(int(23))
twitter_followers_home.append(int(991000))
elif (x==14):
home_team_rank.append(int(4))
twitter_followers_home.append(int(3600000))
elif (x==15):
home_team_rank.append(int(15))
twitter_followers_home.append(int(714000))
elif (x==16):
home_team_rank.append(int(7))
twitter_followers_home.append(int(755000))
elif (x==17):
home_team_rank.append(int(21))
twitter_followers_home.append(int(710000))
elif (x==18):
home_team_rank.append(int(16))
twitter_followers_home.append(int(1800000))
elif (x==19):
home_team_rank.append(int(10))
twitter_followers_home.append(int(4090000))
elif (x==20):
home_team_rank.append(int(20))
twitter_followers_home.append(int(632000))
elif (x==21):
home_team_rank.append(int(30))
twitter_followers_home.append(int(659000))
elif (x==22):
home_team_rank.append(int(1))
twitter_followers_home.append(int(1780000))
elif (x==23):
home_team_rank.append(int(12))
twitter_followers_home.append(int(2300000))
elif (x==24):
home_team_rank.append(int(22))
twitter_followers_home.append(int(634000))
elif (x==25):
home_team_rank.append(int(6))
twitter_followers_home.append(int(1100000))
elif (x==26):
home_team_rank.append(int(8))
twitter_followers_home.append(int(1710000))
elif (x==27):
home_team_rank.append(int(29))
twitter_followers_home.append(int(645000))
elif (x == 28):
home_team_rank.append(int(18))
twitter_followers_home.append(int(662000))
elif (x==29):
home_team_rank.append(int(28))
twitter_followers_home.append(int(726000))
elif (x==30):
twitter_followers_home.append(int(1200000))
home_team_rank.append(int(9))
twitter_followers_away=[]
away_team_rank=[]
for x in (updated.Away_Team):
if ((x==1)):
away_team_rank.append(int(11))
twitter_followers_away.append(int(2100000))
elif (x==2):
away_team_rank.append(int(17))
twitter_followers_away.append(int(823000))
elif (x==3):
away_team_rank.append(int(3))
twitter_followers_away.append(int(3500000))
elif (x==4):
away_team_rank.append(int(19))
twitter_followers_away.append(int(1500000))
elif (x==5):
away_team_rank.append(int(24))
twitter_followers_away.append(int(930000))
elif (x==6):
away_team_rank.append(int(5))
twitter_followers_away.append(int(2300000))
elif (x==7):
away_team_rank.append(int(13))
twitter_followers_away.append(int(1400000))
elif (x==8):
away_team_rank.append(int(27))
twitter_followers_away.append(int(695000))
elif (x==9):
away_team_rank.append(int(26))
twitter_followers_away.append(int(766000))
elif (x==10):
away_team_rank.append(int(25))
twitter_followers_away.append(int(925000))
elif (x==11):
away_team_rank.append(int(14))
twitter_followers_away.append(int(753000))
elif (x==12):
away_team_rank.append(int(2))
twitter_followers_away.append(int(6170000))
elif (x==13):
away_team_rank.append(int(23))
twitter_followers_away.append(int(991000))
elif (x==14):
away_team_rank.append(int(4))
twitter_followers_away.append(int(3600000))
elif (x==15):
away_team_rank.append(int(15))
twitter_followers_away.append(int(714000))
elif (x==16):
away_team_rank.append(int(7))
twitter_followers_away.append(int(755000))
elif (x==17):
away_team_rank.append(int(21))
twitter_followers_away.append(int(710000))
elif (x==18):
away_team_rank.append(int(16))
twitter_followers_away.append(int(1800000))
elif (x==19):
away_team_rank.append(int(10))
twitter_followers_away.append(int(4090000))
elif (x==20):
away_team_rank.append(int(20))
twitter_followers_away.append(int(632000))
elif (x==21):
away_team_rank.append(int(30))
twitter_followers_away.append(int(659000))
elif (x==22):
away_team_rank.append(int(1))
twitter_followers_away.append(int(1780000))
elif (x==23):
away_team_rank.append(int(12))
twitter_followers_away.append(int(2300000))
elif (x==24):
away_team_rank.append(int(22))
twitter_followers_away.append(int(634000))
elif (x==25):
away_team_rank.append(int(6))
twitter_followers_away.append(int(1100000))
elif (x==26):
away_team_rank.append(int(8))
twitter_followers_away.append(int(1710000))
elif (x==27):
away_team_rank.append(int(29))
twitter_followers_away.append(int(645000))
elif (x == 28):
away_team_rank.append(int(18))
twitter_followers_away.append(int(662000))
elif (x==29):
away_team_rank.append(int(28))
twitter_followers_away.append(int(726000))
elif (x==30):
twitter_followers_away.append(int(1200000))
away_team_rank.append(int(9))
updated['Home_Team_Twitter']=twitter_followers_home
updated['Away_Team_Twitter']=twitter_followers_away
updated['Home_Team_Rank']=home_team_rank
updated['Away_Team_Rank']=away_team_rank
updated['Game_Date'] = pd.to_datetime(updated.Game_Date)
cal = calendar()
holidays = cal.holidays(start=updated.Game_Date.min(), end=updated.Game_Date.max())
updated['Holiday'] = updated['Game_Date'].isin(holidays)
holiday=[]
for holi in updated.Holiday:
if holi==True:
holiday.append(int(1))
elif holi!=True:
holiday.append(int(0))
updated['Holiday'] = holiday
df_dummy=updated.pop('Total_Viewers')
updated['Total_Viewers']=df_dummy
updated.to_csv('test_data.csv')
print updated.shape
avg_line_length: 35.006969 | max_line_length: 131 | alphanum_fraction: 0.599681
1649b1bcb3483747d649e7cd39dd913429cb6b3c | 10,798 | py | Python | NVLL/distribution/vmf_hypvae.py | jennhu/vmf_vae_nlp | head 95a39fa9f7a0659e432475e8dfb9a46e305d53b7 | licenses ["MIT"] | stars: 159 (2018-08-31 to 2022-03-27) | issues: 9 (2018-10-11 to 2019-04-16) | forks: 21 (2018-09-01 to 2021-12-17) | content:
import torch
from scipy import special as sp
import numpy as np
from NVLL.util.util import GVar
from NVLL.util.gpu_flag import device
from torch.autograd import gradcheck
class BesselIve(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
@staticmethod
def forward(ctx, dim, kappa):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
ctx.save_for_backward(dim, kappa)
kappa_copy = kappa.clone()
m = sp.ive(dim, kappa_copy)
x = torch.tensor(m).to(device)
# x = torch.from_numpy(np.asarray(sp.ive(dim, kappa)))
return x.clone()
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
# print('called')
dim, kappa = ctx.saved_tensors
grad_input = grad_output.clone()
grad = grad_input * (bessel_ive(dim - 1, kappa) - bessel_ive(dim, kappa) * (dim + kappa) / kappa)
# grad = grad_input * (bessel(dim-1, kappa) + bessel(dim+1, kappa)) *0.5
return None, grad
class BesselIv(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
@staticmethod
def forward(ctx, dim, kappa):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
ctx.save_for_backward(dim, kappa)
kappa_copy = kappa.clone()
m = sp.iv(dim, kappa_copy)
x = torch.tensor(m).to(device)
return x.clone()
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
# print('called')
dim, kappa = ctx.saved_tensors
grad_input = grad_output.clone()
# grad = grad_input * (bessel_ive(dim - 1, kappa) - bessel_ive(dim, kappa) * (dim + kappa) / kappa)
grad = grad_input * (bessel_iv(dim - 1, kappa) + bessel_iv(dim + 1, kappa)) * 0.5
return None, grad
bessel_ive = BesselIve.apply
bessel_iv = BesselIv.apply
# dim = torch.tensor(3.0).to(device)
# kappa = torch.tensor(100.0,requires_grad=True).to(device)
# res = torch.autograd.gradcheck(bessel_ive, (dim, kappa), raise_exception=True)
#
# print(res)
# exit()
class VmfDiff(torch.nn.Module):
def __init__(self, hid_dim, lat_dim):
super().__init__()
self.hid_dim = hid_dim
self.lat_dim = lat_dim
self.func_mu = torch.nn.Linear(hid_dim, lat_dim)
self.func_kappa = torch.nn.Linear(hid_dim, 1)
# self.kld = GVar(torch.from_numpy(vMF._vmf_kld(kappa, lat_dim)).float())
# print('KLD: {}'.format(self.kld.data[0]))
self.nonneg = torch.nn.ReLU()
def estimate_param(self, latent_code):
ret_dict = {}
# print(torch.max(self.func_kappa(latent_code)).item())
# ret_dict['kappa'] = self.nonneg(1 + self.func_kappa(latent_code) * 5 ) +1
ret_dict['kappa'] = torch.max(torch.min(self.func_kappa(latent_code) * 10 + 50, torch.tensor(150.0).to(device)),
torch.tensor(10.0).to(device))
# Only compute mu, use mu/mu_norm as mu,
# use 1 as norm, use diff(mu_norm, 1) as redundant_norm
mu = self.func_mu(latent_code)
norm = torch.norm(mu, 2, 1, keepdim=True)
mu_norm_sq_diff_from_one = torch.pow(torch.add(norm, -1), 2)
redundant_norm = torch.sum(mu_norm_sq_diff_from_one, dim=1, keepdim=True)
ret_dict['norm'] = torch.ones_like(mu)
ret_dict['redundant_norm'] = redundant_norm
mu = mu / torch.norm(mu, p=2, dim=1, keepdim=True)
ret_dict['mu'] = mu
return ret_dict
def compute_KLD(self, tup, batch_sz):
kappa = tup['kappa']
d = self.lat_dim
rt_bag = []
# const = torch.log(torch.tensor(3.1415926)) * d / 2 + torch.log(torch.tensor(2.0)) \
# - torch.tensor(sp.loggamma(d / 2).real) - (d / 2) * torch.log(torch.tensor(2 * 3.1415926))
const = torch.tensor(
np.log(np.pi) * d / 2 + np.log(2) - sp.loggamma(d / 2).real - (d / 2) * np.log(2 * np.pi)).to(
device)
d = torch.tensor([d], dtype=torch.float).to(device)
batchsz = kappa.size()[0]
rt_tensor = torch.zeros(batchsz)
for k_idx in range(batchsz):
k = kappa[k_idx]
# print(k)
# print(k)
# print(d)
first = k * bessel_iv(d / 2, k) / bessel_iv(d / 2 - 1, k)
second = (d / 2 - 1) * torch.log(k) - torch.log(bessel_iv(d / 2 - 1, k))
combin = first + second + const
rt_tensor[k_idx] = combin
# rt_bag.append(combin)
return rt_tensor.to(device)
# return torch.tensor(rt_bag,requires_grad=True).to(device)
def build_bow_rep(self, lat_code, n_sample):
batch_sz = lat_code.size()[0]
tup = self.estimate_param(latent_code=lat_code)
mu = tup['mu']
norm = tup['norm']
kappa = tup['kappa']
kld = self.compute_KLD(tup, batch_sz)
vecs = []
kappa_clone = kappa.detach().cpu().numpy()
if n_sample == 1:
return tup, kld, self.sample_cell(mu, norm, kappa_clone)
for n in range(n_sample):
sample = self.sample_cell(mu, norm, kappa_clone)
vecs.append(sample)
vecs = torch.cat(vecs, dim=0)
return tup, kld, vecs
def sample_cell(self, mu, norm, kappa):
batch_sz, lat_dim = mu.size()
# mu = GVar(mu)
mu = mu / torch.norm(mu, p=2, dim=1, keepdim=True)
w = self._sample_weight_batch(kappa, lat_dim, batch_sz)
w = w.unsqueeze(1)
# batch version
w_var = GVar(w * torch.ones(batch_sz, lat_dim).to(device))
v = self._sample_ortho_batch(mu, lat_dim)
scale_factr = torch.sqrt(
GVar(torch.ones(batch_sz, lat_dim)) - torch.pow(w_var, 2))
orth_term = v * scale_factr
muscale = mu * w_var
sampled_vec = orth_term + muscale
return sampled_vec.unsqueeze(0).to(device)
def _sample_weight_batch(self, kappa, dim, batch_sz=1):
# result = torch.FloatTensor((batch_sz))
result = np.zeros((batch_sz))
for b in range(batch_sz):
result[b] = self._sample_weight(kappa[b], dim)
return torch.from_numpy(result).float().to(device)
def _sample_weight(self, kappa, dim):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
# print(dim)
# print(kappa)
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) # b= 1/(sqrt(4.* kdiv**2 + 1) + 2 * kdiv)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2) # dim * (kdiv *x + np.log(1-x**2))
while True:
z = np.random.beta(dim / 2., dim / 2.) # concentrates towards 0.5 as d-> inf
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(
u): # thresh is dim *(kdiv * (w-x) + log(1-x*w) -log(1-x**2))
return w
def _sample_ortho_batch(self, mu, dim):
"""
:param mu: Variable, [batch size, latent dim]
:param dim: scala. =latent dim
:return:
"""
_batch_sz, _lat_dim = mu.size()
assert _lat_dim == dim
squeezed_mu = mu.unsqueeze(1)
v = GVar(torch.randn(_batch_sz, dim, 1)) # TODO random
# v = GVar(torch.linspace(-1, 1, steps=dim))
# v = v.expand(_batch_sz, dim).unsqueeze(2)
rescale_val = torch.bmm(squeezed_mu, v).squeeze(2)
proj_mu_v = mu * rescale_val
ortho = v.squeeze() - proj_mu_v
ortho_norm = torch.norm(ortho, p=2, dim=1, keepdim=True)
y = ortho / ortho_norm
return y
def _sample_orthonormal_to(self, mu, dim):
"""Sample point on sphere orthogonal to mu.
"""
v = GVar(torch.randn(dim)) # TODO random
# v = GVar(torch.linspace(-1,1,steps=dim))
rescale_value = mu.dot(v) / mu.norm()
proj_mu_v = mu * rescale_value.expand(dim)
ortho = v - proj_mu_v
ortho_norm = torch.norm(ortho)
return ortho / ortho_norm.expand_as(ortho)
#
# a = torch.tensor(10)
# b = torch.ones(1, dtype=torch.float, requires_grad=True)
#
# y = bessel(a, b)
# loss = 1 - y
# print(y)
# loss.backward()
# print(a)
def KL_guu(k, d):
kld = k * ((sp.iv(d / 2.0 + 1.0, k) \
+ sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
+ d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
- sp.loggamma(d / 2 + 1) - d * np.log(2) / 2
return kld
from scipy.special import ive
from scipy.special import iv
# print(iv(100,50))
def KL_davidson(k, d):
vmf_entropy = k * ive(d / 2, k) / ive((d / 2) - 1, k) + \
(d / 2 - 1) * np.log(k) \
- (d / 2) * np.log(2 * np.pi) - np.log(iv(d / 2 - 1, k))
hyu_ent = np.log(2) + (d / 2) * np.log(np.pi) - sp.loggamma(
d / 2)
kl = vmf_entropy + hyu_ent
return kl
#
# first = k * bessel(d / 2, k) / bessel(d / 2 - 1, k)
# second = (d / 2 - 1) * torch.log(k) - torch.log(bessel(d / 2 - 1, k))
# const = torch.tensor(
# np.log(3.1415926) * d / 2 + np.log(2) - sp.loggamma(d / 2).real - (d / 2) * np.log(2 * 3.1415926)).to(
# devic
# for kappa in range(10, 150, 20):
# for d in range(50, 150, 50):
# print("Davidson:{}\t\tGuu:{}".format(KL_davidson(kappa, d), KL_guu(kappa, d)))
avg_line_length: 35.873754 | max_line_length: 120 | alphanum_fraction: 0.576403
c7f667527161b701f9dccfad3c7aa7ea6fa76227 | 14,011 | py | Python | MAPS/scalar_train_small_fully_connected.py | gmooers96/CBRAIN-CAM | head c5a26e415c031dea011d7cb0b8b4c1ca00751e2a | licenses ["MIT"] | stars: null | issues: null | forks: 5 (2019-09-30 to 2022-03-01) | content:
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import argparse
import json
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import netCDF4
import keras
from keras import layers
from keras import backend as K
from keras.models import Model
from keras.losses import binary_crossentropy, mse
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint
class AnnealingCallback(keras.callbacks.Callback):
def __init__(self, epochs):
super(AnnealingCallback, self).__init__()
self.epochs = epochs
def on_epoch_begin(self, epoch, logs={}):
new_kl_weight = epoch/self.epochs
K.set_value(self.model.kl_weight, new_kl_weight)
print("Using updated KL Weight:", K.get_value(self.model.kl_weight))
class Sampling(keras.layers.Layer):
def call(self, inputs):
"""
TODO
"""
mean, log_var = inputs
return K.random_normal(tf.shape(log_var)) * K.exp(log_var/2) + mean
def kl_reconstruction_loss(z_log_var, z_mean, vae, lambda_weight):
def _kl_reconstruction_loss(true, pred):
"""
TODO
"""
true = tf.reshape(true, [-1, 128])
x_mu = pred[:, :128]
x_log_var = pred[:, 128:]
# Gaussian reconstruction loss
mse = -0.5 * K.sum(K.square(true - x_mu)/K.exp(x_log_var), axis=1)
var_trace = -0.5 * K.sum(x_log_var, axis=1)
log2pi = -0.5 * 128 * np.log(2 * np.pi)
log_likelihood = mse + var_trace + log2pi
#print("log likelihood shape", log_likelihood.shape)
# NOTE: We don't take a mean here, since we first want to add the KL term
reconstruction_loss = -log_likelihood
# KL divergence loss
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=1)
kl_loss *= -0.5
print("true is",tf.shape(true))
print("true is",true.get_shape)
print("true is", K.int_shape(true))
print("x_mu is",tf.shape(x_mu))
print("x_mu is",x_mu.get_shape)
print("x_mu is", K.int_shape(x_mu))
#print(fgdfdfgdfag)
covariance_truth = tfp.stats.covariance(true)
covariance_prediction = tfp.stats.covariance(x_mu)
Frobenius_norm = tf.norm(covariance_prediction-covariance_truth, ord="euclidean")
print("true is",tf.shape(true))
print("true is",true.get_shape)
print("true is", K.int_shape(true))
print("x_mu is",tf.shape(x_mu))
print("x_mu is",x_mu.get_shape)
print("x_mu is", K.int_shape(x_mu))
#Frobenius_norm = K.sum(Frobenius_norm, axis = 1)
#print("Frobenius_norm is",tf.shape(Frobenius_norm))
#print("Frobenius_norm is",Frobenius_norm.get_shape)
print("reconstruction_loss is",tf.shape(reconstruction_loss))
print("reconstruction_loss is",reconstruction_loss.get_shape)
print("reconstruction_loss is", K.int_shape(reconstruction_loss))
print("kl_loss is",tf.shape(kl_loss))
print("kl_loss is",kl_loss.get_shape)
print("kl_loss is", K.int_shape(kl_loss))
#print(gsdgsgs)
#Frobenius_norm = K.sum(Frobenius_norm, axis = 1)
#####################################################################################
#return K.mean(reconstruction_loss + vae.kl_weight*kl_loss + lambda_weight*Frobenius_norm)
return K.mean(reconstruction_loss + vae.kl_weight*kl_loss)# + lambda_weight*Frobenius_norm)
return _kl_reconstruction_loss
def kl(z_log_var, z_mean):
def _kl(true, pred):
"""
TODO
"""
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
# kl_loss = K.print_tensor(kl_loss, message='EULA PEULA')
return K.mean(kl_loss)
return _kl
def reconstruction(true, pred):
"""
TODO
"""
true = tf.reshape(true, [-1, 128])
x_mu = pred[:, :128]
x_log_var = pred[:, 128:]
mse = -0.5 * K.sum(K.square(true - x_mu)/K.exp(x_log_var), axis=1)
var_trace = -0.5 * K.sum(x_log_var, axis=1)
log2pi = -0.5 * 128 * np.log(2 * np.pi)
log_likelihood = mse + var_trace + log2pi
print("log likelihood shape", log_likelihood.shape)
return K.mean(-log_likelihood)
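# Added sanity-check sketch (not in the original script): a NumPy version of the
# Gaussian log-likelihood decomposition used in `reconstruction` above, i.e.
# log N(x; mu, exp(log_var)) = -0.5*(x - mu)**2/var - 0.5*log_var - 0.5*log(2*pi),
# summed over the 128 outputs. The helper name is illustrative only.
def _gaussian_nll_numpy_check(true_np, mu_np, log_var_np):
    """Mean negative Gaussian log-likelihood computed with NumPy, for comparison."""
    mse = -0.5 * np.sum(np.square(true_np - mu_np) / np.exp(log_var_np), axis=1)
    var_trace = -0.5 * np.sum(log_var_np, axis=1)
    log2pi = -0.5 * true_np.shape[1] * np.log(2 * np.pi)
    return np.mean(-(mse + var_trace + log2pi))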
def constrainer(z_log_var, z_mean, lambda_weight):
def _constrainer(true, pred):
true = tf.reshape(true, [-1, 128])
x_mu = pred[:, :128]
covariance_truth = tfp.stats.covariance(true)
covariance_prediction = tfp.stats.covariance(x_mu)
Frobenius_norm = tf.norm(covariance_prediction-covariance_truth, ord="euclidean")
return lambda_weight*Frobenius_norm
#return 1000000.0*Frobenius_norm
return _constrainer
def power_spectrum(z_log_var, z_mean):
def _power_spectrum(true, pred):
p850 = tf.reshape(pred[22,:], [-1, 128 ])
t850 = tf.reshape(true[22,:], [-1, 128 ])
p850 = tf.cast(p850, dtype=tf.float32)
t850 = tf.cast(t850, dtype=tf.float32)
P_pred = tf.signal.rfft(p850)*tf.math.conj(tf.signal.rfft(p850))
P_truth = tf.signal.rfft(t850)*tf.math.conj(tf.signal.rfft(t850))
spectrum_loss = tf.math.square(tf.math.log(P_pred/P_truth))
spectrum_loss = tf.cast(spectrum_loss, dtype=tf.float32)
#sprectrum_loss = K.sum(spectrum_loss, axis = 1)
return spectrum_loss
return _power_spectrum
def encoder_gen(input_shape: tuple, encoder_config: dict, id):
"""
Create the architecture for the VAE encoder.
"""
class EncoderResult():
pass
encoder_result = EncoderResult()
inputs = keras.layers.Input(shape=[input_shape, 1])
print("shape of input after padding", inputs.shape)
z = keras.layers.Flatten()(inputs)
shape_before_flattening = K.int_shape(z)
print("shape of input after flattening", inputs.shape)
print("shape after first Dense layer", z.shape)
z = keras.layers.Dense(encoder_config["dense_1"]["dim"], activation=encoder_config["activation"])(z)
print("shape after first Dense layer", z.shape)
z = keras.layers.Dense(encoder_config["dense_2"]["dim"], activation=encoder_config["activation"])(z)
print("shape after second Dense layer", z.shape)
# Compute mean and log variance
z_mean = keras.layers.Dense(encoder_config["latent_dim"], name='z_mean')(z)
z_log_var = keras.layers.Dense(encoder_config["latent_dim"], name='z_log_var')(z)
print("z mean shape", z_mean._keras_shape)
print("z log var shape", z_log_var._keras_shape)
z = Sampling()([z_mean, z_log_var])
# Instantiate Keras model for VAE encoder
vae_encoder = keras.Model(inputs=[inputs], outputs=[z_mean, z_log_var, z])
plot_model(vae_encoder, to_file='./model_graphs/model_diagrams/encoder_{}.png'.format(id), show_shapes=True)
# Package up everything for the encoder
encoder_result.inputs = inputs
encoder_result.z_mean = z_mean
encoder_result.z_log_var = z_log_var
encoder_result.z = z
encoder_result.vae_encoder = vae_encoder
return encoder_result, shape_before_flattening
def decoder_gen(
original_input: tuple,
decoder_config: dict, flatter_shape
):
"""
Create the architecture for the VAE decoder
"""
decoder_inputs = keras.layers.Input(shape=[decoder_config["latent_dim"]])
print("decoder_inputs", decoder_inputs._keras_shape)
#x = keras.layers.Dense(np.prod(flatter_shape[1:]), activation=decoder_config["activation"])(decoder_inputs)
#print("shape after initial change", x._keras_shape)
# Reshape input to be an image
#x = keras.layers.Reshape(flatter_shape[1:])(x)
#print("shape after resdhaping to an image", x._keras_shape)
#x = keras.layers.Dense(decoder_config["dense_1"]["dim"], activation=decoder_config["activation"])(x)
#print("shape after first dense layer", x._keras_shape)
x = keras.layers.Dense(decoder_config["dense_1"]["dim"], activation=decoder_config["activation"])(decoder_inputs)
print("shape after first dense layer", x._keras_shape)
x = keras.layers.Dense(decoder_config["dense_2"]["dim"], activation=decoder_config["activation"])(x)
print("shape after second dense layer", x.shape)
x_mu = keras.layers.Dense(decoder_config["dense_mu"]["dim"], activation=decoder_config["dense_mu"]["activation"])(x)
print("shape after dense mu layer", x_mu._keras_shape)
x_log_var = keras.layers.Dense(decoder_config["dense_log_var"]["dim"], activation=decoder_config["dense_log_var"]["activation"])(x)
print("shape after dense log var layer", x_log_var._keras_shape)
x_mu_log_var = keras.layers.Concatenate(axis=1)([x_mu, x_log_var])
variational_decoder = keras.Model(inputs=[decoder_inputs], outputs=[x_mu_log_var])
return variational_decoder
def plot_training_losses(h, id):
"""
Plot training loss graphs for
(1) KL term
(2) Reconstruction term
(3) Total ELBO loss
"""
hdict = h.history
print(hdict)
train_reconstruction_losses = hdict['reconstruction']
valid_reconstruction_losses = hdict['val_reconstruction']
kl_train_losses = hdict['_kl']
kl_valid_losses = hdict['val__kl']
#constraint_train_losses = hdict['_constrainer']
#constraint_valid_losses = hdict['val__constrainer']
total_train_losses = hdict['_kl_reconstruction_loss']
total_valid_losses = hdict['val__kl_reconstruction_loss']
epochs = range(1, len(train_reconstruction_losses) + 1)
#fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(12.8, 4.8))
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12.8, 4.8))
# Plot combined loss
ax1.plot(epochs, total_train_losses, 'b', label='Train')
ax1.plot(epochs, total_valid_losses, 'r', label='Valid')
ax1.set(xlabel="Epochs", ylabel="Loss")
ax1.legend(prop={'size': 10})
ax1.set_title("Combined Loss")
# Plot KL
ax2.plot(epochs, kl_train_losses, 'b', label='Train')
ax2.plot(epochs, kl_valid_losses, 'r', label='Valid')
ax2.set(xlabel="Epochs", ylabel="Loss")
ax2.legend(prop={'size': 10})
ax2.set_title("KL Loss")
# Plot reconstruction loss
ax3.plot(epochs, train_reconstruction_losses, 'b', label='Train')
ax3.plot(epochs, valid_reconstruction_losses, 'r', label='Valid')
ax3.set(xlabel="Epochs", ylabel="Loss")
ax3.legend(prop={'size': 10})
ax3.set_title("Reconstruction Loss")
plt.tight_layout()
plt.savefig('./model_graphs/losses/model_losses_{}.png'.format(id))
def main():
args = argument_parsing()
print("Command line args:", args)
f = open("./model_config/config_{}.json".format(args.id))
model_config = json.load(f)
f.close()
train_data = np.load(model_config["data"]["training_data_path"])
test_data = np.load(model_config["data"]["test_data_path"])
img_height = train_data.shape[1]
print("Image shape:", img_height)
# Construct VAE Encoder
encoder_result, shape_flatten = encoder_gen((img_height), model_config["encoder"], args.id)
# Construct VAE Decoder
vae_decoder = decoder_gen(
(img_height),
model_config["decoder"], shape_flatten
)
plot_model(vae_decoder, to_file='./model_graphs/model_diagrams/decoder_{}.png'.format(args.id), show_shapes=True)
_, _, z = encoder_result.vae_encoder(encoder_result.inputs)
x_mu_log_var = vae_decoder(z)
vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_mu_log_var])
plot_model(vae, to_file='./model_graphs/model_diagrams/full_vae_{}.png'.format(args.id), show_shapes=True)
vae.kl_weight = K.variable(model_config["kl_weight"])
# Specify the optimizer
optimizer = keras.optimizers.Adam(lr=model_config['optimizer']['lr'])
stat_weight = model_config['contraint_weight']['lambda']
# Compile model
vae.compile(
# loss=reconstruction,
loss=kl_reconstruction_loss(
encoder_result.z_log_var,
encoder_result.z_mean,
vae,
stat_weight
),
optimizer=optimizer,
metrics=[
reconstruction,
kl(
encoder_result.z_log_var,
encoder_result.z_mean
),
kl_reconstruction_loss(
encoder_result.z_log_var,
encoder_result.z_mean,
vae,
stat_weight
)
]
)
vae.summary()
train_data = train_data.reshape(train_data.shape+(1,))
test_data = test_data.reshape(test_data.shape+(1,))
print("train data shape", train_data.shape)
print("test data shape", test_data.shape)
checkpoint = ModelCheckpoint(
'./models/model_{}.th'.format(args.id),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=True
)
callbacks_list = [checkpoint]
if model_config["annealing"]:
kl_weight_annealing = AnnealingCallback(model_config["train_epochs"])
callbacks_list.append(kl_weight_annealing)
h = vae.fit(
x=train_data,
y=train_data,
epochs=model_config["train_epochs"],
batch_size=model_config["batch_size"],
validation_data=[test_data, test_data],
callbacks=callbacks_list
)
plot_training_losses(h, args.id)
def argument_parsing():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, help='This option specifies the config file to use to construct and train the VAE.')
args = parser.parse_args()
return args
if __name__ == "__main__":
    main()
 | 34.766749 | 135 | 0.65213 |
7918d7f7478ae872ea70a028ef308f4e3e3f5d72 | 7,647 | py | Python | gewittergefahr/plotting/feature_map_plotting.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | ["MIT"] | 1 | 2020-11-19T08:15:03.000Z | 2020-11-19T08:15:03.000Z | gewittergefahr/plotting/feature_map_plotting.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | ["MIT"] | null | null | null | gewittergefahr/plotting/feature_map_plotting.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | ["MIT"] | null | null | null |
"""Plotting methods for CNN feature maps."""
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.plotting import plotting_utils
DEFAULT_FIG_WIDTH_INCHES = 15
DEFAULT_FIG_HEIGHT_INCHES = 15
DEFAULT_FONT_SIZE = 20
def plot_2d_feature_map(
feature_matrix, axes_object, colour_map_object,
font_size=DEFAULT_FONT_SIZE, colour_norm_object=None,
min_colour_value=None, max_colour_value=None, annotation_string=None):
"""Plots 2-D feature map.
M = number of rows in grid
N = number of columns in grid
:param feature_matrix: M-by-N numpy array of feature values (either before
or after activation function -- this method doesn't care).
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param font_size: Font size for annotation.
:param colour_map_object: Instance of `matplotlib.pyplot.cm`.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
:param min_colour_value: [used only if `colour_norm_object is None`]
Minimum value in colour scheme.
:param max_colour_value: [used only if `colour_norm_object is None`]
Max value in colour scheme.
:param annotation_string: Annotation (printed in the bottom-center of the
map). For no annotation, leave this alone.
"""
error_checking.assert_is_numpy_array_without_nan(feature_matrix)
error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=2)
if colour_norm_object is None:
error_checking.assert_is_greater(max_colour_value, min_colour_value)
colour_norm_object = None
else:
if hasattr(colour_norm_object, 'boundaries'):
min_colour_value = colour_norm_object.boundaries[0]
max_colour_value = colour_norm_object.boundaries[-1]
else:
min_colour_value = colour_norm_object.vmin
max_colour_value = colour_norm_object.vmax
axes_object.pcolormesh(
feature_matrix, cmap=colour_map_object, norm=colour_norm_object,
vmin=min_colour_value, vmax=max_colour_value, shading='flat',
edgecolors='None')
if annotation_string is not None:
error_checking.assert_is_string(annotation_string)
axes_object.text(
0.5, 0.01, annotation_string, fontsize=font_size, fontweight='bold',
color='black', horizontalalignment='center',
verticalalignment='bottom', transform=axes_object.transAxes)
axes_object.set_xticks([])
axes_object.set_yticks([])
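# --- Editorial usage sketch (not part of the original source) ----------------
# Minimal example of calling plot_2d_feature_map on a random 32 x 32 feature
# map.  The colour limits, colour map and output file name are illustrative
# assumptions only.
def _example_plot_single_feature_map():
    feature_matrix = numpy.random.normal(size=(32, 32))
    _, axes_object = pyplot.subplots(1, 1, figsize=(6, 6))
    plot_2d_feature_map(
        feature_matrix=feature_matrix, axes_object=axes_object,
        colour_map_object=pyplot.cm.seismic,
        min_colour_value=-3., max_colour_value=3.,
        annotation_string='example')
    pyplot.savefig('example_feature_map.png')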
def plot_many_2d_feature_maps(
feature_matrix, annotation_string_by_panel, num_panel_rows,
colour_map_object, colour_norm_object=None, min_colour_value=None,
max_colour_value=None, figure_width_inches=DEFAULT_FIG_WIDTH_INCHES,
figure_height_inches=DEFAULT_FIG_HEIGHT_INCHES,
font_size=DEFAULT_FONT_SIZE):
"""Plots many 2-D feature maps in the same figure (one per panel).
M = number of rows in spatial grid
N = number of columns in spatial grid
P = number of panels
:param feature_matrix: M-by-N-by-P numpy array of feature values (either
before or after activation function -- this method doesn't care).
:param annotation_string_by_panel: length-P list of annotations.
annotation_string_by_panel[k] will be printed in the bottom-center of
the [k]th panel.
:param num_panel_rows: Number of panel rows.
:param colour_map_object: See doc for `plot_2d_feature_map`.
:param colour_norm_object: Same.
:param min_colour_value: Same.
:param max_colour_value: Same.
:param figure_width_inches: Figure width.
:param figure_height_inches: Figure height.
:param font_size: Font size for panel labels.
:return: figure_object: See doc for `plotting_utils.create_paneled_figure`.
:return: axes_object_matrix: Same.
"""
pyplot.rc('axes', linewidth=3)
error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=3)
num_panels = feature_matrix.shape[-1]
error_checking.assert_is_numpy_array(
numpy.array(annotation_string_by_panel),
exact_dimensions=numpy.array([num_panels])
)
error_checking.assert_is_integer(num_panel_rows)
error_checking.assert_is_geq(num_panel_rows, 1)
error_checking.assert_is_leq(num_panel_rows, num_panels)
num_panel_columns = int(numpy.ceil(
float(num_panels) / num_panel_rows
))
figure_object, axes_object_matrix = plotting_utils.create_paneled_figure(
num_rows=num_panel_rows, num_columns=num_panel_columns,
figure_width_inches=figure_width_inches,
figure_height_inches=figure_height_inches,
horizontal_spacing=0., vertical_spacing=0.,
shared_x_axis=False, shared_y_axis=False, keep_aspect_ratio=False)
for i in range(num_panel_rows):
for j in range(num_panel_columns):
this_linear_index = i * num_panel_columns + j
if this_linear_index >= num_panels:
axes_object_matrix[i, j].axis('off')
continue
plot_2d_feature_map(
feature_matrix=feature_matrix[..., this_linear_index],
axes_object=axes_object_matrix[i, j], font_size=font_size,
colour_map_object=colour_map_object,
colour_norm_object=colour_norm_object,
min_colour_value=min_colour_value,
max_colour_value=max_colour_value,
annotation_string=annotation_string_by_panel[this_linear_index]
)
return figure_object, axes_object_matrix
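# --- Editorial usage sketch (not part of the original source) ----------------
# Example call with 8 random feature maps arranged in 2 panel rows; the
# values, labels and output file name are illustrative assumptions only.
def _example_plot_many_feature_maps():
    feature_matrix = numpy.random.normal(size=(32, 32, 8))
    annotation_strings = ['channel {0:d}'.format(k) for k in range(8)]
    figure_object, _ = plot_many_2d_feature_maps(
        feature_matrix=feature_matrix,
        annotation_string_by_panel=annotation_strings,
        num_panel_rows=2, colour_map_object=pyplot.cm.seismic,
        min_colour_value=-3., max_colour_value=3.)
    figure_object.savefig('example_feature_maps.png')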
def plot_many_1d_feature_maps(
feature_matrix, colour_map_object, colour_norm_object=None,
min_colour_value=None, max_colour_value=None,
figure_width_inches=DEFAULT_FIG_WIDTH_INCHES,
figure_height_inches=DEFAULT_FIG_HEIGHT_INCHES):
"""Plots many 1-D feature maps in the same figure (one per column).
N = number of points in spatial grid
C = number of channels
:param feature_matrix: N-by-C numpy array of feature values.
:param colour_map_object: See doc for `plot_many_2d_feature_maps`.
:param colour_norm_object: Same.
:param min_colour_value: Same.
:param max_colour_value: Same.
:param figure_width_inches: Same.
:param figure_height_inches: Same.
:return: figure_object: See doc for `plotting_utils.create_paneled_figure`.
:return: axes_object_matrix: Same.
"""
pyplot.rc('axes', linewidth=1)
error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=2)
num_channels = feature_matrix.shape[1]
num_spatial_points = feature_matrix.shape[0]
figure_object, axes_object_matrix = plotting_utils.create_paneled_figure(
num_rows=1, num_columns=num_channels,
figure_width_inches=figure_width_inches,
figure_height_inches=figure_height_inches,
horizontal_spacing=0., vertical_spacing=0.,
shared_x_axis=False, shared_y_axis=False, keep_aspect_ratio=False)
for k in range(num_channels):
this_matrix = numpy.reshape(
feature_matrix[..., k], (num_spatial_points, 1)
)
plot_2d_feature_map(
feature_matrix=this_matrix, axes_object=axes_object_matrix[0, k],
font_size=30, colour_map_object=colour_map_object,
colour_norm_object=colour_norm_object,
min_colour_value=min_colour_value,
max_colour_value=max_colour_value,
annotation_string=''
)
return figure_object, axes_object_matrix
| 39.828125 | 80 | 0.722898 |
c0013262202fd4a1674f576fe3efcc747907a571 | 28,315 | py | Python | gluoncv/model_zoo/rcnn/faster_rcnn/predefined_models.py | aptsunny/gluon-cv | 7f050d3411b1ada7d2b9515d63b848c55139fdbb | ["Apache-2.0"] | 1 | 2020-03-18T04:19:26.000Z | 2020-03-18T04:19:26.000Z | gluoncv/model_zoo/rcnn/faster_rcnn/predefined_models.py | aptsunny/gluon-cv | 7f050d3411b1ada7d2b9515d63b848c55139fdbb | ["Apache-2.0"] | null | null | null | gluoncv/model_zoo/rcnn/faster_rcnn/predefined_models.py | aptsunny/gluon-cv | 7f050d3411b1ada7d2b9515d63b848c55139fdbb | ["Apache-2.0"] | null | null | null |
"""Predefined Faster RCNN Model."""
from __future__ import absolute_import
import warnings
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import SyncBatchNorm
from ..faster_rcnn import get_faster_rcnn
from ....nn.feature import FPNFeatureExpander
__all__ = ['faster_rcnn_resnet50_v1b_voc',
'faster_rcnn_resnet50_v1b_coco',
'faster_rcnn_fpn_resnet50_v1b_coco',
'faster_rcnn_fpn_syncbn_resnet50_v1b_coco',
'faster_rcnn_resnet50_v1b_custom',
'faster_rcnn_resnet101_v1d_voc',
'faster_rcnn_resnet101_v1d_coco',
'faster_rcnn_fpn_resnet101_v1d_coco',
'faster_rcnn_fpn_syncbn_resnet101_v1d_coco',
'faster_rcnn_resnet101_v1d_custom']
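# --- Editorial usage sketch (not part of the original source) ----------------
# Typical way these predefined networks are consumed through the GluonCV
# model zoo.  The image path is a placeholder and the preset helper names are
# assumed from the public GluonCV API rather than taken from this file.
def _example_detect_with_pretrained_model(image_path='example.jpg'):
    from gluoncv import model_zoo, data
    net = model_zoo.get_model('faster_rcnn_resnet50_v1b_voc', pretrained=True)
    # Resize and normalise the image the same way the network was trained.
    x, orig_img = data.transforms.presets.rcnn.load_test(image_path)
    box_ids, scores, bboxes = net(x)
    return box_ids, scores, bboxes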
def faster_rcnn_resnet50_v1b_voc(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_resnet50_v1b_voc(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet50_v1b
from ....data import VOCDetection
classes = VOCDetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet50_v1b', dataset='voc', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
short=600, max_size=1000, train_patterns=train_patterns,
nms_thresh=0.3, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(14, 14), strides=16, clip=None,
rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100,
**kwargs)
def faster_rcnn_resnet50_v1b_coco(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_resnet50_v1b_coco(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet50_v1b
from ....data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet50_v1b', dataset='coco', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
short=800, max_size=1333, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1,
roi_mode='align', roi_size=(14, 14), strides=16, clip=4.14,
rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25,
max_num_gt=100, **kwargs)
def faster_rcnn_fpn_resnet50_v1b_coco(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model with FPN from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
"Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
Feature Pyramid Networks for Object Detection"
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_fpn_resnet50_v1b_coco(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet50_v1b
from ....data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = FPNFeatureExpander(
network=base_network,
outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu17_fwd',
'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
use_upsample=True, use_elewadd=True, use_p6=True, no_bias=False, pretrained=pretrained_base)
top_features = None
# 2 FC layer before RCNN cls and reg
box_features = nn.HybridSequential()
for _ in range(2):
box_features.add(nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)))
box_features.add(nn.Activation('relu'))
train_patterns = '|'.join(
['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv', 'P'])
return get_faster_rcnn(
name='fpn_resnet50_v1b', dataset='coco', pretrained=pretrained, features=features,
top_features=top_features, classes=classes, box_features=box_features,
short=800, max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=1024, base_size=16,
scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_fpn_syncbn_resnet50_v1b_coco(pretrained=False, pretrained_base=True, num_devices=0,
**kwargs):
r"""Faster RCNN model with FPN from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
"Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
Feature Pyramid Networks for Object Detection"
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
num_devices : int, default is 0
Number of devices for sync batch norm layer. if less than 1, use all devices available.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_fpn_syncbn_resnet50_v1b_coco(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet50_v1b
from ....data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
gluon_norm_kwargs = {'num_devices': num_devices} if num_devices >= 1 else {}
base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False, use_global_stats=False,
norm_layer=SyncBatchNorm, norm_kwargs=gluon_norm_kwargs, **kwargs)
sym_norm_kwargs = {'ndev': num_devices} if num_devices >= 1 else {}
features = FPNFeatureExpander(
network=base_network,
outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu17_fwd',
'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
use_upsample=True, use_elewadd=True, use_p6=True, no_bias=True, pretrained=pretrained_base,
norm_layer=mx.sym.contrib.SyncBatchNorm, norm_kwargs=sym_norm_kwargs)
top_features = None
# 1 Conv 1 FC layer before RCNN cls and reg
box_features = nn.HybridSequential()
box_features.add(nn.Conv2D(256, 3, padding=1, use_bias=False),
SyncBatchNorm(**gluon_norm_kwargs),
nn.Activation('relu'),
nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)),
nn.Activation('relu'))
train_patterns = '(?!.*moving)' # excluding symbol bn moving mean and var
return get_faster_rcnn(
name='fpn_syncbn_resnet50_v1b', dataset='coco', pretrained=pretrained, features=features,
top_features=top_features, classes=classes, box_features=box_features,
short=(640, 800), max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=256, base_size=16,
scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_resnet50_v1b_custom(classes, transfer=None, pretrained_base=True,
pretrained=False, **kwargs):
r"""Faster RCNN model with resnet50_v1b base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
on other datasets.
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
mxnet.gluon.HybridBlock
Hybrid faster RCNN network.
"""
if pretrained:
warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
if transfer is None:
from ....model_zoo.resnetv1b import resnet50_v1b
base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv',
'.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet50_v1b', dataset='custom', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
train_patterns=train_patterns, **kwargs)
else:
from ...model_zoo import get_model
net = get_model('faster_rcnn_resnet50_v1b_' + str(transfer), pretrained=True, **kwargs)
reuse_classes = [x for x in classes if x in net.classes]
net.reset_class(classes, reuse_weights=reuse_classes)
return net
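# --- Editorial usage sketch (not part of the original source) ----------------
# Reusing the VOC-pretrained detector for a custom class list via the
# `transfer` argument of the function above; the class names here are
# placeholders for illustration only.
def _example_custom_transfer():
    net = faster_rcnn_resnet50_v1b_custom(
        classes=['person', 'dog'], transfer='voc')
    return net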
def faster_rcnn_resnet101_v1d_voc(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
Parameters
----------
pretrained : bool, optional, default is False
Load pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_resnet101_v1d_voc(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet101_v1d
from ....data import VOCDetection
classes = VOCDetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet101_v1d', dataset='voc', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
short=600, max_size=1000, train_patterns=train_patterns,
nms_thresh=0.3, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(14, 14), strides=16, clip=None,
rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100,
**kwargs)
def faster_rcnn_resnet101_v1d_coco(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
Parameters
----------
pretrained : bool, optional, default is False
Load pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_resnet101_v1d_coco(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet101_v1d
from ....data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet101_v1d', dataset='coco', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
short=800, max_size=1333, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1,
roi_mode='align', roi_size=(14, 14), strides=16, clip=4.14,
rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100,
**kwargs)
def faster_rcnn_fpn_resnet101_v1d_coco(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model with FPN from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
"Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
Feature Pyramid Networks for Object Detection"
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_fpn_resnet101_v1d_coco(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet101_v1d
from ....data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = FPNFeatureExpander(
network=base_network,
outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu68_fwd',
'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
use_upsample=True, use_elewadd=True, use_p6=True, no_bias=False, pretrained=pretrained_base)
top_features = None
# 2 FC layer before RCNN cls and reg
box_features = nn.HybridSequential()
for _ in range(2):
box_features.add(nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)))
box_features.add(nn.Activation('relu'))
train_patterns = '|'.join(
['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv', 'P'])
return get_faster_rcnn(
name='fpn_resnet101_v1d', dataset='coco', pretrained=pretrained, features=features,
top_features=top_features, classes=classes, box_features=box_features,
short=800, max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=1024, base_size=16,
scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_fpn_syncbn_resnet101_v1d_coco(pretrained=False, pretrained_base=True, num_devices=0,
**kwargs):
r"""Faster RCNN model with FPN from the paper
"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
"Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
Feature Pyramid Networks for Object Detection"
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
num_devices : int, default is 0
Number of devices for sync batch norm layer. if less than 1, use all devices available.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_faster_rcnn_fpn_syncbn_resnet101_v1d_coco(pretrained=True)
>>> print(model)
"""
from ....model_zoo.resnetv1b import resnet101_v1d
from ....data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
gluon_norm_kwargs = {'num_devices': num_devices} if num_devices >= 1 else {}
base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False, use_global_stats=False,
norm_layer=SyncBatchNorm, norm_kwargs=gluon_norm_kwargs, **kwargs)
sym_norm_kwargs = {'ndev': num_devices} if num_devices >= 1 else {}
features = FPNFeatureExpander(
network=base_network,
outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu68_fwd',
'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
use_upsample=True, use_elewadd=True, use_p6=True, no_bias=True, pretrained=pretrained_base,
norm_layer=mx.sym.contrib.SyncBatchNorm, norm_kwargs=sym_norm_kwargs)
top_features = None
# 1 Conv 1 FC layer before RCNN cls and reg
box_features = nn.HybridSequential()
for _ in range(4):
box_features.add(nn.Conv2D(256, 3, padding=1, use_bias=False),
SyncBatchNorm(**gluon_norm_kwargs),
nn.Activation('relu'))
box_features.add(nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)),
nn.Activation('relu'))
train_patterns = '(?!.*moving)' # excluding symbol bn moving mean and var
return get_faster_rcnn(
name='fpn_syncbn_resnet101_v1d', dataset='coco', pretrained=pretrained, features=features,
top_features=top_features, classes=classes, box_features=box_features,
short=(640, 800), max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=256, base_size=16,
scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_resnet101_v1d_custom(classes, transfer=None, pretrained_base=True,
pretrained=False, **kwargs):
r"""Faster RCNN model with resnet101_v1d base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
on other datasets.
pretrained_base : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
mxnet.gluon.HybridBlock
Hybrid faster RCNN network.
"""
if pretrained:
warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
if transfer is None:
from ....model_zoo.resnetv1b import resnet101_v1d
base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv',
'.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet101_v1d', dataset='custom', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
train_patterns=train_patterns, **kwargs)
else:
from ....model_zoo import get_model
net = get_model('faster_rcnn_resnet101_v1d_' + str(transfer), pretrained=True, **kwargs)
reuse_classes = [x for x in classes if x in net.classes]
net.reset_class(classes, reuse_weights=reuse_classes)
return net
| 49.938272 | 100 | 0.671446 |
fd42a12229725badd8822327248f7f2b9ba862b6 | 16,763 | py | Python | test/functional/test_framework/mininode.py | SovranoCoin/sovranocoin | d18c83a4f4db44393de271eb2b8fba6c1d536db1 | ["MIT"] | 4 | 2019-09-15T01:19:06.000Z | 2021-05-03T13:59:19.000Z | test/functional/test_framework/mininode.py | SovranoCoin/sovranocoin | d18c83a4f4db44393de271eb2b8fba6c1d536db1 | ["MIT"] | null | null | null | test/functional/test_framework/mininode.py | SovranoCoin/sovranocoin | d18c83a4f4db44393de271eb2b8fba6c1d536db1 | ["MIT"] | 5 | 2019-01-15T18:59:04.000Z | 2020-06-21T08:42:32.000Z |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
import logging
import socket
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
#b"getsporks": msg_generic,
}
MAGIC_BYTES = {
"mainnet": b"\x90\xc4\xfd\xe9", # mainnet
"testnet3": b"\x45\x76\x65\xba", # testnet3
"regtest": b"\xa1\xcf\x7e\xac", # regtest
}
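# --- Editorial note (not part of the original source) -------------------------
# Every message on the wire is framed exactly as parsed in
# P2PConnection._on_data() below:
#   magic (4 bytes, MAGIC_BYTES[network]) | command (12 bytes, NUL-padded)
#   | payload length (4 bytes, little-endian) | checksum (first 4 bytes of
#   double-SHA256 of the payload) | payload.
# send_message() builds the same framing in the outgoing direction.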
class P2PConnection(asyncore.dispatcher):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# All P2PConnections must be created before starting the NetworkThread.
# assert that the network thread is not running.
assert not network_thread_running()
super().__init__(map=mininode_socket_map)
def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
self.state = "connecting"
self.network = net
self.disconnect = False
logger.info('Connecting to SOVRANOCOIN Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
def peer_disconnect(self):
# Connection could have already been closed by other end.
if self.state == "connected":
self.disconnect_node()
# Connection and disconnection methods
def handle_connect(self):
"""asyncore callback when a connection is opened."""
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.on_open()
def handle_close(self):
"""asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.on_close()
def disconnect_node(self):
"""Disconnect the p2p connection.
Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
self.disconnect = True
# Socket read methods
def handle_read(self):
"""asyncore callback when data is read from the socket."""
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in MESSAGEMAP:
#raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
logger.debug("Command: '" + str(command) + "'")
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def writable(self):
"""asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
"""asyncore callback when data should be written to the socket."""
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
# Class utility methods
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs):
super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.state != "connected"
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
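# --- Editorial usage sketch (not part of the original source) -----------------
# Minimal flow for talking to a regtest node with this framework.  The port
# number is a placeholder; functional tests normally obtain it from the test
# node they spawn.
def _example_p2p_handshake(dstport=12345):
    peer = P2PInterface()
    peer.peer_connect('127.0.0.1', dstport, net='regtest')
    network_thread_start()
    peer.wait_for_verack()   # version/verack handshake completed
    peer.sync_with_ping()    # round-trip to confirm the node processed us
    peer.peer_disconnect()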
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
def __init__(self):
super().__init__(name="NetworkThread")
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
def network_thread_start():
"""Start the network thread."""
# Only one network thread may run at a time
assert not network_thread_running()
NetworkThread().start()
def network_thread_running():
"""Return whether the network thread is running."""
return any([thread.name == "NetworkThread" for thread in threading.enumerate()])
def network_thread_join(timeout=10):
"""Wait timeout seconds for the network thread to terminate.
Throw if the network thread doesn't terminate in timeout seconds."""
network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"]
assert len(network_threads) <= 1
for thread in network_threads:
thread.join(timeout)
assert not thread.is_alive()
| 37.669663 | 182 | 0.633598 |
e766343ebc9e5cabfb88d1bccd35040f0e60872a | 7,827 | py | Python | test/test_bvr_rest_before_after.py | doedotdev/bvr | 023fc93424fa6a50c8a3c2ce2fc48b76a041b58c | ["MIT"] | null | null | null | test/test_bvr_rest_before_after.py | doedotdev/bvr | 023fc93424fa6a50c8a3c2ce2fc48b76a041b58c | ["MIT"] | 12 | 2019-12-07T21:40:23.000Z | 2019-12-07T21:43:54.000Z | test/test_bvr_rest_before_after.py | doedotdev/bvr | 023fc93424fa6a50c8a3c2ce2fc48b76a041b58c | ["MIT"] | null | null | null |
from bvr.bvr_rest import bvr_rest_before_after
def test_bvr_rest_before_after_called_as_decorator(capsys):
@bvr_rest_before_after
def rest_before_after():
return 2
return_value = rest_before_after()
captured_ouput = capsys.readouterr().out
assert return_value == 2
assert "RESTING_BEFORE: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_ouput
assert "RESTING_AFTER: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} " in captured_ouput
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_called_as_callable_returning_decorator(capsys):
@bvr_rest_before_after()
def rest_before_after():
return 2
return_value = rest_before_after()
captured_ouput = capsys.readouterr().out
assert return_value == 2
assert "RESTING_BEFORE: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_ouput
assert "RESTING_AFTER: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} " in captured_ouput
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_called_as_decorator_with_function_args(capsys):
@bvr_rest_before_after
def rest_before_after(msg):
print(msg)
return msg
return_value = rest_before_after("Hello")
captured_ouput = capsys.readouterr().out
assert return_value == "Hello"
assert "RESTING_BEFORE: 5 second(s) | FUNCTION: rest_before_after | ARGS: ('Hello',) | KWARGS: {} \n" in captured_ouput
assert "RESTING_AFTER: 5 second(s) | FUNCTION: rest_before_after | ARGS: ('Hello',) | KWARGS: {} " in captured_ouput
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_called_as_callable_returning_decorator_with_function_args(capsys):
@bvr_rest_before_after()
def rest_before_after(msg):
print(msg)
return msg
return_value = rest_before_after("Hello")
captured_ouput = capsys.readouterr().out
assert return_value == "Hello"
assert "RESTING_BEFORE: 5 second(s) | FUNCTION: rest_before_after | ARGS: ('Hello',) | KWARGS: {} \n" in captured_ouput
assert "RESTING_AFTER: 5 second(s) | FUNCTION: rest_before_after | ARGS: ('Hello',) | KWARGS: {} " in captured_ouput
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_called_as_decorator_with_function_kwargs(capsys):
@bvr_rest_before_after
def rest_before_after(msg):
print(msg)
return msg
return_value = rest_before_after(msg="Hello")
captured_ouput = capsys.readouterr().out
assert return_value == "Hello"
assert "RESTING_BEFORE: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {'msg': 'Hello'} \nHello\n" in captured_ouput
assert "RESTING_AFTER: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {'msg': 'Hello'} " in captured_ouput
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_called_as_callable_returning_decorator_with_function_kwargs(capsys):
@bvr_rest_before_after()
def rest_before_after(msg):
print(msg)
return msg
return_value = rest_before_after(msg="Hello")
captured_ouput = capsys.readouterr().out
assert return_value == "Hello"
assert "RESTING_BEFORE: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {'msg': 'Hello'} \nHello\n" in captured_ouput
assert "RESTING_AFTER: 5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {'msg': 'Hello'} " in captured_ouput
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_with_countdown_true(capsys):
@bvr_rest_before_after(countdown=True)
def rest_before_after():
return 2
return_value = rest_before_after()
captured_output = capsys.readouterr().out
assert return_value == 2
assert "RESTING_BEFORE: 5/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_BEFORE: 4/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_BEFORE: 3/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_BEFORE: 2/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_BEFORE: 1/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 5/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 4/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 3/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 2/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 1/5 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_with_countdown_true_and_non_default_seconds(capsys):
@bvr_rest_before_after(seconds=2, countdown=True)
def rest_before_after():
return 2
return_value = rest_before_after()
captured_output = capsys.readouterr().out
assert return_value == 2
assert "RESTING_BEFORE: 2/2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_BEFORE: 1/2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 2/2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert "RESTING_AFTER: 1/2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \n" in captured_output
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_with_countdown_false_and_non_default_seconds(capsys):
@bvr_rest_before_after(seconds=2)
def rest_before_after():
print('Hello')
return 2
return_value = rest_before_after()
captured_output = capsys.readouterr().out
assert return_value == 2
assert "RESTING_BEFORE: 2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \nHello\n" in captured_output
assert "RESTING_AFTER: 2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} " in captured_output
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
def test_bvr_rest_before_after_should_cast_float_to_int(capsys):
@bvr_rest_before_after(seconds=2.23)
def rest_before_after():
print('Hello')
return 2
return_value = rest_before_after()
captured_output = capsys.readouterr().out
assert return_value == 2
assert "RESTING_BEFORE: 2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} \nHello" in captured_output
assert "RESTING_AFTER: 2 second(s) | FUNCTION: rest_before_after | ARGS: () | KWARGS: {} " in captured_output
assert rest_before_after.__name__ == "rest_before_after" # Important for decorators to not override method name
| 41.632979 | 136 | 0.722755 |
b520f3e06410f6d8a645d661f9bb433101ffce2b | 2,761 | py | Python | challenge/tastyContribs.py | histrio/tastydata | 75b36954f851e0d22b9968bebdb5c77331853f54 | [
"Apache-2.0"
] | 2 | 2019-08-09T22:16:54.000Z | 2019-09-30T11:20:05.000Z | challenge/tastyContribs.py | histrio/tastydata | 75b36954f851e0d22b9968bebdb5c77331853f54 | [
"Apache-2.0"
] | 7 | 2015-06-11T06:50:44.000Z | 2016-10-25T18:07:06.000Z | challenge/tastyContribs.py | histrio/tastydata | 75b36954f851e0d22b9968bebdb5c77331853f54 | [
"Apache-2.0"
] | 3 | 2019-08-12T14:09:21.000Z | 2019-09-30T10:22:52.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Identifies and lists contributors to to the challenge
Largely based on contributors.py in the ÖDOK project
'''
import codecs
import json
import challengeStats
import WikiApi as wikiApi
import contributors # this one lives in the ÖDOK project
def run(start='2015-05-08', end=None):
# connect to api
site = 'https://www.wikidata.org/w/api.php'
scriptidentify = 'TastyDataContribs/1.0'
fromConf = False
try:
import config
fromConf = True
wdApi = wikiApi.WikiDataApi.setUpApi(user=config.username,
password=config.password,
site=site,
scriptidentify=scriptidentify)
except ImportError:
from getpass import getpass
user = challengeStats.raw_encoded_input('Username: ')
wdApi = wikiApi.WikiDataApi.setUpApi(user=user,
password=getpass(),
site=site,
scriptidentify=scriptidentify)
# find changed pages
entities_file = u'entities.json'
if fromConf:
entities_file = u'%s%s' % (config.path, entities_file)
fin = codecs.open(entities_file, 'r', 'utf8')
pageList = json.load(fin)
fin.close()
contribs, ministats, users = contributors.handleContributions(wdApi,
pageList,
start=start,
end=end)
userInfo = wdApi.getUserData(users)
# outputs
output_contrib_file = 'contribs.json'
output_user_file = 'users.json'
if fromConf:
output_contrib_file = u'%s%s' % (config.path, output_contrib_file)
output_user_file = u'%s%s' % (config.path, output_user_file)
f = codecs.open(output_user_file, 'w', 'utf8')
f.write(json.dumps(userInfo, indent=4, ensure_ascii=False))
f.close()
f = codecs.open(output_contrib_file, 'w', 'utf8')
f.write(json.dumps(contribs, indent=4, ensure_ascii=False))
f.close()
print json.dumps(ministats, indent=4, ensure_ascii=False)
if __name__ == "__main__":
import sys
usage = '''Usage: python contributors.py start end
\tstart (optional): YYYY-MM-DD start date (default 2015-01-01)
\tend (optional): YYYY-MM-DD end date (default None)'''
argv = sys.argv[1:]
if len(argv) == 0:
run()
elif len(argv) == 1:
run(start=argv[0])
elif len(argv) == 2:
run(start=argv[0], end=argv[1])
else:
print usage
# EoF
| 34.08642 | 78 | 0.558493 |
7a28473fff017c7f441bd10c57583cd1dc369676 | 1,449 | py | Python | parser/team27/G-27/execution/function/mathematical/factorial.py | mr8ug/tytus | a09abe4095e49d333a8ed9ca81cb3d88f90872ba | [
"MIT"
] | 1 | 2021-01-09T05:32:35.000Z | 2021-01-09T05:32:35.000Z | parser/team27/G-27/execution/function/mathematical/factorial.py | XiomRB/tytus | 0873e4bdce5c110bee6ef2aa98240be6a93ae024 | [
"MIT"
] | null | null | null | parser/team27/G-27/execution/function/mathematical/factorial.py | XiomRB/tytus | 0873e4bdce5c110bee6ef2aa98240be6a93ae024 | [
"MIT"
] | null | null | null | import sys
sys.path.append('../tytus/parser/team27/G-27/execution/abstract')
sys.path.append('../tytus/parser/team27/G-27/execution/expression')
sys.path.append('../tytus/parser/team27/G-27/execution/symbol')
sys.path.append('../tytus/parser/team27/G-27/libraries')
from function import *
from typ import *
from math_functions import factorial
class Factorial(Function):
def __init__(self, input, row, column):
Function.__init__(self,row,column)
self.input = input
def execute(self, environment):
        # input is a list
if isinstance(self.input,list):
respuesta = []
for val in self.input:
value = val.execute(environment)
if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
return {'Error':"El valor " + value['value'] + " no es decimal o entero", 'linea':self.row,'columna':self.column }
result = factorial(value['value'])
respuesta.append({'value':result, 'typ': Type.INT})
return respuesta
        # input is a single value
else:
value = self.input.execute(environment)
if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
return {'Error':"El valor " + value['value'] + " no es decimal o entero", 'linea':self.row,'columna':self.column }
return [{'value': factorial(value['value']), 'typ': Type.INT}] | 46.741935 | 134 | 0.602484 |
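# Illustrative usage sketch (not part of the original tytus sources): Factorial
# only needs an input node exposing execute(); the tiny stand-in literal below
# and the row/column values 0, 0 are assumptions made for this demo.
if __name__ == "__main__":
    class _Literal:
        def __init__(self, value):
            self.value = value
        def execute(self, environment):
            return {'value': self.value, 'typ': Type.INT}
    # Expected: a one-element list whose 'value' is factorial(5) == 120
    print(Factorial(_Literal(5), 0, 0).execute({}))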
24381fe4c544f03b26f2fee1959bc5ace4ef98ea | 11,746 | py | Python | ghstack/shell.py | BowenBao/ghstack | 906274f42a28c690a49bff0af2063323bb06c5c3 | [
"MIT"
] | 1 | 2021-06-25T18:22:26.000Z | 2021-06-25T18:22:26.000Z | ghstack/shell.py | BowenBao/ghstack | 906274f42a28c690a49bff0af2063323bb06c5c3 | [
"MIT"
] | null | null | null | ghstack/shell.py | BowenBao/ghstack | 906274f42a28c690a49bff0af2063323bb06c5c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import subprocess
import os
import logging
from typing import Dict, Sequence, Optional, TypeVar, Union, Any, overload, IO, Tuple
import asyncio
import sys
# Shell commands generally return str, but with exitcode=True
# they return a bool, and if stdout is piped straight to sys.stdout
# they return None.
_SHELL_RET = Union[bool, str, None]
_HANDLE = Union[None, int, IO[Any]]
def log_command(args: Sequence[str]) -> None:
"""
    Given a command, print it in both a machine- and human-readable way.
    Args:
        args: the list of command line arguments to be logged
"""
# TODO: Irritatingly, this doesn't insert quotes for shell
# metacharacters like exclamation marks or parentheses.
cmd = subprocess.list2cmdline(args).replace("\n", "\\n")
logging.info("$ " + cmd)
K = TypeVar('K')
V = TypeVar('V')
def merge_dicts(x: Dict[K, V], y: Dict[K, V]) -> Dict[K, V]:
z = x.copy()
z.update(y)
return z
class Shell(object):
"""
An object representing a shell (e.g., the bash prompt in your
terminal), maintaining a concept of current working directory, and
also the necessary accoutrements for testing.
"""
# Current working directory of shell.
cwd: str
# Whether or not to suppress printing of command executed.
quiet: bool
# Whether or not shell is in testing mode; some commands are made
# more deterministic in this case.
testing: bool
# The current Unix timestamp. Only used during testing mode.
testing_time: int
def __init__(self,
quiet: bool = False,
cwd: Optional[str] = None,
testing: bool = False):
"""
Args:
cwd: Current working directory of the shell. Pass None to
initialize to the current cwd of the current process.
quiet: If True, suppress printing out the command executed
by the shell. By default, we print out commands for ease
of debugging. Quiet is most useful for non-mutating
shell commands.
testing: If True, operate in testing mode. Testing mode
enables features which make the outputs of commands more
deterministic; e.g., it sets a number of environment
variables for Git.
"""
self.cwd = cwd if cwd else os.getcwd()
self.quiet = quiet
self.testing = testing
self.testing_time = 1112911993
def sh(self, *args: str, # noqa: C901
env: Optional[Dict[str, str]] = None,
stderr: _HANDLE = None,
# TODO: Arguably bytes should be accepted here too
input: Optional[str] = None,
stdin: _HANDLE = None,
stdout: _HANDLE = subprocess.PIPE,
exitcode: bool = False) -> _SHELL_RET:
"""
Run a command specified by args, and return string representing
the stdout of the run command, raising an error if exit code
was nonzero (unless exitcode kwarg is specified; see below).
Args:
*args: the list of command line arguments to run
env: any extra environment variables to set when running the
command. Environment variables set this way are ADDITIVE
(unlike subprocess default)
stderr: where to pipe stderr; by default, we pipe it straight
to this process's stderr
input: string value to pass stdin. This is mutually exclusive
with stdin
stdin: where to pipe stdin from. This is mutually exclusive
with input
stdout: where to pipe stdout; by default, we capture the stdout
and return it
exitcode: if True, return a bool rather than string, specifying
whether or not the process successfully returned with exit
code 0. We never raise an exception when this is True.
"""
assert not (stdin and input)
if input:
stdin = subprocess.PIPE
if not self.quiet:
log_command(args)
if env is not None:
env = merge_dicts(dict(os.environ), env)
# The things we do for logging...
#
# - I didn't make a PTY, so programs are going to give
# output assuming there isn't a terminal at the other
# end. This is less nice for direct terminal use, but
# it's better for logging (since we get to dispense
# with the control codes).
#
# - We assume line buffering. This is kind of silly but
# we need to assume *some* sort of buffering with the
# stream API.
async def process_stream(proc_stream: asyncio.StreamReader, setting: _HANDLE,
default_stream: IO[str]) -> bytes:
output = []
while True:
try:
line = await proc_stream.readuntil()
except asyncio.LimitOverrunError as e:
line = await proc_stream.readexactly(e.consumed)
except asyncio.IncompleteReadError as e:
line = e.partial
if not line:
break
output.append(line)
if setting == subprocess.PIPE:
pass
elif setting == subprocess.STDOUT:
sys.stdout.buffer.write(line)
elif isinstance(setting, int):
os.write(setting, line)
elif setting is None:
# Sigh. See https://stackoverflow.com/questions/55681488/python-3-write-binary-to-stdout-respecting-buffering
default_stream.write(line.decode('utf-8'))
else:
# NB: don't use setting.write directly, that will
# not properly handle binary. This gives us
# "parity" with the normal subprocess implementation
os.write(setting.fileno(), line)
return b''.join(output)
async def feed_input(stdin_writer: Optional[asyncio.StreamWriter]) -> None:
if stdin_writer is None:
return
if not input:
return
stdin_writer.write(input.encode('utf-8'))
await stdin_writer.drain()
stdin_writer.close()
async def run() -> Tuple[int, bytes, bytes]:
proc = await asyncio.create_subprocess_exec(
*args,
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=self.cwd,
env=env,
)
assert proc.stdout is not None
assert proc.stderr is not None
_, out, err, _ = await asyncio.gather(
feed_input(proc.stdin),
process_stream(proc.stdout, stdout, sys.stdout),
process_stream(proc.stderr, stderr, sys.stderr),
proc.wait()
)
assert proc.returncode is not None
return (proc.returncode, out, err)
loop = asyncio.get_event_loop()
returncode, out, err = loop.run_until_complete(run())
# NB: Not debug; we always want to show this to user.
if err:
logging.debug("# stderr:\n" + err.decode(errors="backslashreplace"))
if out:
logging.debug(
("# stdout:\n" if err else "")
+ out.decode(errors="backslashreplace").replace('\0', '\\0'))
if exitcode:
logging.debug("Exit code: {}".format(returncode))
return returncode == 0
if returncode != 0:
raise RuntimeError(
"{} failed with exit code {}"
.format(' '.join(args), returncode)
)
if stdout == subprocess.PIPE:
return out.decode() # do a strict decode for actual return
else:
return None
def _maybe_rstrip(self, s: _SHELL_RET) -> _SHELL_RET:
if isinstance(s, str):
return s.rstrip()
else:
return s
@overload # noqa: F811
def git(self, *args: str) -> str:
...
@overload # noqa: F811
def git(self, *args: str, input: str) -> str:
...
@overload # noqa: F811
def git(self, *args: str, **kwargs: Any) -> _SHELL_RET:
...
def git(self, *args: str, **kwargs: Any # noqa: F811
) -> _SHELL_RET:
"""
Run a git command. The returned stdout has trailing newlines stripped.
Args:
*args: Arguments to git
**kwargs: Any valid kwargs for sh()
"""
env = kwargs.setdefault("env", {})
# Some envvars to make things a little more script mode nice
if self.testing:
env.setdefault("EDITOR", ":")
env.setdefault("GIT_MERGE_AUTOEDIT", "no")
env.setdefault("LANG", "C")
env.setdefault("LC_ALL", "C")
env.setdefault("PAGER", "cat")
env.setdefault("TZ", "UTC")
env.setdefault("TERM", "dumb")
# These are important so we get deterministic commit times
env.setdefault("GIT_AUTHOR_EMAIL", "[email protected]")
env.setdefault("GIT_AUTHOR_NAME", "A U Thor")
env.setdefault("GIT_COMMITTER_EMAIL", "[email protected]")
env.setdefault("GIT_COMMITTER_NAME", "C O Mitter")
env.setdefault("GIT_COMMITTER_DATE",
"{} -0700".format(self.testing_time))
env.setdefault("GIT_AUTHOR_DATE",
"{} -0700".format(self.testing_time))
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
return self._maybe_rstrip(self.sh(*(("git",) + args), **kwargs))
@overload # noqa: F811
def hg(self, *args: str) -> str:
...
@overload # noqa: F811
def hg(self, *args: str, input: str) -> str:
...
@overload # noqa: F811
def hg(self, *args: str, **kwargs: Any) -> _SHELL_RET:
...
def hg(self, *args: str, **kwargs: Any # noqa: F811
) -> _SHELL_RET:
"""
Run a hg command. The returned stdout has trailing newlines stripped.
Args:
*args: Arguments to hg
**kwargs: Any valid kwargs for sh()
"""
return self._maybe_rstrip(self.sh(*(("hg",) + args), **kwargs))
def jf(self, *args: str, **kwargs: Any) -> _SHELL_RET:
"""
Run a jf command. The returned stdout has trailing newlines stripped.
Args:
*args: Arguments to jf
**kwargs: Any valid kwargs for sh()
"""
kwargs.setdefault('stdout', sys.stderr)
return self._maybe_rstrip(self.sh(*(("jf",) + args), **kwargs))
def test_tick(self) -> None:
"""
Increase the current time. Useful when testing is True.
"""
self.testing_time += 60
def open(self, fn: str, mode: str) -> IO[Any]:
"""
Open a file, relative to the current working directory.
Args:
fn: filename to open
mode: mode to open the file as
"""
return open(os.path.join(self.cwd, fn), mode)
def cd(self, d: str) -> None:
"""
Change the current working directory.
Args:
d: directory to change to
"""
self.cwd = os.path.join(self.cwd, d)
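# Illustrative usage sketch (not part of the original module); the commands
# below are placeholders and assume a Unix-like environment with git installed.
if __name__ == "__main__":
    shell = Shell(quiet=True)
    print(shell.sh("echo", "hello"))         # captured stdout, returned as a str
    print(shell.sh("false", exitcode=True))  # returns False instead of raising
    shell.cd("..")                           # only affects subsequent commands
    print(shell.git("--version"))            # trailing newline stripped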
| 35.062687 | 130 | 0.557211 |
add4393dbc084cd2372b7356452b5fc4953a8657 | 396 | py | Python | src/gethash/cli/blake2s.py | xymy/gethash | 88fd23f1c30338ceb95ff5b71a0112be349fe359 | [
"MIT"
] | null | null | null | src/gethash/cli/blake2s.py | xymy/gethash | 88fd23f1c30338ceb95ff5b71a0112be349fe359 | [
"MIT"
] | null | null | null | src/gethash/cli/blake2s.py | xymy/gethash | 88fd23f1c30338ceb95ff5b71a0112be349fe359 | [
"MIT"
] | null | null | null | from gethash.script import gethashcli, script_main
META = {
"cmdname": "blake2s",
"hashname": "BLAKE2s",
"suffix": ".blake2s",
"package": "hashlib",
"hasher": "blake2s",
}
@gethashcli(**META)
def main(files, **kwargs):
"""Generate or check BLAKE2s."""
from hashlib import blake2s as H
script_main(H(), files, **kwargs)
if __name__ == "__main__":
main()
| 17.217391 | 50 | 0.613636 |
7cde0dc896ee27661e89be3b0b359dd6112f5007 | 10,419 | py | Python | uniter_model/data/mrm.py | intersun/LightningDOT | 5f2880f69ba87b8701ab89348d70ebb11432578c | [
"MIT"
] | 64 | 2021-03-17T02:01:34.000Z | 2021-12-31T08:05:57.000Z | uniter_model/data/mrm.py | intersun/LightningDOT | 5f2880f69ba87b8701ab89348d70ebb11432578c | [
"MIT"
] | 9 | 2021-04-16T07:58:33.000Z | 2021-11-09T11:09:58.000Z | uniter_model/data/mrm.py | intersun/LightningDOT | 5f2880f69ba87b8701ab89348d70ebb11432578c | [
"MIT"
] | 5 | 2021-03-18T01:21:44.000Z | 2022-01-20T13:23:39.000Z | """
MRM Datasets
"""
import random
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
def _get_img_mask(mask_prob, num_bb):
img_mask = [random.random() < mask_prob for _ in range(num_bb)]
if not any(img_mask):
# at least mask 1
img_mask[random.choice(range(num_bb))] = True
img_mask = torch.tensor(img_mask)
return img_mask
def _get_img_tgt_mask(img_mask, txt_len):
z = torch.zeros(txt_len, dtype=torch.bool)
img_mask_tgt = torch.cat([z, img_mask], dim=0)
return img_mask_tgt
def _get_feat_target(img_feat, img_masks):
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat) # (n, m, d)
feat_dim = img_feat.size(-1)
feat_targets = img_feat[img_masks_ext].contiguous().view(
-1, feat_dim) # (s, d)
return feat_targets
def _mask_img_feat(img_feat, img_masks):
img_masks_ext = img_masks.unsqueeze(-1).expand_as(img_feat)
img_feat_masked = img_feat.data.masked_fill(img_masks_ext, 0)
return img_feat_masked
class MrfrDataset(DetectFeatTxtTokDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def __getitem__(self, i):
"""
Return:
- input_ids : (L, ), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- img_feat : (num_bb, d)
- img_pos_feat : (num_bb, 7)
- attn_masks : (L + num_bb, ), ie., [1, 1, ..., 0, 0, 1, 1]
- img_mask : (num_bb, ) between {0, 1}
"""
example = super().__getitem__(i)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
# image input features
img_feat, img_pos_feat, num_bb = self._get_img_feat(
example['img_fname'])
img_mask = _get_img_mask(self.mask_prob, num_bb)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat,
attn_masks, img_mask, img_mask_tgt)
def mrfr_collate(inputs):
"""
Return:
- input_ids : (n, max_L), i.e., [cls, wd, wd, ..., sep, 0, 0], 0s padded
- position_ids : (n, max_L)
- txt_lens : list of [input_len]
- img_feat : (n, max_num_bb, d)
- img_pos_feat : (n, max_num_bb, 7)
- num_bbs : list of [num_bb]
- attn_masks : (n, max_{L + num_bb}), ie., [1, 1, ..., 0, 0, 1, 1]
- img_masks : (n, max_num_bb) between {0, 1}
"""
(input_ids, img_feats, img_pos_feats, attn_masks, img_masks, img_mask_tgts,
) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
# mask features
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
feat_targets = _get_feat_target(img_feat, img_masks)
img_feat = _mask_img_feat(img_feat, img_masks)
img_mask_tgt = pad_sequence(img_mask_tgts,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'feat_targets': feat_targets,
'img_masks': img_masks,
'img_mask_tgt': img_mask_tgt}
return batch
class OnlyImgMrfrDataset(Dataset):
""" an image-only MRM """
def __init__(self, mask_prob, img_db):
        self.mask_prob = mask_prob
        self.img_db = img_db
        self.ids, self.lens = map(list, unzip(self.img_db.name2nbb.items()))
def __getitem__(self, i):
id_ = self.ids[i]
img_feat, img_pos_feat, num_bb = self._get_img_feat(id_)
attn_masks = torch.ones(num_bb, dtype=torch.long)
img_mask = _get_img_mask(self.mask_prob, num_bb)
return img_feat, img_pos_feat, attn_masks, img_mask
def _get_img_feat(self, fname):
img_feat, bb = self.img_db[fname]
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
num_bb = img_feat.size(0)
return img_feat, img_bb, num_bb
def mrfr_only_img_collate(inputs):
img_feats, img_pos_feats, attn_masks, img_masks = map(list, unzip(inputs))
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
# mask features
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
feat_targets = _get_feat_target(img_feat, img_masks)
img_feat = _mask_img_feat(img_feat, img_masks)
batch = {'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'feat_targets': feat_targets,
'img_masks': img_masks,
'img_mask_tgt': img_masks}
return batch
def _get_targets(img_masks, img_soft_label):
soft_label_dim = img_soft_label.size(-1)
img_masks_ext_for_label = img_masks.unsqueeze(-1).expand_as(img_soft_label)
label_targets = img_soft_label[img_masks_ext_for_label].contiguous().view(
-1, soft_label_dim)
return label_targets
class MrcDataset(DetectFeatTxtTokDataset):
def __init__(self, mask_prob, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mask_prob = mask_prob
def _get_img_feat(self, fname):
img_dump = self.img_db.get_dump(fname)
num_bb = self.img_db.name2nbb[fname]
img_feat = torch.tensor(img_dump['features'])
bb = torch.tensor(img_dump['norm_bb'])
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_soft_label = torch.tensor(img_dump['soft_labels'])
return img_feat, img_bb, img_soft_label, num_bb
def __getitem__(self, i):
example = super().__getitem__(i)
img_feat, img_pos_feat, img_soft_labels, num_bb = self._get_img_feat(
example['img_fname'])
# image input features
img_mask = _get_img_mask(self.mask_prob, num_bb)
# text input
input_ids = example['input_ids']
input_ids = self.txt_db.combine_inputs(input_ids)
img_mask_tgt = _get_img_tgt_mask(img_mask, len(input_ids))
attn_masks = torch.ones(len(input_ids) + num_bb, dtype=torch.long)
return (input_ids, img_feat, img_pos_feat,
img_soft_labels, attn_masks, img_mask, img_mask_tgt)
def mrc_collate(inputs):
(input_ids, img_feats, img_pos_feats, img_soft_labels,
attn_masks, img_masks, img_mask_tgts) = map(list, unzip(inputs))
txt_lens = [i.size(0) for i in input_ids]
num_bbs = [f.size(0) for f in img_feats]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long
).unsqueeze(0)
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
img_soft_label = pad_tensors(img_soft_labels, num_bbs)
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
label_targets = _get_targets(img_masks, img_soft_label)
img_feat = _mask_img_feat(img_feat, img_masks)
img_mask_tgt = pad_sequence(img_mask_tgts,
batch_first=True, padding_value=0)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
bs, max_tl = input_ids.size()
out_size = attn_masks.size(1)
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'img_masks': img_masks,
'img_mask_tgt': img_mask_tgt,
'label_targets': label_targets}
return batch
class OnlyImgMrcDataset(OnlyImgMrfrDataset):
""" an image-only MRC """
def __getitem__(self, i):
id_ = self.ids[i]
(img_feat, img_pos_feat, img_soft_labels, num_bb
) = self._get_img_feat(id_)
attn_masks = torch.ones(num_bb, dtype=torch.long)
img_mask = _get_img_mask(self.mask_prob, num_bb)
return img_feat, img_pos_feat, img_soft_labels, attn_masks, img_mask
def _get_img_feat(self, fname):
img_dump = self.img_db.get_dump(fname)
num_bb = self.img_db.name2nbb[fname]
img_feat = torch.tensor(img_dump['features'])
bb = torch.tensor(img_dump['norm_bb'])
img_bb = torch.cat([bb, bb[:, 4:5]*bb[:, 5:]], dim=-1)
img_soft_labels = torch.tensor(img_dump['soft_labels'])
return img_feat, img_bb, img_soft_labels, num_bb
def mrc_only_img_collate(inputs):
(img_feats, img_pos_feats, img_soft_labels, attn_masks, img_masks
) = map(list, unzip(inputs))
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0)
img_masks = pad_sequence(img_masks, batch_first=True, padding_value=0)
num_bbs = [f.size(0) for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
img_soft_label = pad_tensors(img_soft_labels, num_bbs)
label_targets = _get_targets(img_masks, img_soft_label)
# mask features
img_feat = _mask_img_feat(img_feat, img_masks)
batch = {'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'img_masks': img_masks,
'img_mask_tgt': img_masks,
'label_targets': label_targets}
return batch
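# Minimal smoke-test sketch (not part of the original module): exercises the
# masking helpers on a fake batch of 1 image with 4 regions and 3-dim features.
if __name__ == "__main__":
    feats = torch.arange(12, dtype=torch.float).view(1, 4, 3)
    masks = _get_img_mask(0.5, 4).unsqueeze(0)   # (1, 4) bool mask, at least one True
    targets = _get_feat_target(feats, masks)     # (n_masked, 3) regression targets
    masked_feats = _mask_img_feat(feats, masks)  # masked regions zeroed out
    print(masks, targets.shape, masked_feats.shape)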
| 36.177083 | 79 | 0.655533 |
59b61caf3b4a9d65a3fded6e3f174c33dc339596 | 6,507 | py | Python | theano/sandbox/jax_linker.py | canyon289/Theano-PyMC | 1a9b04bfe480b758ddfa54ba49c88bee3bec419c | [
"BSD-3-Clause"
] | null | null | null | theano/sandbox/jax_linker.py | canyon289/Theano-PyMC | 1a9b04bfe480b758ddfa54ba49c88bee3bec419c | [
"BSD-3-Clause"
] | null | null | null | theano/sandbox/jax_linker.py | canyon289/Theano-PyMC | 1a9b04bfe480b758ddfa54ba49c88bee3bec419c | [
"BSD-3-Clause"
] | 1 | 2020-08-15T17:09:10.000Z | 2020-08-15T17:09:10.000Z | from collections.abc import Sequence
from warnings import warn
from theano.gof.graph import Constant
from theano.gof.link import (
Container,
PerformLinker,
add_clear_storage,
gc_helper,
map_storage,
streamline,
utils,
)
class JAXLinker(PerformLinker):
"""A `Linker` that JIT-compiles NumPy-based operations using JAX.
Attributes
----------
allow_non_jax: bool
A boolean indicating whether or not an exception is thrown when the
graph cannot be JAX compiled (e.g. the graph has an unsupported operator).
If `allow_non_jax` is `True`, the fallback is currently Python compilation.
"""
allow_non_jax = False
def create_jax_thunks(self, compute_map, storage_map):
"""Create a thunk for each output of the `Linker`s `FunctionGraph`.
This is differs from the other thunk-making function in that it only
produces thunks for the `FunctionGraph` output nodes.
Parameters
----------
compute_map: dict
The compute map dictionary.
storage_map: dict
The storage map dictionary.
Returns
-------
        thunks: list
            A list containing the thunks.
        output_nodes: list
            A list containing the output nodes.
"""
import jax
from theano.sandbox.jaxify import jax_funcify
output_nodes = [o.owner for o in self.fgraph.outputs]
# Create a JAX-compilable function from our `FunctionGraph`
jaxed_fgraph_outputs = jax_funcify(self.fgraph)
assert len(jaxed_fgraph_outputs) == len(output_nodes)
# I suppose we can consider `Constant`s to be "static" according to
# JAX.
static_argnums = [
n for n, i in enumerate(self.fgraph.inputs) if isinstance(i, Constant)
]
thunk_inputs = [storage_map[n] for n in self.fgraph.inputs]
thunks = []
for node, jax_funcs in zip(output_nodes, jaxed_fgraph_outputs):
thunk_outputs = [storage_map[n] for n in node.outputs]
if not isinstance(jax_funcs, Sequence):
jax_funcs = [jax_funcs]
jax_impl_jits = [
jax.jit(jax_func, static_argnums) for jax_func in jax_funcs
]
def thunk(
node=node, jax_impl_jits=jax_impl_jits, thunk_outputs=thunk_outputs
):
outputs = [
jax_impl_jit(*[x[0] for x in thunk_inputs])
for jax_impl_jit in jax_impl_jits
]
if len(jax_impl_jits) < len(node.outputs):
# In this case, the JAX function will output a single
# output that contains the other outputs.
# This happens for multi-output `Op`s that directly
# correspond to multi-output JAX functions (e.g. `SVD` and
# `jax.numpy.linalg.svd`).
outputs = outputs[0]
for o_node, o_storage, o_val in zip(
node.outputs, thunk_outputs, outputs
):
compute_map[o_node][0] = True
if len(o_storage) > 1:
assert len(o_storage) == len(o_val)
for i, o_sub_val in enumerate(o_val):
o_storage[i] = o_sub_val
else:
o_storage[0] = o_val
return outputs
thunk.inputs = thunk_inputs
thunk.outputs = thunk_outputs
thunk.lazy = False
thunks.append(thunk)
return thunks, output_nodes
def make_all(self, input_storage=None, output_storage=None, storage_map=None):
fgraph = self.fgraph
nodes = self.schedule(fgraph)
no_recycling = self.no_recycling
input_storage, output_storage, storage_map = map_storage(
fgraph, nodes, input_storage, output_storage, storage_map
)
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
try:
# We need to create thunk functions that will populate the output
# storage arrays with the JAX-computed values.
thunks, nodes = self.create_jax_thunks(compute_map, storage_map)
except NotImplementedError as e:
if not self.allow_non_jax:
raise
warn("JaxLinker could not JAXify graph: {}".format(e))
thunks = []
for node in nodes:
thunk = node.op.make_thunk(
node, storage_map, compute_map, no_recycling, "py"
)
thunk_inputs = [storage_map[v] for v in node.inputs]
thunk_outputs = [storage_map[v] for v in node.outputs]
thunk.inputs = thunk_inputs
thunk.outputs = thunk_outputs
thunks.append(thunk)
computed, last_user = gc_helper(nodes)
if self.allow_gc:
post_thunk_old_storage = []
for node in nodes:
post_thunk_old_storage.append(
[
storage_map[input]
for input in node.inputs
if (input in computed)
and (input not in fgraph.outputs)
and (node == last_user[input])
]
)
else:
post_thunk_old_storage = None
if no_recycling is True:
no_recycling = list(storage_map.values())
no_recycling = utils.difference(no_recycling, input_storage)
else:
no_recycling = [
storage_map[r] for r in no_recycling if r not in fgraph.inputs
]
fn = streamline(
fgraph, thunks, nodes, post_thunk_old_storage, no_recycling=no_recycling
)
fn.allow_gc = self.allow_gc
add_clear_storage(fn, computed, storage_map)
fn.storage_map = storage_map
return (
fn,
[
Container(input, storage)
for input, storage in zip(fgraph.inputs, input_storage)
],
[
Container(output, storage, True)
for output, storage in zip(fgraph.outputs, output_storage)
],
thunks,
nodes,
)
| 32.054187 | 84 | 0.553865 |
3cf04ce323a2ca25ae724cc993b51973e59afec3 | 8,892 | py | Python | accelbyte_py_sdk/api/cloudsave/operations/admin_player_record/admin_put_player_public_record_handler_v1.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/cloudsave/operations/admin_player_record/admin_put_player_public_record_handler_v1.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/cloudsave/operations/admin_player_record/admin_put_player_public_record_handler_v1.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:01:31.247008+08:00
# from: Justice Cloudsave Service (3.38.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelsPlayerRecordRequest
from ...models import ResponseError
class AdminPutPlayerPublicRecordHandlerV1(Operation):
"""Create or replace player record (adminPutPlayerPublicRecordHandlerV1)
Properties:
url: /cloudsave/v1/admin/namespaces/{namespace}/users/{userID}/records/{key}/public
method: PUT
tags: AdminPlayerRecord
consumes: ["application/json"]
produces: ["application/json"]
security: bearer
body: (body) REQUIRED ModelsPlayerRecordRequest in body
namespace: (namespace) REQUIRED str in path
user_id: (userID) REQUIRED str in path
key: (key) REQUIRED str in path
Responses:
200: OK - (Record in user-level saved)
400: Bad Request - ResponseError (Bad Request)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = "/cloudsave/v1/admin/namespaces/{namespace}/users/{userID}/records/{key}/public"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
body: ModelsPlayerRecordRequest # REQUIRED in [body]
namespace: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
key: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
# path params
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
# noinspection PyMethodMayBeStatic
def get_all_required_fields(self) -> List[str]:
return [
"body",
"namespace",
"user_id",
"key",
]
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "user_id"):
result["userID"] = self.user_id
if hasattr(self, "key"):
result["key"] = self.key
return result
# endregion get_x_params methods
# region is/has methods
def is_valid(self) -> bool:
if not hasattr(self, "body") or self.body is None:
return False
if not hasattr(self, "namespace") or self.namespace is None:
return False
if not hasattr(self, "user_id") or self.user_id is None:
return False
if not hasattr(self, "key") or self.key is None:
return False
return True
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelsPlayerRecordRequest) -> AdminPutPlayerPublicRecordHandlerV1:
self.body = value
return self
def with_namespace(self, value: str) -> AdminPutPlayerPublicRecordHandlerV1:
self.namespace = value
return self
def with_user_id(self, value: str) -> AdminPutPlayerPublicRecordHandlerV1:
self.user_id = value
return self
def with_key(self, value: str) -> AdminPutPlayerPublicRecordHandlerV1:
self.key = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelsPlayerRecordRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
if hasattr(self, "user_id") and self.user_id:
result["userID"] = str(self.user_id)
elif include_empty:
result["userID"] = str()
if hasattr(self, "key") and self.key:
result["key"] = str(self.key)
elif include_empty:
result["key"] = str()
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, ResponseError]]:
"""Parse the given response.
200: OK - (Record in user-level saved)
400: Bad Request - ResponseError (Bad Request)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
if code == 200:
return HttpResponse.create(code, "OK"), None
if code == 400:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelsPlayerRecordRequest,
namespace: str,
user_id: str,
key: str,
) -> AdminPutPlayerPublicRecordHandlerV1:
instance = cls()
instance.body = body
instance.namespace = namespace
instance.user_id = user_id
instance.key = key
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> AdminPutPlayerPublicRecordHandlerV1:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelsPlayerRecordRequest.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = ModelsPlayerRecordRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
if "userID" in dict_ and dict_["userID"] is not None:
instance.user_id = str(dict_["userID"])
elif include_empty:
instance.user_id = str()
if "key" in dict_ and dict_["key"] is not None:
instance.key = str(dict_["key"])
elif include_empty:
instance.key = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"userID": "user_id",
"key": "key",
}
# endregion static methods
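# Illustrative sketch (not part of the generated SDK file): construct the
# operation and inspect the request it describes.  The namespace, user id,
# key, base URL, and the empty placeholder body are made-up values.
if __name__ == "__main__":
    op = AdminPutPlayerPublicRecordHandlerV1.create(
        body=ModelsPlayerRecordRequest(),
        namespace="my-namespace",
        user_id="some-user-id",
        key="some-key",
    )
    print(op.get_full_url(base_url="https://demo.accelbyte.io"))
    print(op.get_path_params())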
| 30.982578 | 137 | 0.607512 |
a4bb1893b7ecc310bc9ee7ffcf907aef60b35b52 | 3,779 | py | Python | homeassistant/components/deluge/config_flow.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | homeassistant/components/deluge/config_flow.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | homeassistant/components/deluge/config_flow.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | """Config flow for the Deluge integration."""
from __future__ import annotations
from collections.abc import Mapping
import socket
from ssl import SSLError
from typing import Any
from deluge_client.client import DelugeRPCClient
import voluptuous as vol
from homeassistant.config_entries import SOURCE_REAUTH, ConfigFlow
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SOURCE,
CONF_USERNAME,
)
from homeassistant.data_entry_flow import FlowResult
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_WEB_PORT,
DEFAULT_NAME,
DEFAULT_RPC_PORT,
DEFAULT_WEB_PORT,
DOMAIN,
)
class DelugeFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Deluge."""
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
errors = {}
if user_input is not None:
if (error := await self.validate_input(user_input)) is None:
for entry in self._async_current_entries():
if (
user_input[CONF_HOST] == entry.data[CONF_HOST]
and user_input[CONF_PORT] == entry.data[CONF_PORT]
):
if self.context.get(CONF_SOURCE) == SOURCE_REAUTH:
self.hass.config_entries.async_update_entry(
entry, data=user_input
)
await self.hass.config_entries.async_reload(entry.entry_id)
return self.async_abort(reason="reauth_successful")
return self.async_abort(reason="already_configured")
return self.async_create_entry(
title=DEFAULT_NAME,
data=user_input,
)
errors["base"] = error
user_input = user_input or {}
schema = vol.Schema(
{
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST)): cv.string,
vol.Required(
CONF_USERNAME, default=user_input.get(CONF_USERNAME)
): cv.string,
vol.Required(CONF_PASSWORD, default=""): cv.string,
vol.Optional(
CONF_PORT, default=user_input.get(CONF_PORT, DEFAULT_RPC_PORT)
): int,
vol.Optional(
CONF_WEB_PORT,
default=user_input.get(CONF_WEB_PORT, DEFAULT_WEB_PORT),
): int,
}
)
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
async def async_step_reauth(self, config: Mapping[str, Any]) -> FlowResult:
"""Handle a reauthorization flow request."""
return await self.async_step_user()
async def validate_input(self, user_input: dict[str, Any]) -> str | None:
"""Handle common flow input validation."""
host = user_input[CONF_HOST]
port = user_input[CONF_PORT]
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
api = DelugeRPCClient(
host=host, port=port, username=username, password=password
)
try:
await self.hass.async_add_executor_job(api.connect)
except (
ConnectionRefusedError,
socket.timeout,
SSLError,
):
return "cannot_connect"
except Exception as ex: # pylint:disable=broad-except
if type(ex).__name__ == "BadLoginError":
return "invalid_auth" # pragma: no cover
return "unknown"
return None
| 35.990476 | 87 | 0.588251 |
1213316c659236c3b5650550647c066f2e4e03c2 | 437 | py | Python | versions/v1/create_jobs.py | bric-tb-softwares/rxpixp2pixcycle | 3ec59373d777908210483a41478d6fbc2fe60f3e | [
"BSD-3-Clause"
] | null | null | null | versions/v1/create_jobs.py | bric-tb-softwares/rxpixp2pixcycle | 3ec59373d777908210483a41478d6fbc2fe60f3e | [
"BSD-3-Clause"
] | null | null | null | versions/v1/create_jobs.py | bric-tb-softwares/rxpixp2pixcycle | 3ec59373d777908210483a41478d6fbc2fe60f3e | [
"BSD-3-Clause"
] | null | null | null |
import json, os
output_path = 'jobs'
os.makedirs(output_path, exist_ok=True)
tests = 1
sorts = 9
for test in range(tests):
for sort in range(sorts):
d = {
'sort' : sort,
'test' : test,
'seed' : 512,
}
print(d)
o = output_path + '/job.test_%d.sort_%d.json'%(test,sort)
with open(o, 'w') as f:
json.dump(d, f)
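# Illustrative note (not part of the original script): with tests=1 and sorts=9
# this writes nine files, e.g. jobs/job.test_0.sort_3.json containing
# {"sort": 3, "test": 0, "seed": 512}.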
| 15.068966 | 65 | 0.462243 |
280622a5495276855ef0b3a4c3614289da4c3518 | 119,735 | py | Python | autoload/leaderf/python/leaderf/manager.py | paperboard/LeaderF | 5d39b1a704419436a812118c6281ddb5023137e3 | [
"Apache-2.0"
] | null | null | null | autoload/leaderf/python/leaderf/manager.py | paperboard/LeaderF | 5d39b1a704419436a812118c6281ddb5023137e3 | [
"Apache-2.0"
] | null | null | null | autoload/leaderf/python/leaderf/manager.py | paperboard/LeaderF | 5d39b1a704419436a812118c6281ddb5023137e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import os
import sys
import json
import time
import operator
import itertools
import threading
import multiprocessing
from functools import partial
from functools import wraps
from .instance import LfInstance
from .cli import LfCli
from .utils import *
from .fuzzyMatch import FuzzyMatch
from .asyncExecutor import AsyncExecutor
from .devicons import (
webDevIconsGetFileTypeSymbol,
removeDevIcons
)
is_fuzzyEngine_C = False
try:
import fuzzyEngine
is_fuzzyEngine_C = True
cpu_count = multiprocessing.cpu_count()
lfCmd("let g:Lf_fuzzyEngine_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyEngine_C = 0")
is_fuzzyMatch_C = False
try:
import fuzzyMatchC
is_fuzzyMatch_C = True
lfCmd("let g:Lf_fuzzyMatch_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyMatch_C = 0")
if sys.version_info >= (3, 0):
def isAscii(str):
try:
str.encode("ascii")
return True
except UnicodeEncodeError:
return False
else:
def isAscii(str):
try:
str.decode("ascii")
return True
except UnicodeDecodeError:
return False
def modifiableController(func):
@wraps(func)
def deco(self, *args, **kwargs):
self._getInstance().buffer.options['modifiable'] = True
func(self, *args, **kwargs)
self._getInstance().buffer.options['modifiable'] = False
return deco
def catchException(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except vim.error as e: # for neovim
if str(e) != "b'Keyboard interrupt'" and str(e) != 'Keyboard interrupt':
raise e
elif self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
except KeyboardInterrupt: # <C-C>, this does not work in vim
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
return deco
def ignoreEvent(events):
def wrapper(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = events
func(self, *args, **kwargs)
finally:
vim.options['eventignore'] = saved_eventignore
return deco
return wrapper
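# Illustrative note (not part of the original plugin): the three decorators above
# are designed to be stacked on Manager methods, roughly like
#
#     @modifiableController            # temporarily makes the LeaderF buffer modifiable
#     @catchException                  # swallows keyboard-interrupt errors and stops the timer
#     @ignoreEvent('BufLeave,WinEnter,BufEnter')   # suppresses autocommands while the call runs
#     def someAction(self, *args, **kwargs):
#         ...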
#*****************************************************
# Manager
#*****************************************************
class Manager(object):
def __init__(self):
self._autochdir = 0
self._instance = None
self._cli = LfCli()
self._explorer = None
self._content = []
self._index = 0
self._help_length = 0
self._show_help = False
self._selections = {}
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
self._highlight_ids = []
self._orig_line = ''
self._ctrlp_pressed = False
self._fuzzy_engine = None
self._result_content = []
self._reader_thread = None
self._timer_id = None
self._highlight_method = lambda : None
self._orig_cwd = None
self._cursorline_dict = {}
self._empty_query = lfEval("get(g:, 'Lf_EmptyQuery', 1)") == '1'
self._preview_winid = 0
self._is_previewed = False
self._match_ids = []
self._vim_file_autoloaded = False
self._arguments = {}
self._getExplClass()
#**************************************************************
# abstract methods, in fact all the functions can be overridden
#**************************************************************
def _getExplClass(self):
"""
this function MUST be overridden
return the name of Explorer class
"""
raise NotImplementedError("Can't instantiate abstract class Manager "
"with abstract methods _getExplClass")
def _defineMaps(self):
pass
def _defineCommonMaps(self):
normal_map = lfEval("get(g:, 'Lf_NormalMap', {})")
if "_" not in normal_map:
return
for [lhs, rhs] in normal_map["_"]:
# If a buffer-local mapping does not exist, map it
maparg = lfEval("maparg('{}', 'n', 0, 1)".format(lhs))
if maparg == {} or maparg.get("buffer", "0") == "0" :
lfCmd("nnoremap <buffer> <silent> {} {}".format(lhs, rhs))
def _cmdExtension(self, cmd):
"""
this function can be overridden to add new cmd
if return true, exit the input loop
"""
pass
@removeDevIcons
def _argaddFiles(self, files):
# It will raise E480 without 'silent!'
lfCmd("silent! argdelete *")
for file in files:
lfCmd("argadd %s" % escSpecial(file))
def _issue_422_set_option(self):
if lfEval("has('nvim')") == '1' and self._is_previewed:
lfCmd("silent! setlocal number<")
lfCmd("silent! setlocal relativenumber<")
lfCmd("silent! setlocal cursorline<")
lfCmd("silent! setlocal colorcolumn<")
lfCmd("silent! setlocal winhighlight<")
def _acceptSelection(self, *args, **kwargs):
pass
def _getDigest(self, line, mode):
"""
this function can be overridden
        specify which part of the line is to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if mode == 0:
return line
elif mode == 1:
return getBasename(line)
else:
return getDirname(line)
def _getDigestStartPos(self, line, mode):
"""
this function can be overridden
return the start position of the digest returned by _getDigest()
Args:
            mode: 0, return the start position of full path
                  1, return the start position of name only
                  2, return the start position of directory name
"""
if mode == 0 or mode == 2:
return 0
else:
return lfBytesLen(getDirname(line))
def _createHelp(self):
return []
def _setStlMode(self, **kwargs):
if self._cli.isFuzzy:
if self._getExplorer().supportsNameOnly():
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
else:
mode = 'Fuzzy'
else:
mode = 'Regex'
modes = {"--nameOnly", "--fullPath", "--fuzzy", "--regexMode"}
for opt in kwargs.get("arguments", {}):
if opt in modes:
if opt == "--regexMode":
mode = 'Regex'
elif self._getExplorer().supportsNameOnly():
if opt == "--nameOnly":
mode = 'NameOnly'
elif opt == "--fullPath":
mode = 'FullPath'
else: # "--fuzzy"
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
elif opt in ("--nameOnly", "--fullPath", "--fuzzy"):
mode = 'Fuzzy'
break
self._getInstance().setStlMode(mode)
self._cli.setCurrentMode(mode)
def _beforeEnter(self):
self._resetAutochdir()
self._cur_buffer = vim.current.buffer
def _afterEnter(self):
if self._vim_file_autoloaded == False:
category = self._getExplorer().getStlCategory()
if category == 'Colorscheme':
category = 'Colors'
lfCmd("silent! call leaderf#%s#a_nonexistent_function()" % category)
self._vim_file_autoloaded = True
if "--nowrap" in self._arguments:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal nowrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:false)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = False
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal wrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:true)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = True
if self._getInstance().getWinPos() != 'popup':
self._defineMaps()
self._defineCommonMaps()
id = int(lfEval("matchadd('Lf_hl_cursorline', '.*\%#.*', 9)"))
self._match_ids.append(id)
else:
lfCmd("""call win_execute({}, 'let matchid = matchadd(''Lf_hl_cursorline'', ''.*\%#.*'', 9)')"""
.format(self._getInstance().getPopupWinId()))
id = int(lfEval("matchid"))
self._match_ids.append(id)
if is_fuzzyEngine_C:
self._fuzzy_engine = fuzzyEngine.createFuzzyEngine(cpu_count, False)
def _beforeExit(self):
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
self.clearSelections()
self._getExplorer().cleanup()
if self._fuzzy_engine:
fuzzyEngine.closeFuzzyEngine(self._fuzzy_engine)
self._fuzzy_engine = None
if self._reader_thread and self._reader_thread.is_alive():
self._stop_reader_thread = True
self._closePreviewPopup()
if self._getInstance().getWinPos() == 'popup':
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._match_ids = []
def _afterExit(self):
pass
def _bangEnter(self):
self._preview_open = False
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
self._cli.hideCursor()
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._resetHighlights()
if self._cli.pattern and self._index == 0:
self._search(self._content)
def _bangReadFinished(self):
if self._preview_open == False and self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._previewResult(False)
self._preview_open = True
def _getList(self, pairs):
"""
this function can be overridden
return a list constructed from pairs
Args:
pairs: a list of tuple(weight, line, ...)
"""
return [p[1] for p in pairs]
def _getUnit(self):
"""
indicates how many lines are considered as a unit
"""
return 1
def _supportsRefine(self):
return False
def _previewInPopup(self, *args, **kwargs):
pass
def _closePreviewPopup(self):
if lfEval("has('nvim')") == '1':
if self._preview_winid:
if int(lfEval("nvim_win_is_valid(%d) == v:true" % self._preview_winid)):
lfCmd("call nvim_win_close(%d, 1)" % self._preview_winid)
self._preview_winid = 0
else:
if self._preview_winid:
lfCmd("call popup_close(%d)" % self._preview_winid)
self._preview_winid = 0
def _previewResult(self, preview):
if self._getInstance().getWinPos() == 'floatwin':
self._cli.buildPopupPrompt()
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1' and \
int(lfEval("win_id2win(%d)" % self._preview_winid)) != vim.current.window.number:
self._closePreviewPopup()
if not self._needPreview(preview):
return
line = self._getInstance().currentLine
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
line_nr = self._getInstance().window.cursor[0]
self._previewInPopup(line, self._getInstance().buffer, line_nr)
return
orig_pos = self._getInstance().getOriginalPos()
cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
try:
vim.current.tabpage, vim.current.window = orig_pos[:2]
self._acceptSelection(line, preview=True)
lfCmd("augroup Lf_Cursorline")
lfCmd("autocmd! BufwinEnter <buffer> setlocal cursorline<")
lfCmd("augroup END")
finally:
if self._getInstance().getWinPos() != 'popup':
vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
vim.options['eventignore'] = saved_eventignore
def _restoreOrigCwd(self):
if self._orig_cwd is None:
return
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
try:
if int(lfEval("&autochdir")) == 0 and lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
except:
if lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
def _needExit(self, line, arguments):
return True
def setArguments(self, arguments):
self._arguments = arguments
def getArguments(self):
return self._arguments
#**************************************************************
def _createPopupModePreview(self, title, source, line_nr, jump_cmd):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2
else:
maxwidth = min(width, int(lfEval("&columns")))
relative = 'editor'
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s')" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
float_window = self._getInstance().window
float_win_row = int(float(lfEval("nvim_win_get_config(%d).row" % float_window.id)))
float_win_col = int(float(lfEval("nvim_win_get_config(%d).col" % float_window.id)))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
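            # g:Lf_PopupPreviewPosition controls where the preview float is placed
            # relative to the LeaderF window: 'bottom', 'top', 'right', or (any
            # other value) next to the current cursor line.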
if preview_pos.lower() == 'bottom':
anchor = "NW"
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
row = float_win_row + float_window.height + statusline_height
col = float_win_col
height = int(lfEval("&lines")) - row - 2
if height < 1:
return
width = float_window.width
elif preview_pos.lower() == 'top':
anchor = "SW"
row = float_win_row - 1
col = float_win_col
height = row
if height < 1:
return
width = float_window.width
elif preview_pos.lower() == 'right':
anchor = "SW"
row = float_win_row - 1
col = float_win_col + float_window.width
height = row
if height < 1:
return
width = float_window.width
else:
anchor = "SW"
start = int(lfEval("line('w0')")) - 1
end = int(lfEval("line('.')")) - 1
col_width = float_window.width - int(lfEval("&numberwidth")) - 1
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
row = float_win_row + delta_height
col = float_win_col + int(lfEval("&numberwidth")) + 1 + float_window.cursor[1]
height = row
width = maxwidth
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'winhighlight', 'Normal:Lf_hl_popup_window')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("norm! zz")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
# lfCmd("redraw!") # maybe we don't need it, it makes the preview slow
else:
popup_window = self._getInstance().window
popup_pos = lfEval("popup_getpos(%d)" % popup_window.id)
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
if not isinstance(source, int):
try:
lfCmd("let content = readfile('%s')" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
line = int(popup_pos["line"]) + int(popup_pos["height"]) + statusline_height
pos = "topleft"
maxheight = int(lfEval("&lines")) - line
if maxheight < 1:
return
elif preview_pos.lower() == 'top':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
# int(popup_pos["line"]) - 1(exclude the first line) - 1(input window) - 1(title)
maxheight = int(popup_pos["line"]) - 3
if maxheight < 1:
return
pos = "botleft"
line = maxheight + 1
elif preview_pos.lower() == 'right':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"]) + maxwidth
# int(popup_pos["line"]) - 1(exclude the first line) - 1(input window) - 1(title)
maxheight = int(popup_pos["height"]) + 1
if maxheight < 1:
return
pos = "topleft"
line = int(popup_pos["line"]) - 1
else: # cursor
lfCmd("""call win_execute(%d, "let numberwidth = &numberwidth")""" % popup_window.id)
col = int(popup_pos["core_col"]) + int(lfEval("numberwidth")) + popup_window.cursor[1]
lfCmd("""call win_execute(%d, "let delta_height = line('.') - line('w0')")""" % popup_window.id)
# the line of buffer starts from 0, while the line of line() starts from 1
start = int(lfEval("line('w0', %d)" % popup_window.id)) - 1
end = int(lfEval("line('.', %d)" % popup_window.id)) - 1
col_width = int(popup_pos["core_width"]) - int(lfEval("numberwidth"))
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
# int(popup_pos["core_line"]) - 1(exclude the first line) - 1(input window)
maxheight = int(popup_pos["core_line"]) + delta_height - 2
pos = "botleft"
line = maxheight + 1
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": pos,
"line": line,
"col": col,
"padding": [0, 0, 0, 0],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
"scrollbar": 0,
}
if preview_pos.lower() == 'bottom':
del options["title"]
options["border"] = [0, 0, 1, 0]
elif preview_pos.lower() == 'cursor' and maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "call cursor(%d, 1)")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber colorcolumn= ')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal wincolor=Lf_hl_popup_window')" % self._preview_winid)
if lfEval("get(g:, 'Lf_PopupShowFoldcolumn', 1)") == '0':
lfCmd("call win_execute(%d, 'setlocal foldcolumn=0')" % self._preview_winid)
else:
lfCmd("call win_execute(%d, 'setlocal foldcolumn=1')" % self._preview_winid)
lfCmd("call win_execute(%d, 'norm! zz')" % self._preview_winid)
@ignoreEvent('BufRead,BufReadPre,BufReadPost')
def _createPopupPreview(self, title, source, line_nr, jump_cmd=''):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
line_nr = int(line_nr)
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._createPopupModePreview(title, source, line_nr, jump_cmd)
return
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
width = int(lfEval("&columns"))//2
else:
width = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 3"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
relative = 'editor'
anchor = "SW"
row = maxheight
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s')" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
height = min(maxheight, buffer_len)
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = (int(lfEval("&columns")) - width) // 2
elif preview_pos.lower() == 'left':
col = 0
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns")) - width
else:
relative = 'cursor'
row = 0
col = 0
if maxheight < int(lfEval("&lines"))//2 - 2:
anchor = "NW"
if relative == 'cursor':
row = 1
else:
row = maxheight + 1
height = min(int(lfEval("&lines")) - maxheight - 3, buffer_len)
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = 0
elif preview_pos.lower() == 'left':
col = 1
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns"))//2 + 2
else:
col = "cursor"
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 4"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": "botleft",
"line": "cursor-1",
"col": col,
"padding": [0, 0, 0, 1],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
try:
lfCmd("let content = readfile('%s')" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if self._current_mode == 'NORMAL':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#normalModePreviewFilter', [%d]))"
% (self._preview_winid, id(self)))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber')" % self._preview_winid)
def _needPreview(self, preview):
"""
Args:
preview:
if True, always preview the result no matter what `g:Lf_PreviewResult` is.
"""
preview_dict = {k.lower(): v for k, v in lfEval("g:Lf_PreviewResult").items()}
category = self._getExplorer().getStlCategory()
if not preview and int(preview_dict.get(category.lower(), 0)) == 0:
return False
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
return False
elif self._getInstance().window.cursor[0] <= self._help_length:
return False
if self._getInstance().empty() or (self._getInstance().getWinPos() != 'popup' and
vim.current.buffer != self._getInstance().buffer):
return False
if self._ctrlp_pressed == True:
return True
line = self._getInstance().currentLine
if self._orig_line == line and self._getInstance().buffer.options['modifiable']:
return False
self._orig_line = line
return True
def _getInstance(self):
if self._instance is None:
self._instance = LfInstance(self, self._getExplorer().getStlCategory(),
self._cli,
self._beforeEnter,
self._afterEnter,
self._beforeExit,
self._afterExit)
return self._instance
def _createHelpHint(self):
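        # Prepend the help lines to the result buffer (append them at the bottom
        # when the order is reversed) and shift the cursor so that it stays on
        # the same result line.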
help = []
if not self._show_help:
if lfEval("get(g:, 'Lf_HideHelp', 0)") == '0':
help.append('" Press <F1> for help')
help.append('" ---------------------------------------------------------')
else:
help += self._createHelp()
self._help_length = len(help)
orig_row = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help[::-1])
self._getInstance().buffer.options['modifiable'] = False
buffer_len = len(self._getInstance().buffer)
if buffer_len < self._initial_count:
if "--nowrap" not in self._arguments:
self._getInstance().window.height = min(self._initial_count,
self._getInstance()._actualLength(self._getInstance().buffer))
else:
self._getInstance().window.height = buffer_len
elif self._getInstance().window.height < self._initial_count:
self._getInstance().window.height = self._initial_count
lfCmd("normal! Gzb")
self._getInstance().window.cursor = (orig_row, 0)
else:
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help, 0)
self._getInstance().buffer.options['modifiable'] = False
self._getInstance().window.cursor = (orig_row + self._help_length, 0)
self._getInstance().mimicCursor()
self._getInstance().refreshPopupStatusline()
def _hideHelp(self):
self._getInstance().buffer.options['modifiable'] = True
if self._getInstance().isReverseOrder():
orig_row = self._getInstance().window.cursor[0]
countdown = len(self._getInstance().buffer) - orig_row - self._help_length
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
self._getInstance().buffer[:] = self._getInstance().buffer[-self._initial_count:]
lfCmd("normal! Gzb")
if 0 < countdown < self._initial_count:
self._getInstance().window.cursor = (len(self._getInstance().buffer) - countdown, 0)
else:
self._getInstance().window.cursor = (len(self._getInstance().buffer), 0)
self._getInstance().setLineNumber()
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._help_length = 0
self._getInstance().refreshPopupStatusline()
def _inHelpLines(self):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
return True
elif self._getInstance().window.cursor[0] <= self._help_length:
return True
return False
def _getExplorer(self):
if self._explorer is None:
self._explorer = self._getExplClass()()
return self._explorer
def _resetAutochdir(self):
if int(lfEval("&autochdir")) == 1:
self._autochdir = 1
lfCmd("set noautochdir")
else:
self._autochdir = 0
def _setAutochdir(self):
if self._autochdir == 1:
# When autochdir is set, Vim will change the current working directory
# to the directory containing the file which was opened or selected.
lfCmd("set autochdir")
def _toUpInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! k")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! k')" % (self._preview_winid))
def _toDownInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! j")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! j')" % (self._preview_winid))
def _toUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! k')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
adjust = False
if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] == 1:
adjust = True
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd("norm! k")
if adjust:
lfCmd("norm! zt")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _toDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! j')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder() \
and self._getInstance().getCurrentPos()[0] == self._getInstance().window.height:
self._setResultContent()
lfCmd("norm! j")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _pageUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageUp>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if self._getInstance().isReverseOrder():
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd('exec "norm! \<PageUp>"')
self._getInstance().setLineNumber()
def _pageDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageDown>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder():
self._setResultContent()
lfCmd('exec "norm! \<PageDown>"')
self._getInstance().setLineNumber()
def _leftClick(self):
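        # Handle a mouse click: inside the result window it moves the cursor to
        # the clicked position, inside the preview popup it leaves the input loop
        # in normal mode, and a click anywhere else quits LeaderF.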
if self._getInstance().getWinPos() == 'popup':
if int(lfEval("has('patch-8.1.2266')")) == 1:
if self._getInstance().getPopupWinId() == int(lfEval("v:mouse_winid")):
lfCmd("""call win_execute(%d, "exec v:mouse_lnum")"""
% (self._getInstance().getPopupWinId()))
lfCmd("""call win_execute(%d, "exec 'norm!'.v:mouse_col.'|'")"""
% (self._getInstance().getPopupWinId()))
exit_loop = False
elif self._getInstance().window.number == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._getInstance().setLineNumber()
self.clearSelections()
exit_loop = False
elif self._preview_winid == int(lfEval("v:mouse_winid")):
if lfEval("has('nvim')") == '1':
lfCmd("call win_gotoid(%d)" % self._preview_winid)
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._current_mode = 'NORMAL'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
exit_loop = True
else:
self.quit()
exit_loop = True
return exit_loop
def _search(self, content, is_continue=False, step=0):
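        # An empty pattern just shows the head of `content` (or, for the File
        # category with an empty query, the path-similarity guess); otherwise a
        # fuzzy or regex search is run depending on the current input mode.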
if not is_continue:
self.clearSelections()
self._clearHighlights()
self._clearHighlightsPos()
self._cli.highlightMatches()
if not self._cli.pattern: # e.g., when <BS> or <Del> is typed
if self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
else:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
if self._cli.isFuzzy:
self._fuzzySearch(content, is_continue, step)
else:
self._regexSearch(content, is_continue, step)
if self._getExplorer().getStlCategory() not in ["File"]:
self._previewResult(False)
def _filter(self, step, filter_method, content, is_continue,
use_fuzzy_engine=False, return_index=False):
""" Construct a list from result of filter_method(content).
Args:
step: An integer to indicate the number of lines to filter one time.
filter_method: A function to apply `content` as parameter and
return an iterable.
content: The list to be filtered.
"""
unit = self._getUnit()
step = step // unit * unit
length = len(content)
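        # self._index records how many lines of `content` have been consumed so
        # far; self._cb_content caches the lines that matched the previous
        # (shorter) pattern so that they are re-filtered first when the pattern
        # is refined.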
if self._index == 0:
self._cb_content = []
self._result_content = []
self._index = min(step, length)
cur_content = content[:self._index]
else:
if not is_continue and self._result_content:
if self._cb_content:
self._cb_content += self._result_content
else:
self._cb_content = self._result_content
if len(self._cb_content) >= step:
cur_content = self._cb_content[:step]
self._cb_content = self._cb_content[step:]
else:
cur_content = self._cb_content
left = step - len(self._cb_content)
self._cb_content = []
if self._index < length:
end = min(self._index + left, length)
cur_content += content[self._index:end]
self._index = end
if self._cli.isAndMode:
result, highlight_methods = filter_method(cur_content)
if is_continue:
self._previous_result = (self._previous_result[0] + result[0],
self._previous_result[1] + result[1])
result = self._previous_result
else:
self._previous_result = result
return (result, highlight_methods)
elif use_fuzzy_engine:
if return_index:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
result = (result[0], [cur_content[i] for i in result[1]])
else:
result = filter_method(source=cur_content)
if is_continue:
result = fuzzyEngine.merge(self._previous_result, result)
self._previous_result = result
else:
result = list(filter_method(cur_content))
if is_continue:
self._previous_result += result
result = self._previous_result
else:
self._previous_result = result
return result
def _fuzzyFilter(self, is_full_path, get_weight, iterable):
"""
        Return a generator of (weight, line) pairs whose weight is above MIN_WEIGHT.
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
pairs = ((get_weight(getDigest(line)), line) for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return (p for p in pairs if p[0] > MIN_WEIGHT)
def _fuzzyFilterEx(self, is_full_path, get_weight, iterable):
"""
return a tuple, (weights, indices)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
iterable = itertools.islice(iterable, 0, None, self._getUnit())
pairs = ((get_weight(getDigest(line)), i) for i, line in enumerate(iterable))
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
result = [p for p in pairs if p[0] > MIN_WEIGHT]
if len(result) == 0:
weights, indices = [], []
else:
weights, indices = zip(*result)
return (list(weights), list(indices))
def _refineFilter(self, first_get_weight, get_weight, iterable):
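        # Filter for the refinement pattern "xxx;yyy": a line is kept only when
        # both sub-patterns match their respective digests, and the two weights
        # are summed.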
getDigest = self._getDigest
triples = ((first_get_weight(getDigest(line, 1)),
get_weight(getDigest(line, 2)), line)
for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return ((i[0] + i[1], i[2]) for i in triples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT)
def _andModeFilter(self, iterable):
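        # AND mode: every sub-pattern filters the lines that survived the previous
        # sub-patterns, the per-pattern weights are summed per line, and one
        # highlight method is collected for each sub-pattern.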
encoding = lfEval("&encoding")
use_fuzzy_engine = False
cur_content = iterable
weight_lists = []
highlight_methods = []
for p in self._cli.pattern:
if self._fuzzy_engine and isAscii(p) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=False, is_and_mode=True)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=False, is_and_mode=True)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=False, is_and_mode=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True, clear=False)
elif is_fuzzyMatch_C and isAscii(p):
pattern = fuzzyMatchC.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, clear=False)
else:
fuzzy_match = FuzzyMatch(p, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights,
clear=False)
if use_fuzzy_engine:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
else:
result = filter_method(cur_content)
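            # keep the already collected weight lists aligned with the lines that
            # survived this sub-pattern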
for i, wl in enumerate(weight_lists):
weight_lists[i] = [wl[j] for j in result[1]]
weight_lists.append(result[0])
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
unit = self._getUnit()
result_content = [cur_content[i*unit:i*unit + unit] for i in result[1]]
cur_content = list(itertools.chain.from_iterable(result_content))
else:
cur_content = [cur_content[i] for i in result[1]]
result_content = cur_content
highlight_methods.append(highlight_method)
weights = [sum(i) for i in zip(*weight_lists)]
return ((weights, result_content), highlight_methods)
def _fuzzySearch(self, content, is_continue, step):
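        # Pick the fastest available matcher: the fuzzyEngine C extension for
        # ASCII patterns, then the fuzzyMatchC module, and finally the pure
        # Python FuzzyMatch as a fallback.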
encoding = lfEval("&encoding")
use_fuzzy_engine = False
use_fuzzy_match_c = False
if self._cli.isAndMode:
filter_method = self._andModeFilter
elif self._cli.isRefinement:
if self._cli.pattern[1] == '': # e.g. abc;
if self._fuzzy_engine and isAscii(self._cli.pattern[0]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[0])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True, sort_results=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[0], encoding)
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
elif self._cli.pattern[0] == '': # e.g. ;abc
if self._fuzzy_engine and isAscii(self._cli.pattern[1]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[1])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False, sort_results=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[1], encoding)
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else: # e.g. abc;def
if is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
is_ascii_0 = True
pattern_0 = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight_0 = partial(fuzzyMatchC.getWeight, pattern=pattern_0, is_name_only=True)
getHighlights_0 = partial(fuzzyMatchC.getHighlights, pattern=pattern_0, is_name_only=True)
else:
is_ascii_0 = False
fuzzy_match_0 = FuzzyMatch(self._cli.pattern[0], encoding)
getWeight_0 = fuzzy_match_0.getWeight
getHighlights_0 = fuzzy_match_0.getHighlights
if is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
is_ascii_1 = True
pattern_1 = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight_1 = partial(fuzzyMatchC.getWeight, pattern=pattern_1, is_name_only=False)
getHighlights_1 = partial(fuzzyMatchC.getHighlights, pattern=pattern_1, is_name_only=False)
else:
is_ascii_1 = False
fuzzy_match_1 = FuzzyMatch(self._cli.pattern[1], encoding)
getWeight_1 = fuzzy_match_1.getWeight
getHighlights_1 = fuzzy_match_1.getHighlights
use_fuzzy_match_c = is_ascii_0 and is_ascii_1
filter_method = partial(self._refineFilter, getWeight_0, getWeight_1)
highlight_method = partial(self._highlightRefine, getHighlights_0, getHighlights_1)
else:
if self._fuzzy_engine and isAscii(self._cli.pattern) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File":
return_index = False
if self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=True)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_File,
param=fuzzyEngine.createParameter(1),
is_name_only=True, sort_results=True)
elif self._getExplorer().getStlCategory() == "Rg":
return_index = False
if "--match-path" in self._arguments:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=True)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Rg,
param=fuzzyEngine.createRgParameter(self._getExplorer().displayMulti(),
self._getExplorer().getContextSeparator(), self._has_column),
is_name_only=True, sort_results=True)
elif self._getExplorer().getStlCategory() == "Tag":
return_index = False
mode = 0 if self._cli.isFullPath else 1
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Tag,
param=fuzzyEngine.createParameter(mode), is_name_only=True, sort_results=True)
elif self._getExplorer().getStlCategory() == "Gtags":
return_index = False
result_format = 1
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
result_format = 0
elif self._getExplorer().getResultFormat() == "ctags-x":
result_format = 2
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Gtags,
param=fuzzyEngine.createGtagsParameter(0, result_format, self._match_path),
is_name_only=True, sort_results=True)
elif self._getExplorer().getStlCategory() == "Line":
return_index = False
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Line,
param=fuzzyEngine.createParameter(1), is_name_only=True, sort_results=True)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=True)
else:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights)
if self._cli.isAndMode:
if self._fuzzy_engine and isAscii(''.join(self._cli.pattern)):
step = 20000 * cpu_count
else:
step = 10000
pair, highlight_methods = self._filter(step, filter_method, content, is_continue)
pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
elif use_fuzzy_engine:
if step == 0:
if return_index == True:
step = 30000 * cpu_count
else:
step = 60000 * cpu_count
_, self._result_content = self._filter(step, filter_method, content, is_continue, True, return_index)
else:
if step == 0:
if use_fuzzy_match_c:
step = 60000
elif self._getExplorer().supportsNameOnly() and self._cli.isFullPath:
step = 6000
else:
step = 12000
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
if self._cli.isAndMode:
self._highlight_method = partial(self._highlight_and_mode, highlight_methods)
self._highlight_method()
else:
self._highlight_method = highlight_method
self._highlight_method()
if len(self._cli.pattern) > 1 and not is_continue:
lfCmd("redraw")
def _guessFilter(self, filename, suffix, dirname, icon, iterable):
"""
        Return a generator of (weight, line) pairs, the weight reflecting how
        closely the line's path resembles the current buffer's path.
"""
icon_len = len(icon)
return ((FuzzyMatch.getPathWeight(filename, suffix, dirname, line[icon_len:]), line) for line in iterable)
def _guessSearch(self, content, is_continue=False, step=0):
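        # Used when the query is empty: rank the candidates by how closely their
        # paths resemble the current buffer's path (name, suffix and directory),
        # so related files are listed first.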
if self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
buffer_name = os.path.normpath(lfDecode(self._cur_buffer.name))
if lfEval("g:Lf_ShowRelativePath") == '1':
try:
buffer_name = os.path.relpath(buffer_name)
except ValueError:
pass
buffer_name = lfEncode(buffer_name)
dirname, basename = os.path.split(buffer_name)
filename, suffix = os.path.splitext(basename)
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == "1":
icon = webDevIconsGetFileTypeSymbol(basename)
else:
icon = ''
if self._fuzzy_engine:
filter_method = partial(fuzzyEngine.guessMatch, engine=self._fuzzy_engine, filename=filename,
suffix=suffix, dirname=dirname, icon=icon, sort_results=True)
step = len(content)
_, self._result_content = self._filter(step, filter_method, content, is_continue, True)
else:
step = len(content)
filter_method = partial(self._guessFilter, filename, suffix, dirname, icon)
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def _highlight_and_mode(self, highlight_methods):
self._clearHighlights()
for i, highlight_method in enumerate(highlight_methods):
highlight_method(hl_group='Lf_hl_match' + str(i % 5))
def _clearHighlights(self):
if self._getInstance().getWinPos() == 'popup':
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._highlight_ids = []
def _clearHighlightsPos(self):
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
def _resetHighlights(self):
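        # Re-apply the match highlights from the cached positions
        # (self._highlight_pos / self._highlight_refine_pos) without recomputing
        # the matches, e.g. after the help hint is toggled.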
self._clearHighlights()
unit = self._getUnit()
bottom = len(self._getInstance().buffer) - self._help_length
if self._cli.isAndMode:
highlight_pos_list = self._highlight_pos_list
else:
highlight_pos_list = [self._highlight_pos]
for n, highlight_pos in enumerate(highlight_pos_list):
hl_group = 'Lf_hl_match' + str(n % 5)
for i, pos in enumerate(highlight_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
for i, pos in enumerate(self._highlight_refine_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _highlight(self, is_full_path, get_highlights, use_fuzzy_engine=False, clear=True, hl_group='Lf_hl_match'):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
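        # only the first g:Lf_NumberOfHighlight lines get per-character match
        # highlighting, to keep highlighting cheap for large result sets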
if clear:
self._clearHighlights()
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
if use_fuzzy_engine:
self._highlight_pos = get_highlights(source=[getDigest(line)
for line in content[:highlight_number:unit]])
else:
# e.g., self._highlight_pos = [ [ [2,3], [6,2] ], [ [1,4], [7,6], ... ], ... ]
# where [2, 3] indicates the highlight starts at the 2nd column with the
# length of 3 in bytes
self._highlight_pos = [get_highlights(getDigest(line))
for line in content[:highlight_number:unit]]
if self._cli.isAndMode:
self._highlight_pos_list.append(self._highlight_pos)
bottom = len(content)
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 0 if is_full_path else 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
def _highlightRefine(self, first_get_highlights, get_highlights):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
self._clearHighlights()
getDigest = self._getDigest
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
bottom = len(content)
self._highlight_pos = [first_get_highlights(getDigest(line, 1))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_match', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_match', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
self._highlight_refine_pos = [get_highlights(getDigest(line, 2))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_refine_pos):
start_pos = self._getDigestStartPos(content[unit*i], 2)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _regexFilter(self, iterable):
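        # Matching is delegated to g:LfNoErrMsgMatch() so that an invalid pattern
        # does not raise error messages ('-2' means the pattern itself is invalid,
        # '-1' means no match); NUL bytes are replaced, apparently because they
        # cannot be passed through lfEval().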
def noErrMatch(text, pattern):
try:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text, pattern))
except TypeError: # python 2
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except ValueError: # python 3
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
try:
if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))):
return iter([])
else:
return (line for line in iterable
if noErrMatch(escQuote(self._getDigest(line, 0)), escQuote(self._cli.pattern)))
except vim.error:
return iter([])
def _regexSearch(self, content, is_continue, step):
if not is_continue and not self._cli.isPrefix:
self._index = 0
self._result_content = self._filter(8000, self._regexFilter, content, is_continue)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def clearSelections(self):
for i in self._selections.values():
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % i)
self._selections.clear()
def _cleanup(self):
if not ("--recall" in self._arguments or lfEval("g:Lf_RememberLastSearch") == '1'):
self._pattern_bak = self._cli.pattern
self._cli.clear()
self._clearHighlights()
self._clearHighlightsPos()
self._help_length = 0
self._show_help = False
@modifiableController
def toggleHelp(self):
self._show_help = not self._show_help
if self._getInstance().isReverseOrder():
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._createHelpHint()
self.clearSelections()
self._resetHighlights()
def _accept(self, file, mode, *args, **kwargs):
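        # mode: '' opens the result in the current window, 'h' in a horizontal
        # split, 'v' in a vertical split, 't' in a new tabpage (whose position is
        # adjusted below according to g:Lf_TabpagePosition).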
if file:
if self._getExplorer().getStlCategory() != "Jumps":
lfCmd("norm! m'")
if mode == '':
pass
elif mode == 'h':
lfCmd("split")
elif mode == 'v':
lfCmd("bel vsplit")
kwargs["mode"] = mode
tabpage_count = len(vim.tabpages)
self._acceptSelection(file, *args, **kwargs)
for k, v in self._cursorline_dict.items():
if k.valid:
k.options["cursorline"] = v
self._cursorline_dict.clear()
self._issue_422_set_option()
if mode == 't' and len(vim.tabpages) > tabpage_count:
tab_pos = int(lfEval("g:Lf_TabpagePosition"))
if tab_pos == 0:
lfCmd("tabm 0")
elif tab_pos == 1:
lfCmd("tabm -1")
elif tab_pos == 3:
lfCmd("tabm")
def accept(self, mode=''):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if self._getInstance().window.cursor[0] <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if self._getExplorer().getStlCategory() == "Rg":
if self._getInstance().currentLine == self._getExplorer().getContextSeparator():
return
if "--heading" in self._arguments and not re.match(r'^\d+[:-]', self._getInstance().currentLine):
return
self._cli.writeHistory(self._getExplorer().getStlCategory())
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
cwd = lfGetCwd()
if len(self._selections) > 0:
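            # several lines were selected; accept every one of them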
files = []
for i in sorted(self._selections.keys()):
files.append(self._getInstance().buffer[i-1])
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if original buffer is an No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd(winnr())")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
if mode == '' and self._getExplorer().getStlCategory() == "File":
self._accept(files[0], mode)
self._argaddFiles(files)
self._accept(files[0], mode)
lfCmd("doautocmd BufwinEnter")
else:
for file in files:
self._accept(file, mode)
if lfGetCwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
need_exit = True
else:
file = self._getInstance().currentLine
line_nr = self._getInstance().window.cursor[0]
need_exit = self._needExit(file, self._arguments)
if need_exit:
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if original buffer is an No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd(winnr())")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
self._accept(file, mode, self._getInstance().buffer, line_nr) # for bufTag
if lfGetCwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
if need_exit:
self._setAutochdir()
if dir_changed_by_autocmd == False:
self._restoreOrigCwd()
return None
else:
self._beforeExit()
self._content = vim.current.buffer[:]
return False
def _jumpNext(self):
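        # Jump to the next result of the last search (the "--next" argument),
        # reusing the still-valid result window if there is one, otherwise the
        # remembered cursor row.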
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] > len(instance.buffer) - self._help_length:
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
elif instance.window.cursor[0] == 1: # at the first line
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow > len(instance.buffer) - instance.helpLength:
instance.cursorRow = len(instance.buffer) - instance.helpLength
elif instance.cursorRow == 1: # at the last line
instance.cursorRow = len(instance.buffer) - instance.helpLength
else:
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length:
instance.window.cursor = (self._help_length + 1, 0)
elif instance.window.cursor[0] == len(instance.buffer): # at the last line
instance.window.cursor = (self._help_length + 1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow <= instance.helpLength:
instance.cursorRow = instance.helpLength + 1
elif instance.cursorRow == len(instance.buffer): # at the last line
instance.cursorRow = instance.helpLength + 1
else:
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
return True
def _jumpPrevious(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] >= len(instance.buffer) - self._help_length:
instance.window.cursor = (1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow >= len(instance.buffer) - instance.helpLength:
instance.cursorRow = 1
else:
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length + 1:
instance.window.cursor = (len(instance.buffer), 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
self._accept(instance.buffer[instance.window.cursor[0] - 1], "")
else:
if instance.cursorRow <= instance.helpLength + 1:
instance.cursorRow = len(instance.buffer)
else:
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "")
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
def quit(self):
self._getInstance().exitBuffer()
self._setAutochdir()
self._restoreOrigCwd()
def refresh(self, normal_mode=True):
self._getExplorer().cleanup()
content = self._getExplorer().getFreshContent()
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
if normal_mode: # when called in Normal mode
self._getInstance().buffer.options['modifiable'] = True
self._clearHighlights()
self._clearHighlightsPos()
self.clearSelections()
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._iteration_end = True
if self._cli.pattern:
self._index = 0
self._search(self._content)
if normal_mode: # when called in Normal mode
self._createHelpHint()
self._resetHighlights()
self._getInstance().buffer.options['modifiable'] = False
def addSelections(self):
nr = self._getInstance().window.number
if self._getInstance().getWinPos() != 'popup':
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
line_nr = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
if line_nr > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if line_nr <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if line_nr in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (self._selections[line_nr], self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % self._selections[line_nr])
del self._selections[line_nr]
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), line_nr))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % line_nr))
self._selections[line_nr] = id
def selectMulti(self):
orig_line = self._getInstance().window.cursor[0]
nr = self._getInstance().window.number
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
cur_line = int(lfEval("v:mouse_lnum"))
self.clearSelections()
for i in range(min(orig_line, cur_line), max(orig_line, cur_line)+1):
if i > self._help_length and i not in self._selections:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i)))
self._selections[i] = id
def selectAll(self):
line_num = len(self._getInstance().buffer)
if line_num > 300:
lfCmd("echohl Error | redraw | echo ' Too many files selected!' | echohl NONE")
lfCmd("sleep 1")
return
for i in range(line_num):
if i >= self._help_length and i+1 not in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), i+1))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i+1)))
self._selections[i+1] = id
def _gotoFirstLine(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("normal! gg")
def _readFinished(self):
pass
    def startExplorer(self, win_pos, *args, **kwargs):
        """Entry point of a search session: resolve the arguments, fetch content
        from the explorer, fill the LeaderF buffer and start searching (list
        content is handled directly, AsyncExecutor results are drained in a
        background thread and consumed incrementally).
        """
arguments_dict = kwargs.get("arguments", {})
if "--recall" in arguments_dict:
self._arguments["--recall"] = arguments_dict["--recall"]
else:
self.setArguments(arguments_dict)
self._cli.setNameOnlyFeature(self._getExplorer().supportsNameOnly())
self._cli.setRefineFeature(self._supportsRefine())
if self._getExplorer().getStlCategory() in ["Gtags"]:
if "--update" in self._arguments or "--remove" in self._arguments:
self._getExplorer().getContent(*args, **kwargs)
return
if "--next" in arguments_dict:
if self._jumpNext() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
elif "--previous" in arguments_dict:
if self._jumpPrevious() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
self._cleanup()
# lfCmd("echohl WarningMsg | redraw | echo ' searching ...' | echohl NONE")
self._getInstance().setArguments(self._arguments)
empty_query = self._empty_query and self._getExplorer().getStlCategory() in ["File"]
remember_last_status = "--recall" in self._arguments \
or lfEval("g:Lf_RememberLastSearch") == '1' and self._cli.pattern
if remember_last_status:
content = self._content
self._getInstance().useLastReverseOrder()
win_pos = self._getInstance().getWinPos()
else:
content = self._getExplorer().getContent(*args, **kwargs)
self._getInstance().setCwd(lfGetCwd())
if self._getExplorer().getStlCategory() in ["Gtags"] and "--auto-jump" in self._arguments \
and isinstance(content, list) and len(content) == 1:
mode = self._arguments["--auto-jump"][0] if len(self._arguments["--auto-jump"]) else ""
self._accept(content[0], mode)
return
self._index = 0
pattern = kwargs.get("pattern", "") or arguments_dict.get("--input", [""])[0]
if len(pattern) > 1 and (pattern[0] == '"' and pattern[-1] == '"'
or pattern[0] == "'" and pattern[-1] == "'"):
pattern = pattern[1:-1]
self._cli.setPattern(pattern)
self._result_content = []
self._cb_content = []
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
# clear the buffer only when the content is not a list
self._getInstance().enterBuffer(win_pos, not isinstance(content, list))
self._initial_count = self._getInstance().getInitialWinHeight()
self._getInstance().setStlCategory(self._getExplorer().getStlCategory())
self._setStlMode(**kwargs)
self._getInstance().setStlCwd(self._getExplorer().getStlCurDir())
if kwargs.get('bang', 0):
self._current_mode = 'NORMAL'
else:
self._current_mode = 'INPUT'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if not remember_last_status:
self._gotoFirstLine()
self._start_time = time.time()
self._bang_start_time = self._start_time
self._bang_count = 0
self._getInstance().buffer.vars['Lf_category'] = self._getExplorer().getStlCategory()
self._read_content_exception = None
if isinstance(content, list):
self._is_content_list = True
self._read_finished = 2
if not remember_last_status:
if len(content[0]) == len(content[0].rstrip("\r\n")):
self._content = content
else:
self._content = [line.rstrip("\r\n") for line in content]
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content))
if not empty_query:
self._getInstance().setBuffer(self._content[:self._initial_count])
if lfEval("has('nvim')") == '1':
lfCmd("redrawstatus")
self._callback = self._workInIdle
if not kwargs.get('bang', 0):
self._readFinished()
self.input()
else:
if not remember_last_status and not empty_query:
self._getInstance().appendBuffer(self._content[self._initial_count:])
elif remember_last_status and len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
lfCmd("echo")
if self._cli.pattern:
self._cli._buildPrompt()
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
if not remember_last_status and not self._cli.pattern and empty_query:
self._gotoFirstLine()
self._guessSearch(self._content)
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
elif isinstance(content, AsyncExecutor.Result):
self._is_content_list = False
self._callback = self._workInIdle
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
if self._getExplorer().getStlCategory() in ["Rg", "Gtags"]:
if "--append" in self.getArguments():
self._offset_in_content = len(self._content)
if self._pattern_bak:
self._getInstance().setBuffer(self._content, need_copy=False)
self._createHelpHint()
else:
self._getInstance().clearBuffer()
self._content = []
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
self._stop_reader_thread = False
self._reader_thread = threading.Thread(target=self._readContent, args=(content,))
self._reader_thread.daemon = True
self._reader_thread.start()
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
else:
self._is_content_list = False
self._callback = partial(self._workInIdle, content)
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
    def _readContent(self, content):
        """Background-thread target: drain `content` (an AsyncExecutor result)
        into self._content until it is exhausted or the reader is stopped.
        """
try:
for line in content:
self._content.append(line)
if self._stop_reader_thread:
break
else:
self._read_finished = 1
except Exception:
self._read_finished = 1
self._read_content_exception = sys.exc_info()
    def _setResultContent(self):
        """Make sure the buffer shows the full result set: either the complete
        filtered results or, if no pattern has been typed yet, the raw content.
        """
if len(self._result_content) > len(self._getInstance().buffer):
self._getInstance().setBuffer(self._result_content)
elif self._index == 0:
self._getInstance().setBuffer(self._content, need_copy=True)
@catchException
    def _workInIdle(self, content=None, bang=False):
        """Periodic callback (a timer in bang mode, otherwise the CLI idle
        callback) that consumes newly read content, refreshes the statusline
        counters and advances the incremental search in small steps so the UI
        stays responsive.
        """
if self._read_content_exception is not None:
if bang == True:
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
lfPrintError(self._read_content_exception[1])
return
else:
raise self._read_content_exception[1]
if bang == False and self._preview_open == False and self._getInstance().getWinPos() in ('popup', 'floatwin') \
and not self._getInstance().empty():
self._previewResult(False)
self._preview_open = True
if self._is_content_list:
if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0):
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
return
if content:
i = -1
for i, line in enumerate(itertools.islice(content, 20)):
self._content.append(line)
if i == -1 and self._read_finished == 0:
self._read_finished = 1
if self._read_finished > 0:
if self._read_finished == 1:
self._read_finished += 1
self._getExplorer().setContent(self._content)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlRunning(False)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
if bang:
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] != b'':
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
self._getInstance().setBuffer(self._content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._content))
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < len(self._content) or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
if bang:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
cur_len = len(self._content)
if time.time() - self._start_time > 0.1:
self._start_time = time.time()
self._getInstance().setStlTotal(cur_len//self._getUnit())
self._getInstance().setStlRunning(True)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
else:
self._getInstance().setStlResultsCount(cur_len)
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < cur_len or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content[:cur_len], True, step)
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._getInstance().getWinPos() not in ('popup', 'floatwin') \
and time.time() - self._bang_start_time > 0.5:
self._bang_start_time = time.time()
lfCmd("echohl WarningMsg | redraw | echo ' searching %s' | echohl NONE" % ('.' * self._bang_count))
self._bang_count = (self._bang_count + 1) % 9
elif len(self._getInstance().buffer) < min(cur_len, self._initial_count):
self._getInstance().setBuffer(self._content[:self._initial_count])
@modifiableController
    def input(self):
        """Input-mode main loop: feed each key returned by the CLI into the
        incremental search and dispatch special keys (<CR>, <Tab>, <F5>,
        history keys, <Quit>, ...) until the user accepts a result, switches
        to normal mode or quits.
        """
self._preview_open = False
self._current_mode = 'INPUT'
self._getInstance().hideMimicCursor()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().getWinPos() == 'popup':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')"
% (self._getInstance().getPopupWinId(), 'leaderf#PopupFilter'))
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self.clearSelections()
self._hideHelp()
self._resetHighlights()
if self._cli.pattern: # --input xxx or from normal mode to input mode
if self._index == 0: # --input xxx
self._search(self._content)
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"] \
and "--recall" not in self._arguments:
self._guessSearch(self._content)
for cmd in self._cli.input(self._callback):
cur_len = len(self._content)
cur_content = self._content[:cur_len]
if equal(cmd, '<Update>'):
if self._getInstance().getWinPos() == 'popup':
if self._getInstance()._window_object.cursor[0] > 1:
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
self._search(cur_content)
self._previewResult(False)
elif equal(cmd, '<Shorten>'):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
self._previewResult(False)
elif equal(cmd, '<Mode>'):
self._setStlMode()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
if self._cli.pattern:
self._search(cur_content)
            elif equal(cmd, '<C-K>'):
self._toUp()
self._previewResult(False)
            elif equal(cmd, '<C-J>'):
self._toDown()
self._previewResult(False)
elif equal(cmd, '<Up>'):
if self._cli.previousHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Down>'):
if self._cli.nextHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<CR>'):
if self.accept() is None:
break
elif equal(cmd, '<Quit>'):
self._cli.writeHistory(self._getExplorer().getStlCategory())
self.quit()
break
elif equal(cmd, '<Tab>'): # switch to Normal mode
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._setResultContent()
self.clearSelections()
self._cli.hideCursor()
self._createHelpHint()
self._resetHighlights()
if self._getInstance().isReverseOrder() and self._cli.pattern \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
break
elif equal(cmd, '<F5>'):
self.refresh(False)
elif equal(cmd, '<C-H>'):
if lfEval("get(g:, 'Lf_ShowHidden')") == '1':
lfCmd("let g:Lf_ShowHidden = 0")
else:
lfCmd("let g:Lf_ShowHidden = 1")
self.refresh(False)
elif equal(cmd, '<C-P>'):
self._ctrlp_pressed = True
self._previewResult(True)
self._ctrlp_pressed = False
elif equal(cmd, '<C-PageUp>'):
self._toUpInPopup()
elif equal(cmd, '<C-PageDown>'):
self._toDownInPopup()
elif equal(cmd, '<PageUp>'):
for x in range(10):
self._toUpInPopup()
elif equal(cmd, '<PageDown>'):
for x in range(10):
self._toDownInPopup()
else:
if self._cmdExtension(cmd):
break
# vim: set ts=4 sw=4 tw=0 et :
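# ---------------------------------------------------------------------------
# Editor's note: a minimal, self-contained sketch of the producer/consumer
# pattern used by _readContent() and _workInIdle() above -- a daemon thread
# drains an iterator into a shared list while a periodic "idle" callback
# consumes whatever has arrived so far.  It is illustrative only: the names
# BackgroundReader and poll() are invented here and none of the Vim/LeaderF
# APIs (lfCmd, lfEval, the instance object) are involved.
import threading
class BackgroundReader(object):
    def __init__(self, source):
        self._content = []
        self._read_finished = 0
        self._stop = False
        self._thread = threading.Thread(target=self._read, args=(source,))
        self._thread.daemon = True
        self._thread.start()
    def _read(self, source):
        # mirrors _readContent(): append lines until exhausted or stopped
        try:
            for line in source:
                self._content.append(line)
                if self._stop:
                    break
            else:
                self._read_finished = 1
        except Exception:
            self._read_finished = 1
    def poll(self, offset):
        # mirrors the incremental part of _workInIdle(): read the finished
        # flag before the length, so the returned slice is complete whenever
        # `done` is True, then hand back only the newly arrived lines.
        done = self._read_finished == 1
        cur_len = len(self._content)
        return self._content[offset:cur_len], cur_len, done
# Typical use (e.g. driven by a timer): keep calling poll() with the last
# returned offset until `done` is True, appending each new slice to the UI.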
| 45.92827 | 133 | 0.536735 |
7e45cc960da12ae131782ea270240dc079dff488 | 2,326 | py | Python | crawler/27bao/app.py | hmumixaM/anything | 5810132118d6d3f3859d607fca068c8275d4bf42 | [
"MIT"
] | null | null | null | crawler/27bao/app.py | hmumixaM/anything | 5810132118d6d3f3859d607fca068c8275d4bf42 | [
"MIT"
] | null | null | null | crawler/27bao/app.py | hmumixaM/anything | 5810132118d6d3f3859d607fca068c8275d4bf42 | [
"MIT"
] | null | null | null | import requests
import re
import pymongo
import time
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
db = client.baola
collection = db.gif
def data(info):
result = collection.insert_one(info)
def main():
pages = list_link()
for link in pages:
gifs, description = gif_match(link)
info = {"title": description, "gif": gifs}
data(info)
def list_link():
pages = []
prefix = "https://www.27bao.com/gif/list_{}.html"
for i in range(51):
link = prefix.format(i)
text = download(link)
info = match(text, r"href='/gif/\d{1,5}\.html' alt='.{4,30}'")
for item in info:
location = "https://www.27bao.com"
href = location + match(item, r"/gif/\d{1,5}\.html", "search")
alt = match(item, r"alt='.{5,40}'", "search")[5:-1]
pages.append([href, alt])
fo = open("page.txt", "w")
for i in pages:
fo.write(i[0])
fo.write("@")
fo.write(i[1])
fo.write("\n")
fo.close()
return pages
def gif_match(link):
gifs = []
href = link[0]
description = link[1]
text = download(href)
text = match(text, r"<div id=\"pages\">.+</div>", "search")
image_pages = match(text, r"href='\d{1,5}_\d{1,2}\.html'")
for image in image_pages:
location = "https://www.27bao.com/gif/{}"
image = location.format(image[6:-1])
image_text = download(image)
info = match(image_text, r"<img alt=\".+\.gif", "search")
gif_href = match(info, r"http.+\.gif", "search")
gif_alt = match(info, r"alt=\".{5,40}\"", "search")[5:-7]
gifs.append([gif_href, gif_alt])
return gifs, description
def download(link):
# time.sleep(1.0)
response = requests.get(link)
response.encoding = "UTF-8"
return response.text
def match(text, regex, mode="findall"):
    """Run `regex` over `text`: mode 'findall' returns a list of matches,
    mode 'search' returns the first matched string (raises if nothing matches)."""
    pattern = re.compile(regex)
    if mode == "findall":
        result = pattern.findall(text)
    elif mode == "search":
        result = pattern.search(text)[0]
    else:
        raise ValueError("unsupported mode: %r" % mode)
    return result
if __name__ == '__main__':
fo = open("page.txt", "r")
a = fo.readlines()
for i in a[1187:]:
link = i.split("@")
gifs, description = gif_match(link)
info = {"title": description, "gif": gifs}
data(info)
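# ---------------------------------------------------------------------------
# Editor's note: illustrative only -- a tiny offline example of the two modes
# of the match() helper above.  The sample string is invented; nothing below
# talks to 27bao.com or MongoDB.
def _demo_match():
    sample = "href='/gif/123.html' alt='demo caption'"
    ids = match(sample, r"/gif/\d{1,5}\.html")             # findall -> ['/gif/123.html']
    alt = match(sample, r"alt='.{5,40}'", "search")[5:-1]  # search  -> 'demo caption'
    return ids, alt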
| 26.735632 | 74 | 0.560189 |
9631bcb84a724f9556d588350db7aa3ed948a8d9 | 950 | py | Python | tests/test_services.py | Wicker25/intercom-test | af9bfbbf4fbc9b803e387f101332ef7af13d7676 | [
"MIT"
] | null | null | null | tests/test_services.py | Wicker25/intercom-test | af9bfbbf4fbc9b803e387f101332ef7af13d7676 | [
"MIT"
] | null | null | null | tests/test_services.py | Wicker25/intercom-test | af9bfbbf4fbc9b803e387f101332ef7af13d7676 | [
"MIT"
] | null | null | null | import pytest
from intercom.models import Location, Customer
from intercom.services import get_customers_close_to_office
@pytest.fixture()
def customer_repository():
class CustomerRepositoryMock:
def fetch_all(self):
return [
Customer(user_id=1, name='Ian', location=Location(latitude=53.2451022, longitude=-6.238335)),
Customer(user_id=2, name='Eoin', location=Location(latitude=54.0894797, longitude=-6.18671)),
Customer(user_id=3, name='David', location=Location(latitude=52.833502, longitude=-8.522366)),
]
return CustomerRepositoryMock()
def test_get_customers_close_to_office(customer_repository):
office_location = Location(latitude=53.339428, longitude=-6.257664) # Dublin
close_customers = get_customers_close_to_office(customer_repository, office_location, 100.0)
assert [customer.user_id for customer in close_customers] == [1, 2]
| 38 | 110 | 0.722105 |
870f4ac860eb51d9709aaac4d8f2596cacfa5721 | 16,879 | py | Python | neutron/objects/qos/policy.py | acdc-cloud/neutron | 2510836886555179f9e9e39b1fdbf94296befc51 | [
"Apache-2.0"
] | 1 | 2018-10-19T01:48:37.000Z | 2018-10-19T01:48:37.000Z | neutron/objects/qos/policy.py | weiqiLee/neutron | ddc72ebd41a0e7804b33a21583d3add008191229 | [
"Apache-2.0"
] | null | null | null | neutron/objects/qos/policy.py | weiqiLee/neutron | ddc72ebd41a0e7804b33a21583d3add008191229 | [
"Apache-2.0"
] | 1 | 2018-08-28T17:13:16.000Z | 2018-08-28T17:13:16.000Z | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from neutron_lib import constants as n_const
from oslo_db import exception as db_exc
from oslo_utils import versionutils
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
from neutron.common import exceptions
from neutron.db.models import l3
from neutron.db import models_v2
from neutron.db.qos import models as qos_db_model
from neutron.db import rbac_db_models
from neutron.objects import base as base_db
from neutron.objects import common_types
from neutron.objects.db import api as obj_db_api
from neutron.objects.qos import binding
from neutron.objects.qos import rule as rule_obj_impl
from neutron.objects import rbac_db
@base_db.NeutronObjectRegistry.register
class QosPolicyRBAC(base_db.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = rbac_db_models.QosPolicyRBAC
fields = {
'object_id': obj_fields.StringField(),
'target_tenant': obj_fields.StringField(),
'action': obj_fields.StringField(),
}
@base_db.NeutronObjectRegistry.register
class QosPolicy(rbac_db.NeutronRbacObject):
# Version 1.0: Initial version
# Version 1.1: QosDscpMarkingRule introduced
# Version 1.2: Added QosMinimumBandwidthRule
# Version 1.3: Added standard attributes (created_at, revision, etc)
# Version 1.4: Changed tenant_id to project_id
# Version 1.5: Direction for bandwidth limit rule added
# Version 1.6: Added "is_default" field
# Version 1.7: Added floating IP bindings
VERSION = '1.7'
# required by RbacNeutronMetaclass
rbac_db_cls = QosPolicyRBAC
db_model = qos_db_model.QosPolicy
fields = {
'id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
'name': obj_fields.StringField(),
'shared': obj_fields.BooleanField(default=False),
'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True),
'is_default': obj_fields.BooleanField(default=False),
}
fields_no_update = ['id', 'project_id']
synthetic_fields = ['rules', 'is_default']
extra_filter_names = {'is_default'}
binding_models = {'port': binding.QosPolicyPortBinding,
'network': binding.QosPolicyNetworkBinding,
'fip': binding.QosPolicyFloatingIPBinding}
def obj_load_attr(self, attrname):
if attrname == 'rules':
return self._reload_rules()
elif attrname == 'is_default':
return self._reload_is_default()
return super(QosPolicy, self).obj_load_attr(attrname)
def _reload_rules(self):
rules = rule_obj_impl.get_rules(self, self.obj_context, self.id)
setattr(self, 'rules', rules)
self.obj_reset_changes(['rules'])
def _reload_is_default(self):
if self.get_default() == self.id:
setattr(self, 'is_default', True)
else:
setattr(self, 'is_default', False)
self.obj_reset_changes(['is_default'])
def get_rule_by_id(self, rule_id):
"""Return rule specified by rule_id.
@raise QosRuleNotFound: if there is no such rule in the policy.
"""
for rule in self.rules:
if rule_id == rule.id:
return rule
raise exceptions.QosRuleNotFound(policy_id=self.id,
rule_id=rule_id)
# TODO(hichihara): For tag mechanism. This will be removed in bug/1704137
def to_dict(self):
_dict = super(QosPolicy, self).to_dict()
try:
_dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags]
except AttributeError:
# AttrtibuteError can be raised when accessing self.db_obj
# or self.db_obj.standard_attr
pass
return _dict
@classmethod
def get_policy_obj(cls, context, policy_id):
"""Fetch a QoS policy.
:param context: neutron api request context
:type context: neutron.context.Context
:param policy_id: the id of the QosPolicy to fetch
:type policy_id: str uuid
:returns: a QosPolicy object
:raises: n_exc.QosPolicyNotFound
"""
obj = cls.get_object(context, id=policy_id)
if obj is None:
raise exceptions.QosPolicyNotFound(policy_id=policy_id)
return obj
@classmethod
def get_object(cls, context, **kwargs):
# We want to get the policy regardless of its tenant id. We'll make
# sure the tenant has permission to access the policy later on.
admin_context = context.elevated()
with cls.db_context_reader(admin_context):
policy_obj = super(QosPolicy, cls).get_object(admin_context,
**kwargs)
if (not policy_obj or
not cls.is_accessible(context, policy_obj)):
return
policy_obj.obj_load_attr('rules')
policy_obj.obj_load_attr('is_default')
return policy_obj
@classmethod
def get_objects(cls, context, _pager=None, validate_filters=True,
**kwargs):
# We want to get the policy regardless of its tenant id. We'll make
# sure the tenant has permission to access the policy later on.
admin_context = context.elevated()
with cls.db_context_reader(admin_context):
objs = super(QosPolicy, cls).get_objects(admin_context, _pager,
validate_filters,
**kwargs)
result = []
for obj in objs:
if not cls.is_accessible(context, obj):
continue
obj.obj_load_attr('rules')
obj.obj_load_attr('is_default')
result.append(obj)
return result
@classmethod
def _get_object_policy(cls, context, binding_cls, **kwargs):
with cls.db_context_reader(context):
binding_db_obj = obj_db_api.get_object(binding_cls, context,
**kwargs)
if binding_db_obj:
return cls.get_object(context, id=binding_db_obj['policy_id'])
@classmethod
def get_network_policy(cls, context, network_id):
return cls._get_object_policy(context, binding.QosPolicyNetworkBinding,
network_id=network_id)
@classmethod
def get_port_policy(cls, context, port_id):
return cls._get_object_policy(context, binding.QosPolicyPortBinding,
port_id=port_id)
@classmethod
def get_fip_policy(cls, context, fip_id):
return cls._get_object_policy(
context, binding.QosPolicyFloatingIPBinding, fip_id=fip_id)
# TODO(QoS): Consider extending base to trigger registered methods for us
def create(self):
with self.db_context_writer(self.obj_context):
super(QosPolicy, self).create()
if self.is_default:
self.set_default()
self.obj_load_attr('rules')
def update(self):
with self.db_context_writer(self.obj_context):
if 'is_default' in self.obj_what_changed():
if self.is_default:
self.set_default()
else:
self.unset_default()
super(QosPolicy, self).update()
def delete(self):
with self.db_context_writer(self.obj_context):
for object_type, obj_class in self.binding_models.items():
pager = base_db.Pager(limit=1)
binding_obj = obj_class.get_objects(self.obj_context,
policy_id=self.id,
_pager=pager)
if binding_obj:
raise exceptions.QosPolicyInUse(
policy_id=self.id,
object_type=object_type,
object_id=binding_obj[0]['%s_id' % object_type])
super(QosPolicy, self).delete()
def attach_network(self, network_id):
network_binding = {'policy_id': self.id,
'network_id': network_id}
network_binding_obj = binding.QosPolicyNetworkBinding(
self.obj_context, **network_binding)
try:
network_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.NetworkQosBindingError(policy_id=self.id,
net_id=network_id,
db_error=e)
def attach_port(self, port_id):
port_binding_obj = binding.QosPolicyPortBinding(
self.obj_context, policy_id=self.id, port_id=port_id)
try:
port_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.PortQosBindingError(policy_id=self.id,
port_id=port_id,
db_error=e)
def attach_floatingip(self, fip_id):
fip_binding_obj = binding.QosPolicyFloatingIPBinding(
self.obj_context, policy_id=self.id, fip_id=fip_id)
try:
fip_binding_obj.create()
except db_exc.DBReferenceError as e:
raise exceptions.FloatingIPQosBindingError(policy_id=self.id,
fip_id=fip_id,
db_error=e)
def detach_network(self, network_id):
deleted = binding.QosPolicyNetworkBinding.delete_objects(
self.obj_context, network_id=network_id)
if not deleted:
raise exceptions.NetworkQosBindingNotFound(net_id=network_id,
policy_id=self.id)
def detach_port(self, port_id):
deleted = binding.QosPolicyPortBinding.delete_objects(self.obj_context,
port_id=port_id)
if not deleted:
raise exceptions.PortQosBindingNotFound(port_id=port_id,
policy_id=self.id)
def detach_floatingip(self, fip_id):
deleted = binding.QosPolicyFloatingIPBinding.delete_objects(
self.obj_context, fip_id=fip_id)
if not deleted:
raise exceptions.FloatingIPQosBindingNotFound(fip_id=fip_id,
policy_id=self.id)
def set_default(self):
if not self.get_default():
qos_default_policy = QosPolicyDefault(self.obj_context,
qos_policy_id=self.id,
project_id=self.project_id)
qos_default_policy.create()
elif self.get_default() != self.id:
raise exceptions.QoSPolicyDefaultAlreadyExists(
project_id=self.project_id)
def unset_default(self):
if self.get_default() == self.id:
qos_default_policy = QosPolicyDefault.get_object(
self.obj_context, project_id=self.project_id)
qos_default_policy.delete()
def get_default(self):
qos_default_policy = QosPolicyDefault.get_object(
self.obj_context, project_id=self.project_id)
if qos_default_policy:
return qos_default_policy.qos_policy_id
def get_bound_networks(self):
return [
nb.network_id
for nb in binding.QosPolicyNetworkBinding.get_objects(
self.obj_context, policy_id=self.id)
]
def get_bound_ports(self):
return [
pb.port_id
for pb in binding.QosPolicyPortBinding.get_objects(
self.obj_context, policy_id=self.id)
]
def get_bound_floatingips(self):
return [
fb.fip_id
for fb in binding.QosPolicyFloatingIPBinding.get_objects(
self.obj_context, policy_id=self.id)
]
@classmethod
def _get_bound_tenant_ids(cls, session, binding_db, bound_db,
binding_db_id_column, policy_id):
return list(itertools.chain.from_iterable(
session.query(bound_db.tenant_id).join(
binding_db, bound_db.id == binding_db_id_column).filter(
binding_db.policy_id == policy_id).all()))
@classmethod
def get_bound_tenant_ids(cls, context, policy_id):
"""Implements RbacNeutronObject.get_bound_tenant_ids.
:returns: set -- a set of tenants' ids dependent on QosPolicy.
"""
net = models_v2.Network
qosnet = qos_db_model.QosNetworkPolicyBinding
port = models_v2.Port
qosport = qos_db_model.QosPortPolicyBinding
fip = l3.FloatingIP
qosfip = qos_db_model.QosFIPPolicyBinding
bound_tenants = []
with cls.db_context_reader(context):
bound_tenants.extend(cls._get_bound_tenant_ids(
context.session, qosnet, net, qosnet.network_id, policy_id))
bound_tenants.extend(
cls._get_bound_tenant_ids(context.session, qosport, port,
qosport.port_id, policy_id))
bound_tenants.extend(
cls._get_bound_tenant_ids(context.session, qosfip, fip,
qosfip.fip_id, policy_id))
return set(bound_tenants)
def obj_make_compatible(self, primitive, target_version):
def filter_rules(obj_names, rules):
return [rule for rule in rules if
rule['versioned_object.name'] in obj_names]
def filter_ingress_bandwidth_limit_rules(rules):
bwlimit_obj_name = rule_obj_impl.QosBandwidthLimitRule.obj_name()
filtered_rules = []
for rule in rules:
if rule['versioned_object.name'] == bwlimit_obj_name:
direction = rule['versioned_object.data'].get("direction")
if direction == n_const.EGRESS_DIRECTION:
rule['versioned_object.data'].pop('direction')
filtered_rules.append(rule)
else:
filtered_rules.append(rule)
return filtered_rules
_target_version = versionutils.convert_version_to_tuple(target_version)
names = []
if _target_version >= (1, 0):
names.append(rule_obj_impl.QosBandwidthLimitRule.obj_name())
if _target_version >= (1, 1):
names.append(rule_obj_impl.QosDscpMarkingRule.obj_name())
if _target_version >= (1, 2):
names.append(rule_obj_impl.QosMinimumBandwidthRule.obj_name())
if 'rules' in primitive and names:
primitive['rules'] = filter_rules(names, primitive['rules'])
if _target_version < (1, 3):
standard_fields = ['revision_number', 'created_at', 'updated_at']
for f in standard_fields:
primitive.pop(f)
if primitive['description'] is None:
# description was not nullable before
raise exception.IncompatibleObjectVersion(
objver=target_version, objname='QoSPolicy')
if _target_version < (1, 4):
primitive['tenant_id'] = primitive.pop('project_id')
if _target_version < (1, 5):
if 'rules' in primitive:
primitive['rules'] = filter_ingress_bandwidth_limit_rules(
primitive['rules'])
if _target_version < (1, 6):
primitive.pop('is_default', None)
@base_db.NeutronObjectRegistry.register
class QosPolicyDefault(base_db.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = qos_db_model.QosPolicyDefault
fields = {
'qos_policy_id': common_types.UUIDField(),
'project_id': obj_fields.StringField(),
}
primary_keys = ['project_id']
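# ---------------------------------------------------------------------------
# Editor's note: a minimal, self-contained sketch of the version-downgrade
# filtering performed by QosPolicy.obj_make_compatible() above: rule
# primitives whose type is newer than the requested object version are
# dropped.  The helper below is not part of Neutron and touches neither
# Neutron nor oslo.versionedobjects; the example mapping mirrors the version
# thresholds used in that method.
def _filter_rules_for_version(primitive, target_version, first_carried_in):
    """Drop rules introduced after ``target_version`` from ``primitive``.
    ``first_carried_in`` maps rule object names to the (major, minor)
    QosPolicy version that first carried them,
    e.g. {'QosDscpMarkingRule': (1, 1)}.
    """
    allowed = {name for name, since in first_carried_in.items()
               if target_version >= since}
    primitive['rules'] = [rule for rule in primitive.get('rules', [])
                          if rule['versioned_object.name'] in allowed]
    return primitive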
| 39.529274 | 79 | 0.605486 |
4daa5d1bc78511764107c9706af4fd798f131cfe | 19,287 | py | Python | sdk/python/pulumi_azure_native/network/v20180601/route_filter_rule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/route_filter_rule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/route_filter_rule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['RouteFilterRuleArgs', 'RouteFilterRule']
@pulumi.input_type
class RouteFilterRuleArgs:
def __init__(__self__, *,
access: pulumi.Input[Union[str, 'Access']],
communities: pulumi.Input[Sequence[pulumi.Input[str]]],
resource_group_name: pulumi.Input[str],
route_filter_name: pulumi.Input[str],
route_filter_rule_type: pulumi.Input[Union[str, 'RouteFilterRuleType']],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RouteFilterRule resource.
:param pulumi.Input[Union[str, 'Access']] access: The access type of the rule. Valid values are: 'Allow', 'Deny'
:param pulumi.Input[Sequence[pulumi.Input[str]]] communities: The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Union[str, 'RouteFilterRuleType']] route_filter_rule_type: The rule type of the rule. Valid value is: 'Community'
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] rule_name: The name of the route filter rule.
"""
pulumi.set(__self__, "access", access)
pulumi.set(__self__, "communities", communities)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "route_filter_name", route_filter_name)
pulumi.set(__self__, "route_filter_rule_type", route_filter_rule_type)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
@property
@pulumi.getter
def access(self) -> pulumi.Input[Union[str, 'Access']]:
"""
The access type of the rule. Valid values are: 'Allow', 'Deny'
"""
return pulumi.get(self, "access")
@access.setter
def access(self, value: pulumi.Input[Union[str, 'Access']]):
pulumi.set(self, "access", value)
@property
@pulumi.getter
def communities(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
"""
return pulumi.get(self, "communities")
@communities.setter
def communities(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "communities", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="routeFilterName")
def route_filter_name(self) -> pulumi.Input[str]:
"""
The name of the route filter.
"""
return pulumi.get(self, "route_filter_name")
@route_filter_name.setter
def route_filter_name(self, value: pulumi.Input[str]):
pulumi.set(self, "route_filter_name", value)
@property
@pulumi.getter(name="routeFilterRuleType")
def route_filter_rule_type(self) -> pulumi.Input[Union[str, 'RouteFilterRuleType']]:
"""
The rule type of the rule. Valid value is: 'Community'
"""
return pulumi.get(self, "route_filter_rule_type")
@route_filter_rule_type.setter
def route_filter_rule_type(self, value: pulumi.Input[Union[str, 'RouteFilterRuleType']]):
pulumi.set(self, "route_filter_rule_type", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the route filter rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
class RouteFilterRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'Access']]] = None,
communities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
route_filter_rule_type: Optional[pulumi.Input[Union[str, 'RouteFilterRuleType']]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Route Filter Rule Resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'Access']] access: The access type of the rule. Valid values are: 'Allow', 'Deny'
:param pulumi.Input[Sequence[pulumi.Input[str]]] communities: The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Union[str, 'RouteFilterRuleType']] route_filter_rule_type: The rule type of the rule. Valid value is: 'Community'
:param pulumi.Input[str] rule_name: The name of the route filter rule.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteFilterRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route Filter Rule Resource
:param str resource_name: The name of the resource.
:param RouteFilterRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteFilterRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'Access']]] = None,
communities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
route_filter_rule_type: Optional[pulumi.Input[Union[str, 'RouteFilterRuleType']]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteFilterRuleArgs.__new__(RouteFilterRuleArgs)
if access is None and not opts.urn:
raise TypeError("Missing required property 'access'")
__props__.__dict__["access"] = access
if communities is None and not opts.urn:
raise TypeError("Missing required property 'communities'")
__props__.__dict__["communities"] = communities
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if route_filter_name is None and not opts.urn:
raise TypeError("Missing required property 'route_filter_name'")
__props__.__dict__["route_filter_name"] = route_filter_name
if route_filter_rule_type is None and not opts.urn:
raise TypeError("Missing required property 'route_filter_rule_type'")
__props__.__dict__["route_filter_rule_type"] = route_filter_rule_type
__props__.__dict__["rule_name"] = rule_name
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteFilterRule"), pulumi.Alias(type_="azure-native:network:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20161201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20170301:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20170601:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170601:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20170801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20170901:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20171001:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20171101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20180101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20180201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20180401:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20180701:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20180801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20181001:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20181101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20181201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20190201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20190401:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20190601:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20190701:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20190801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20190901:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20191101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20191201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20200301:RouteFilterRule"), 
pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20200401:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20200501:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20200601:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20200701:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20200801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200801:RouteFilterRule"), pulumi.Alias(type_="azure-native:network/v20201101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20201101:RouteFilterRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteFilterRule, __self__).__init__(
'azure-native:network/v20180601:RouteFilterRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilterRule':
"""
Get an existing RouteFilterRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteFilterRuleArgs.__new__(RouteFilterRuleArgs)
__props__.__dict__["access"] = None
__props__.__dict__["communities"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["route_filter_rule_type"] = None
return RouteFilterRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The access type of the rule. Valid values are: 'Allow', 'Deny'
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def communities(self) -> pulumi.Output[Sequence[str]]:
"""
The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
"""
return pulumi.get(self, "communities")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilterRuleType")
def route_filter_rule_type(self) -> pulumi.Output[str]:
"""
The rule type of the rule. Valid value is: 'Community'
"""
return pulumi.get(self, "route_filter_rule_type")
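# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not part of the generated SDK.  The
# resource name, resource group name and route filter name below are
# placeholders; creating the resource only makes sense inside a running
# Pulumi program with Azure credentials configured.
def _example_route_filter_rule() -> RouteFilterRule:
    return RouteFilterRule(
        "exampleRouteFilterRule",
        access="Allow",
        communities=["12076:5010", "12076:5020"],
        resource_group_name="example-rg",
        route_filter_name="example-route-filter",
        route_filter_rule_type="Community",
        rule_name="exampleRule",
    )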
| 55.742775 | 4,475 | 0.680095 |
d70cbfddf8575eea76be97a6df90d7cffaf43d98 | 7,120 | py | Python | kaolin/datasets/base.py | zuru/kaolin | 343a820c75383dd01b6f2247f237073f3e8dcb46 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kaolin/datasets/base.py | zuru/kaolin | 343a820c75383dd01b6f2247f237073f3e8dcb46 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kaolin/datasets/base.py | zuru/kaolin | 343a820c75383dd01b6f2247f237073f3e8dcb46 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-06T06:03:13.000Z | 2020-05-06T06:03:13.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from kaolin import helpers
def _preprocess_task(args):
torch.set_num_threads(1)
with torch.no_grad():
idx, get_data, get_attributes, cache_transform = args
name = get_attributes(idx)['name']
if name not in cache_transform.cached_ids:
data = get_data(idx)
cache_transform(name, data)
class KaolinDatasetMeta(type):
def __new__(metacls, cls_name, base_cls, class_dict):
if cls_name != "KaolinDataset":
class_dict['__doc__'] += \
"""Additional args:
preprocessing_params (dict): parameters for the preprocessing:
- 'cache_dir': path to the cached preprocessed data.
- 'num_workers': number of process used in parallel for preprocessing (default: number of cores)
preprocessing_transform (Callable): Called on the outputs of _get_data over the indices
from 0 to len(self) during the construction of the dataset,
the preprocessed outputs are then cached to 'cache_dir'.
transform (Callable): Called on the preprocessed data at __getitem__.
no_progress (bool): disable tqdm progress bar for preprocessing."""
return type.__new__(metacls, cls_name, base_cls, class_dict)
class KaolinDataset(Dataset, metaclass=KaolinDatasetMeta):
"""
    Abstract base class for datasets, with handling of multiprocess or CUDA preprocessing.
    A KaolinDataset child class must implement the following:
    1) initialize:
        Initialization function called at the beginning of the constructor.
    2) _get_data:
        Data getter that will be preprocessed => cached => transformed; takes an index as input.
    3) _get_attributes:
        Attributes getter that is independent of the preprocessing and transform steps; takes an index as input.
    4) __len__:
        Returns the size of the dataset.
"""
def __init__(self, *args, preprocessing_transform=None, preprocessing_params: dict = None,
transform=None, no_progress: bool = False, **kwargs):
"""
Args:
positional and keyword arguments for initialize(*args, **kwargs) (see class and initialize documentation)
preprocessing_params (dict): parameters for the preprocessing:
- 'cache_dir': path to the cached preprocessed data.
- 'num_workers': number of process used in parallel for preprocessing (default: number of cores)
preprocessing_transform (Callable): Called on the outputs of _get_data over the indices
from 0 to len(self) during the construction of the dataset,
the preprocessed outputs are then cached to 'cache_dir'.
transform (Callable): Called on the preprocessed data at __getitem__.
no_progress (bool): disable tqdm progress bar for preprocessing.
"""
self.initialize(*args, **kwargs)
if preprocessing_transform is not None:
desc = 'Applying preprocessing'
if preprocessing_params is None:
preprocessing_params = {}
cache_dir = preprocessing_params.get('cache_dir')
assert cache_dir is not None, 'Cache directory is not given'
self.cache_convert = helpers.Cache(
preprocessing_transform,
cache_dir=cache_dir,
cache_key=helpers._get_hash(repr(preprocessing_transform))
)
use_cuda = preprocessing_params.get('use_cuda', False)
num_workers = preprocessing_params.get('num_workers')
if num_workers == 0:
with torch.no_grad():
for idx in tqdm(range(len(self)), desc=desc, disable=no_progress):
name = self._get_attributes(idx)['name']
if name not in self.cache_convert.cached_ids:
data = self._get_data(idx)
self.cache_convert(name, data)
else:
p = Pool(num_workers)
iterator = p.imap_unordered(
_preprocess_task,
[(idx, self._get_data, self._get_attributes, self.cache_convert)
for idx in range(len(self))])
for i in tqdm(range(len(self)), desc=desc, disable=no_progress):
next(iterator)
else:
self.cache_convert = None
self.transform = transform
def __getitem__(self, index):
"""Returns the item at index idx. """
attributes = self._get_attributes(index)
data = (self._get_data(index) if self.cache_convert is None else
self.cache_convert(attributes['name']))
if self.transform is not None:
data = self.transform(data)
return {'data': data, 'attributes': attributes}
@abstractmethod
def initialize(self, *args, **kwargs):
pass
@abstractmethod
def _get_attributes(self, index):
pass
@abstractmethod
def _get_data(self, index):
pass
@abstractmethod
def __len__(self):
pass
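# --- Illustrative sketch only -------------------------------------------------
# A minimal KaolinDataset subclass, assuming the four abstract hooks above are
# all a child class must provide. The class and attribute names below
# (_DummyPointDataset, `points`) are hypothetical and not part of the library.
class _DummyPointDataset(KaolinDataset):
    def initialize(self, points):
        # `points` is any indexable collection (e.g. a list of tensors).
        self.points = points
    def _get_data(self, index):
        return self.points[index]
    def _get_attributes(self, index):
        # 'name' is used as the cache key when preprocessing is enabled.
        return {'name': str(index)}
    def __len__(self):
        return len(self.points)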
class CombinationDataset(KaolinDataset):
"""Dataset combining a list of datasets into a unified dataset object.
Useful when multiple output representations are needed from a common base representation
(Eg. when a mesh is to be served as both a pointcloud and a voxelgrid, etc.)
    the output of _get_attributes will be a tuple of all the _get_attributes of the datasets in the list;
    the output of _get_data will be a tuple of all the 'data' entries of the __getitem__ of the datasets in the list.
Args:
datasets: list or tuple of KaolinDataset
"""
def initialize(self, datasets):
self.len = len(datasets[0])
for i, d in enumerate(datasets):
assert len(d) == self.len, \
f"All datasets must have the same length. Invalid length at index {i} (expected: {self.len}, got: {len(d)})"
self.datasets = datasets
def __len__(self):
return self.len
def _get_attributes(self, index):
        return tuple(d._get_attributes(index) for d in self.datasets)
def _get_data(self, index):
        return tuple(d[index]['data'] for d in self.datasets)
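# Illustrative usage sketch (names are hypothetical, not defined in this file):
# mesh_ds and voxel_ds would be two KaolinDataset instances of equal length.
#   combined = CombinationDataset((mesh_ds, voxel_ds))
#   sample = combined[0]
#   sample['data']       -> (mesh_ds[0]['data'], voxel_ds[0]['data'])
#   sample['attributes'] -> (mesh attributes, voxel attributes)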
| 40.454545 | 124 | 0.635112 |
825e7b700a5f39c734ab662d408e94a3c08b24e3 | 3,860 | py | Python | homeassistant/components/ecobee.py | loraxx753/skynet | 86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543 | [
"Apache-2.0"
] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | homeassistant/components/ecobee.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | homeassistant/components/ecobee.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | [
"MIT"
] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z | """
Support for Ecobee.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ecobee/
"""
import logging
import os
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.const import CONF_API_KEY
from homeassistant.loader import get_component
from homeassistant.util import Throttle
REQUIREMENTS = [
'https://github.com/nkgilley/python-ecobee-api/archive/'
'4856a704670c53afe1882178a89c209b5f98533d.zip#python-ecobee==0.0.6']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
CONF_HOLD_TEMP = 'hold_temp'
DOMAIN = 'ecobee'
ECOBEE_CONFIG_FILE = 'ecobee.conf'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=180)
NETWORK = None
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean
})
}, extra=vol.ALLOW_EXTRA)
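# Illustrative only: a configuration mapping that CONFIG_SCHEMA above would
# accept (the api_key value is a placeholder, not a real key):
#   CONFIG_SCHEMA({DOMAIN: {CONF_API_KEY: 'your-ecobee-api-key',
#                           CONF_HOLD_TEMP: True}})
# returns the validated copy, or raises vol.Invalid on a malformed config.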
def request_configuration(network, hass, config):
"""Request configuration steps from the user."""
configurator = get_component('configurator')
if 'ecobee' in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING['ecobee'], "Failed to register, please try again.")
return
# pylint: disable=unused-argument
def ecobee_configuration_callback(callback_data):
"""The actions to do when our configuration callback is called."""
network.request_tokens()
network.update()
setup_ecobee(hass, network, config)
_CONFIGURING['ecobee'] = configurator.request_config(
hass, "Ecobee", ecobee_configuration_callback,
description=(
'Please authorize this app at https://www.ecobee.com/consumer'
'portal/index.html with pin code: ' + network.pin),
description_image="/static/images/config_ecobee_thermostat.png",
submit_caption="I have authorized the app."
)
def setup_ecobee(hass, network, config):
"""Setup Ecobee thermostat."""
# If ecobee has a PIN then it needs to be configured.
if network.pin is not None:
request_configuration(network, hass, config)
return
if 'ecobee' in _CONFIGURING:
configurator = get_component('configurator')
configurator.request_done(_CONFIGURING.pop('ecobee'))
hold_temp = config[DOMAIN].get(CONF_HOLD_TEMP)
discovery.load_platform(hass, 'climate', DOMAIN,
{'hold_temp': hold_temp}, config)
discovery.load_platform(hass, 'sensor', DOMAIN, {}, config)
discovery.load_platform(hass, 'binary_sensor', DOMAIN, {}, config)
class EcobeeData(object):
"""Get the latest data and update the states."""
def __init__(self, config_file):
"""Initialize the Ecobee data object."""
from pyecobee import Ecobee
self.ecobee = Ecobee(config_file)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from pyecobee."""
self.ecobee.update()
_LOGGER.info("Ecobee data updated successfully")
def setup(hass, config):
"""Setup Ecobee.
Will automatically load thermostat and sensor components to support
devices discovered on the network.
"""
# pylint: disable=global-statement, import-error
global NETWORK
if 'ecobee' in _CONFIGURING:
return
from pyecobee import config_from_file
# Create ecobee.conf if it doesn't exist
if not os.path.isfile(hass.config.path(ECOBEE_CONFIG_FILE)):
jsonconfig = {"API_KEY": config[DOMAIN].get(CONF_API_KEY)}
config_from_file(hass.config.path(ECOBEE_CONFIG_FILE), jsonconfig)
NETWORK = EcobeeData(hass.config.path(ECOBEE_CONFIG_FILE))
setup_ecobee(hass, NETWORK.ecobee, config)
return True
| 30.15625 | 76 | 0.704404 |
f00dd97a90a95174346c00dd9e5cef796d2a6b18 | 2,443 | py | Python | BERT/train/model.py | ShuntaIto/azureml-pl-sample | e5ae7b0a06d72f7b1371675f42ef9708cc8ea2c5 | [
"MIT"
] | 6 | 2021-04-02T07:22:51.000Z | 2021-07-14T08:45:42.000Z | BERT/train/model.py | ShuntaIto/azureml-pl-sample | e5ae7b0a06d72f7b1371675f42ef9708cc8ea2c5 | [
"MIT"
] | null | null | null | BERT/train/model.py | ShuntaIto/azureml-pl-sample | e5ae7b0a06d72f7b1371675f42ef9708cc8ea2c5 | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
from transformers import BertJapaneseTokenizer
from transformers import BertModel
from azureml.core import Run
run = Run.get_context()
class BERTClassificationModel(pl.LightningModule):
def __init__(self, bert_lr=5e-5, output_lr=1e-4):
super(BERTClassificationModel, self).__init__()
self.bert = BertModel.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
self.output = nn.Linear(768, 9)
self.bert_lr = bert_lr
self.output_lr = output_lr
self.train_acc = pl.metrics.Accuracy()
self.val_acc = pl.metrics.Accuracy()
self.test_acc = pl.metrics.Accuracy()
def forward(self, input_ids, attention_mask, token_type_ids):
y = self.bert(input_ids, attention_mask, token_type_ids).last_hidden_state
        ## extract only the hidden state corresponding to the [CLS] token
y = y[:,0,:]
y = y.view(-1, 768)
y = self.output(y)
y = F.softmax(y, dim=1)
return y
def training_step(self, batch, batch_nb):
x, t = batch
y = self(x['input_ids'], x['attention_mask'], x['token_type_ids'])
loss = F.cross_entropy(y, t)
run.log("loss", float(loss))
##self.log("loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
return loss
def validation_step(self, batch, batch_nb):
x, t = batch
y = self(x['input_ids'], x['attention_mask'], x['token_type_ids'])
loss = F.cross_entropy(y, t)
preds = torch.argmax(y, dim=1)
run.log("val_loss", float(loss))
run.log("val_acc", float(self.val_acc(y,t)))
##self.log('val_loss', loss, prog_bar=True)
##self.log('val_acc', self.val_acc(y,t), prog_bar=True)
return loss
def test_step(self, batch, batch_nb):
x, t = batch
y = self(x['input_ids'], x['attention_mask'], x['token_type_ids'])
loss = F.cross_entropy(y, t)
preds = torch.argmax(y, dim=1)
run.log("test_loss", float(loss))
run.log("test_acc", float(self.test_acc(y,t)))
return loss
def configure_optimizers(self):
return torch.optim.Adam([
{'params': self.bert.encoder.layer[-1].parameters(),
'lr': self.bert_lr},
{'params': self.output.parameters(), 'lr': self.output_lr}
])
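# --- Illustrative training sketch (not part of the original module) -----------
# Assumes dataloaders yielding (tokenized_batch, label) pairs shaped exactly as
# consumed by the *_step methods above; `train_loader` / `val_loader` are
# hypothetical, and the Trainer arguments are only an example.
#   model = BERTClassificationModel(bert_lr=5e-5, output_lr=1e-4)
#   trainer = pl.Trainer(max_epochs=3, gpus=1)
#   trainer.fit(model, train_loader, val_loader)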
| 35.926471 | 96 | 0.618502 |
baf8df5344a0605ac4b758ec2135596f72816053 | 13,116 | py | Python | fdia_simulation/models/radar.py | QDucasse/FDIA_simulation | bdd0cb072f07b9a96fd82df581c9c7493ae66cbc | [
"MIT"
] | 7 | 2020-12-11T16:20:59.000Z | 2022-01-11T21:18:25.000Z | fdia_simulation/models/radar.py | QDucasse/FDIA_simulation | bdd0cb072f07b9a96fd82df581c9c7493ae66cbc | [
"MIT"
] | 2 | 2020-09-25T06:56:56.000Z | 2021-06-25T15:40:38.000Z | fdia_simulation/models/radar.py | QDucasse/FDIA_simulation | bdd0cb072f07b9a96fd82df581c9c7493ae66cbc | [
"MIT"
] | 5 | 2019-08-27T11:13:31.000Z | 2021-11-26T12:52:19.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 13:07:46 2019
@author: qde
"""
import numpy as np
import matplotlib.pyplot as plt
from math import cos,sin,sqrt,pi,atan2
from numpy.random import randn
from filterpy.common import pretty_str
from fdia_simulation.models import ManeuveredAirplane, NoisySensor, Track, ManeuveredSystem, Command
class Radar(object):
'''Implements a simulated radar.
The radar will output a data set corresponding to typical radar values.
Attributes
----------
x, y, z: floats
Radar position along x, y and z-axis.
r_std: float
Standard deviation on the measurement of r. Default value of 1.
theta_std: float
Standard deviation on the measurement of theta. Default value of 0.1
phi_std: float
Standard deviation on the measurement of phi. Default value of 0.1
Parameters
----------
Identical to Attributes
'''
DT_RADAR = 0.1
def __init__(self, x = 0, y = 0, z = 0, dt = None,
r_std = 1., theta_std = 0.001, phi_std = 0.001):
if dt is None:
dt = self.DT_RADAR
self.dt = dt
self.x = x
self.y = y
self.z = z
self.step = self.dt / Track.DT_TRACK # Sampling step from the position data
self.r_std = r_std
self.theta_std = theta_std
self.phi_std = phi_std
self.R = np.array([[r_std,0 ,0 ],
[0 ,theta_std,0 ],
[0 ,0 ,phi_std]])
def get_position(self):
'''
Position accessor.
Returns
-------
position: float iterable
[x,y,z] of the radar.
'''
return [self.x,self.y,self.z]
def gen_radar_values(self,x,y,z):
'''
Computes the three parameters r, theta and phi from the given positions.
Parameters
----------
x,y,z: floats
Position of the airplane.
Returns
-------
r,theta,phi: floats
Radar values corresponding to the input position.
'''
        # Express the airplane position relative to the radar location
x -= self.x
y -= self.y
z -= self.z
# Computation of the distance of the airplane
r = sqrt(x**2 + y**2 + z**2)
# Computation of the turning angle of the airplane
theta = atan2(y,x)
# Computation of the elevation angle of the airplane
phi = atan2(z, sqrt(x**2 + y**2))
return r, theta, phi
def sample_position_data(self,position_data):
'''
Samples the initial position data (computed with dt = 0.01) to reduce it
to the actual data rate of the radar.
'''
sampled_position_data = position_data[::int(self.step)]
return sampled_position_data
def gen_data(self,position_data):
'''
Generates simulated received data for a radar.
Parameters
----------
position_data: float list numpy array
List of positions in the form of lists [x_k, y_k, z_k].
Corresponding to:
x_k: float
Position along x-axis.
y_k: float
Position along y-axis.
z_k: float
Position along z-axis.
Returns
-------
rs, thetas, phis: float iterables
Distances, azimuth/turn angles and elevation angles.
'''
rs, thetas, phis = [], [], []
for position in position_data:
x_k = position[0]
y_k = position[1]
z_k = position[2]
# Computation of the supposed distance of the airplane
r_k, theta_k, phi_k = self.gen_radar_values(x_k,y_k,z_k)
rs.append(r_k)
thetas.append(theta_k)
phis.append(phi_k)
return rs, thetas, phis
def sense(self, rs, thetas, phis):
'''
Simulates real sensors by adding noise to the predicted simulated values.
Parameters
----------
rs, thetas, phis: float iterable
Distances, azimuth/turn angles and elevation angles.
Returns
-------
noisy_rs, noisy_thetas, noisy_phis: float iterable
Distances, azimuth/turn angles and elevation angles with added white noise.
'''
nsr = NoisySensor(std_noise = self.r_std)
nstheta = NoisySensor(std_noise = self.theta_std)
nsphi = NoisySensor(std_noise = self.phi_std)
noisy_rs = [nsr.sense(r) for r in rs]
noisy_thetas = [nstheta.sense(theta) for theta in thetas]
noisy_phis = [nsphi.sense(phi) for phi in phis]
return noisy_rs, noisy_thetas, noisy_phis
def gen_position_vals(self,r,theta,phi):
'''
Compute the position from the radar values r, theta and phi.
Parameters
----------
r,theta,phi: floats
Radar values.
Returns
-------
x,y,z: floats
Sensed position of the airplane extracted from the measurement given
in input.
'''
x = r * cos(theta) * cos(phi) + self.x
y = r * sin(theta) * cos(phi) + self.y
z = r * sin(phi) + self.z
return x,y,z
def radar2cartesian(self,rs,thetas,phis):
'''
Transcripts the radar measured values (r, theta, phi) to cartesian
positions (x, y, z).
Parameters
----------
rs: float iterable
List of the rs (distance) measured by the radar.
thetas: float iterable
List of the thetas (azimuth/turn angle) measured by the radar.
phis: float iterable
List of the phis (elevation angle) measured by the radar.
Returns
-------
xs: float iterable
List of the computed positions along x-axis.
ys: float iterable
List of the computed positions along y-axis.
zs: float iterable
List of the computed positions along z-axis.
'''
xs,ys,zs = [],[],[]
for r,theta,phi in zip(rs,thetas,phis):
x_k,y_k,z_k = self.gen_position_vals(r,theta,phi)
xs.append(x_k)
ys.append(y_k)
zs.append(z_k)
return xs,ys,zs
def __eq__(self,other):
eq_dt = (self.dt == other.dt)
eq_pos = (
(self.y == other.y) and
(self.x == other.x) and
(self.z == other.z)
)
eq_std = (
(self.r_std == other.r_std) and
(self.theta_std == other.theta_std) and
(self.phi_std == other.phi_std)
)
return all([eq_dt,eq_pos,eq_std])
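# Illustrative round trip (sketch only): gen_radar_values and gen_position_vals
# are inverse transforms around the radar position, so a position should survive
# the conversion up to floating point error:
#   radar = Radar(x=100, y=100, z=0)
#   r, theta, phi = radar.gen_radar_values(1000., 2000., 8000.)
#   radar.gen_position_vals(r, theta, phi)  # ~(1000., 2000., 8000.)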
class LabeledMeasurement(object):
'''
Measurement labeled with a tag (radar ownership) and a timestamp (date of the
measurement).
Parameters
----------
tag: int
Tag (position) of the radar emitting this measurement.
time: float
Time of the measurement, starting from the beginning of the observation.
value: float numpy array
Array containing [r, theta, phi], measurement of tagged radar at the
given time.
'''
def __init__(self,tag,time,value):
self.tag = tag
self.time = time
self.value = value
'''
    Redefinition of the comparison operators using the time of measurement as
    the only criterion.
'''
def __gt__(self,other):
return (self.time > other.time)
def __ge__(self,other):
return (self.time >= other.time)
def __le__(self,other):
return (self.time <= other.time)
def __lt__(self,other):
return (self.time < other.time)
def __eq__(self,other):
eq_time = (self.time == other.time)
eq_tag = (self.tag == other.tag)
eq_value = np.array_equal(self.value,other.value)
return (eq_time and eq_tag and eq_value)
def __repr__(self):
return '\n'.join([
'LabeledMeasurement object',
pretty_str('tag', self.tag),
pretty_str('time', self.time),
pretty_str('value', self.value)])
class PeriodRadar(Radar):
'''
Implements a radar with a given data rate (dt).
Attributes
----------
Radar attributes +
dt: float
Data rate of the radar.
tag: int
Radar tag (position in the radars list).
time_std: float
Standard deviation of the time. Default value of 0.001
Parameters
----------
Identical to attributes
'''
def __init__(self, x, y, z=0, dt = None,
r_std = 1., theta_std = 0.001, phi_std = 0.001,
time_std = 0.001):
if dt is None:
dt = Radar.DT_RADAR
self.time_std = time_std
self.tag = 0
Radar.__init__(self,x = x, y = y, z = z, dt = dt,
r_std = r_std, theta_std = theta_std, phi_std = phi_std)
def compute_meas_times(self, size):
'''
        Computes the measurement times by repeatedly adding dt (jittered by the
        time_std).
Parameters
----------
size: int
Size of the list of times.
Returns
-------
meas_times: float list
List of the sample times.
'''
t_k = 0
meas_times = [t_k]
for _ in range(size-1):
t_k += self.dt + randn()*self.time_std # Adding a time jitter
meas_times.append(t_k)
return meas_times
def compute_measurements(self,position_data):
'''
Computes the measurements of given positions.
Parameters
----------
position_data: float numpy array
Array of positions [x,y,z].
Returns
-------
measurements: LabeledMeasurement list
List of labeled measurements with time and tag.
'''
rs, thetas, phis = self.gen_data(position_data)
noisy_rs, noisy_thetas, noisy_phis = self.sense(rs, thetas, phis)
n = len(noisy_rs)
measurement_times = self.compute_meas_times(n)
measurements = []
for i in range(n):
value = [noisy_rs[i], noisy_thetas[i], noisy_phis[i]]
measurement = LabeledMeasurement(tag = self.tag,
time = measurement_times[i],
value = value)
measurements.append(measurement)
return measurements
if __name__ == "__main__":
#================== Positions generation for the airplane ==================
trajectory = Track()
states = trajectory.gen_landing()
xs = states[:,0]
ys = states[:,3]
zs = states[:,6]
position_data = np.array(list(zip(xs,ys,zs)))
# ==========================================================================
# ========================== Radars generation =============================
# Radar 1
radar = Radar(x=-6000,y=10000, dt = 0.4)
rs, thetas, phis = radar.gen_data(position_data)
noisy_rs, noisy_thetas, noisy_phis = radar.sense(rs, thetas, phis)
xs_from_rad, ys_from_rad, zs_from_rad = radar.radar2cartesian(noisy_rs, noisy_thetas, noisy_phis)
radar_values = np.array(list(zip(noisy_rs, noisy_thetas, noisy_phis)))
# print("Noisy radar data:\n{0}\n".format(radar_values[-25:,:]))
radar_computed_values = np.array(list(zip(xs_from_rad, ys_from_rad, zs_from_rad)))
# print("Estimated positions:\n{0}\n".format(radar_computed_values[-25:,:]))
radar2 = Radar(x=-6000,y=10000, dt = 0.7)
rs2, thetas2, phis2 = radar2.gen_data(position_data)
noisy_rs2, noisy_thetas2, noisy_phis2 = radar2.sense(rs2, thetas2, phis2)
xs_from_rad2, ys_from_rad2, zs_from_rad2 = radar2.radar2cartesian(noisy_rs2, noisy_thetas2, noisy_phis2)
radar_values2 = np.array(list(zip(noisy_rs2, noisy_thetas2, noisy_phis2)))
# print("Noisy radar data:\n{0}\n".format(radar_values[-25:,:]))
radar_computed_values2 = np.array(list(zip(xs_from_rad2, ys_from_rad2, zs_from_rad2)))
# ==========================================================================
# =============================== Plotting =================================
fig = plt.figure(1)
plt.rc('font', family='serif')
ax = fig.gca(projection='3d')
ax.plot(xs, ys, zs, label='Real airplane position', color='k', linestyle='dashed')
ax.scatter(xs_from_rad, ys_from_rad, zs_from_rad, color='b', marker='o', label='Radar measurements')
ax.scatter(radar.x,radar.y,radar.z,color='r',label='Radar')
ax.scatter(xs_from_rad2, ys_from_rad2, zs_from_rad2, color='g', marker='o', label='Radar2 measurements')
ax.scatter(radar2.x,radar2.y,radar2.z,color='orange',label='Radar2')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
ax.legend()
plt.show()
| 31.990244 | 108 | 0.552531 |
92c56968de82a8225c5093b40891e57d1367f047 | 26,953 | py | Python | GearBot/Cogs/Moderation.py | Gh0stlyy/GearBot | aa918976017c1e864ab33ccd714bf25cbefa811c | [
"MIT"
] | null | null | null | GearBot/Cogs/Moderation.py | Gh0stlyy/GearBot | aa918976017c1e864ab33ccd714bf25cbefa811c | [
"MIT"
] | null | null | null | GearBot/Cogs/Moderation.py | Gh0stlyy/GearBot | aa918976017c1e864ab33ccd714bf25cbefa811c | [
"MIT"
] | null | null | null | import asyncio
import datetime
import time
import traceback
from concurrent.futures import CancelledError
import discord
from discord.ext import commands
from discord.ext.commands import BadArgument
from Util import Permissioncheckers, Configuration, Utils, GearbotLogging, Pages, InfractionUtils, Emoji, Translator, \
Archive
from Util.Converters import BannedMember, UserID, Reason
from database.DatabaseConnector import LoggedMessage
class Moderation:
permissions = {
"min": 2,
"max": 6,
"required": 2,
"commands": {
"userinfo": {"required": 2, "min": 0, "max": 6},
"serverinfo": {"required": 2, "min": 0, "max": 6},
"roles": {"required": 2, "min": 0, "max": 6},
}
}
def __init__(self, bot):
self.bot: commands.Bot = bot
bot.mutes = self.mutes = Utils.fetch_from_disk("mutes")
self.running = True
self.bot.loop.create_task(unmuteTask(self))
Pages.register("roles", self.roles_init, self.roles_update), []
def __unload(self):
Utils.saveToDisk("mutes", self.mutes)
self.running = False
Pages.unregister("roles")
async def __local_check(self, ctx):
return Permissioncheckers.check_permission(ctx)
async def roles_init(self, ctx):
pages = self.gen_roles_pages(ctx.guild)
page = pages[0]
return f"**{Translator.translate('roles', ctx.guild.id, server_name=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{page}```", None, len(pages) > 1, []
async def roles_update(self, ctx, message, page_num, action, data):
pages = self.gen_roles_pages(message.guild)
page, page_num = Pages.basic_pages(pages, page_num, action)
return f"**{Translator.translate('roles', message.guild.id, server_name=ctx.guild.name, page_num=page_num + 1, pages=len(pages))}**```\n{page}```", None, page_num
@staticmethod
def gen_roles_pages(guild: discord.Guild):
role_list = dict()
longest_name = 1
for role in guild.roles:
role_list[f"{role.name} - {role.id}"] = role
longest_name = max(longest_name, len(role.name))
return Pages.paginate("\n".join(f"{role_list[r].name} {' ' * (longest_name - len(role_list[r].name))} - {role_list[r].id}" for r in sorted(role_list.keys())))
@commands.command()
@commands.guild_only()
async def roles(self, ctx: commands.Context):
"""Lists all roles on the server and their IDs, useful for configuring without having to ping that role"""
await Pages.create_new("roles", ctx)
@commands.command(aliases=["👢"])
@commands.guild_only()
@commands.bot_has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason:Reason=""):
"""kick_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
if ctx.me.top_role > user.top_role:
self.bot.data["forced_exits"].append(user.id)
await ctx.guild.kick(user,
reason=f"Moderator: {ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id}) Reason: {reason}")
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('kick_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f":boot: {Translator.translate('kick_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, Translator.translate('kick', ctx.guild.id), reason)
else:
await ctx.send(Translator.translate('kick_unable',ctx.guild.id, user=Utils.clean_user(user)))
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('kick_not_allowed', ctx.guild.id, user=user)}")
@commands.command(aliases=["🚪"])
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def ban(self, ctx: commands.Context, user: discord.Member, *, reason:Reason=""):
"""ban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
if ctx.me.top_role > user.top_role:
self.bot.data["forced_exits"].append(user.id)
await ctx.guild.ban(user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}",
delete_message_days=0)
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, "Ban", reason)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('ban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f":door: {Translator.translate('ban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
else:
await ctx.send(Translator.translate('ban_unable', ctx.guild.id, user=Utils.clean_user(user)))
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('ban_not_allowed', ctx.guild.id, user=user)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def softban(self, ctx:commands.Context, user: discord.Member, *, reason:Reason=""):
"""softban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
if ctx.me.top_role > user.top_role:
self.bot.data["forced_exits"].append(user.id)
self.bot.data["unbans"].append(user.id)
await ctx.guild.ban(user, reason=f"softban - Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}", delete_message_days=1)
await ctx.guild.unban(user)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('softban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS", f":door: {Translator.translate('softban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, "Softban", reason)
else:
await ctx.send(Translator.translate('softban_unable', ctx.guild.id, user=Utils.clean_user(user)))
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('softban_not_allowed', ctx.guild.id, user=user)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def forceban(self, ctx: commands.Context, user_id: UserID, *, reason:Reason=""):
"""forceban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
try:
member = await commands.MemberConverter().convert(ctx, str(user_id))
except BadArgument:
user = await ctx.bot.get_user_info(user_id)
self.bot.data["forced_exits"].append(user.id)
await ctx.guild.ban(user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}",
delete_message_days=0)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('forceban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user_id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f":door: {Translator.translate('forceban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user_id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, Translator.translate('forced_ban', ctx.guild.id), reason)
else:
await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('forceban_to_ban', ctx.guild.id, user=Utils.clean_user(member))}")
await ctx.invoke(self.ban, member, reason=reason)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_messages=True)
async def purge(self, ctx, msgs: int):
"""purge_help"""
if msgs < 1:
return await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('purge_too_small', ctx.guild.id)}")
if msgs > 1000:
return await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('purge_too_big', ctx.guild.id)}")
try:
deleted = await ctx.channel.purge(limit=msgs)
except discord.NotFound:
# sleep for a sec just in case the other bot is still purging so we don't get removed as well
await asyncio.sleep(1)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('purge_fail_not_found', ctx.guild.id)}")
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('purge_confirmation', ctx.guild.id, count=len(deleted))}", delete_after=10)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def unban(self, ctx, member: BannedMember, *, reason:Reason=""):
"""unban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
self.bot.data["unbans"].append(member.user.id)
await ctx.guild.unban(member.user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}")
InfractionUtils.add_infraction(ctx.guild.id, member.user.id, ctx.author.id, "Unban", reason)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('unban_confirmation', ctx.guild.id, user=Utils.clean_user(member.user), user_id=member.user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f"{Emoji.get_chat_emoji('INNOCENT')} {Translator.translate('unban_log', ctx.guild.id, user=Utils.clean_user(member.user), user_id=member.user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
async def mute(self, ctx: commands.Context, target: discord.Member, durationNumber: int, durationIdentifier: str, *,
reason:Reason=""):
"""mute_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
roleid = Configuration.get_var(ctx.guild.id, "MUTE_ROLE")
        if roleid == 0:
await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_not_configured', ctx.guild.id, user=target.mention)}")
else:
role = discord.utils.get(ctx.guild.roles, id=roleid)
if role is None:
await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_role_missing', ctx.guild.id, user=target.mention)}")
else:
if (ctx.author != target and target != ctx.bot.user and ctx.author.top_role > target.top_role) or ctx.guild.owner == ctx.author:
duration = Utils.convertToSeconds(durationNumber, durationIdentifier)
if duration > 0:
until = time.time() + duration
await target.add_roles(role, reason=f"{reason}, as requested by {ctx.author.name}")
if not str(ctx.guild.id) in self.mutes:
self.mutes[str(ctx.guild.id)] = dict()
self.mutes[str(ctx.guild.id)][str(target.id)] = until
await ctx.send(f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_confirmation', ctx.guild.id, user=Utils.clean_user(target), duration=f'{durationNumber} {durationIdentifier}')}")
Utils.saveToDisk("mutes", self.mutes)
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS", f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_log', ctx.guild.id, user=Utils.clean_user(target), user_id=target.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, duration=f'{durationNumber} {durationIdentifier}', reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, target.id, ctx.author.id, "Mute", reason)
else:
await ctx.send(f"{Emoji.get_chat_emoji('WHAT')} {Translator.translate('mute_negative_denied', ctx.guild.id, duration=f'{durationNumber} {durationIdentifier}')} {Emoji.get_chat_emoji('WHAT')}")
else:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_not_allowed', ctx.guild.id, user=target)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
async def unmute(self, ctx: commands.Context, target: discord.Member, *, reason:Reason=""):
"""unmute_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
roleid = Configuration.get_var(ctx.guild.id, "MUTE_ROLE")
        if roleid == 0:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} The mute feature has been disabled on this server, as such i cannot unmute that person")
else:
role = discord.utils.get(ctx.guild.roles, id=roleid)
if role is None:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} Unable to comply, the role i've been told to use for muting no longer exists")
else:
await target.remove_roles(role, reason=f"Unmuted by {ctx.author.name}, {reason}")
await ctx.send(f"{Emoji.get_chat_emoji('INNOCENT')} {target.display_name} has been unmuted")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f"{Emoji.get_chat_emoji('INNOCENT')} {target.name}#{target.discriminator} (`{target.id}`) has been unmuted by {ctx.author.name}")
InfractionUtils.add_infraction(ctx.guild.id, target.id, ctx.author.id, "Unmute", reason)
@commands.command()
    async def userinfo(self, ctx: commands.Context, *, userID: UserID = None):
"""Shows information about the chosen user"""
user = None
member = None
if userID is None:
user = ctx.author
if ctx.guild is not None:
member = ctx.guild.get_member(user.id)
elif ctx.guild is not None:
try:
user = member = ctx.guild.get_member(userID)
except BadArgument:
pass
if user is None:
user = await Utils.get_user(userID)
embed = discord.Embed(color=0x7289DA, timestamp=ctx.message.created_at)
embed.set_thumbnail(url=user.avatar_url)
embed.set_footer(text=Translator.translate('requested_by', ctx, user=ctx.author.name), icon_url=ctx.author.avatar_url)
embed.add_field(name=Translator.translate('name', ctx), value=f"{user.name}#{user.discriminator}", inline=True)
embed.add_field(name=Translator.translate('id', ctx), value=user.id, inline=True)
embed.add_field(name=Translator.translate('bot_account', ctx), value=user.bot, inline=True)
embed.add_field(name=Translator.translate('animated_avatar', ctx), value=user.is_avatar_animated(), inline=True)
if member is not None:
account_joined = member.joined_at.strftime("%d-%m-%Y")
embed.add_field(name=Translator.translate('nickname', ctx), value=member.nick, inline=True)
embed.add_field(name=Translator.translate('top_role', ctx), value=member.top_role.name, inline=True)
embed.add_field(name=Translator.translate('joined_at', ctx),
value=f"{account_joined} ({(ctx.message.created_at - member.joined_at).days} days ago)",
inline=True)
account_made = user.created_at.strftime("%d-%m-%Y")
embed.add_field(name=Translator.translate('account_created_at', ctx),
value=f"{account_made} ({(ctx.message.created_at - user.created_at).days} days ago)",
inline=True)
embed.add_field(name=Translator.translate('avatar_url', ctx), value=f"[{Translator.translate('avatar_url', ctx)}]({user.avatar_url})")
await ctx.send(embed=embed)
@commands.command()
async def serverinfo(self, ctx):
"""Shows information about the current server."""
guild_features = ", ".join(ctx.guild.features)
print(guild_features)
if guild_features == "":
guild_features = None
role_list = []
for i in range(len(ctx.guild.roles)):
role_list.append(ctx.guild.roles[i].name)
guild_made = ctx.guild.created_at.strftime("%d-%m-%Y")
embed = discord.Embed(color=0x7289DA, timestamp= datetime.datetime.fromtimestamp(time.time()))
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=Translator.translate('requested_by', ctx, user=ctx.author), icon_url=ctx.author.avatar_url)
embed.add_field(name=Translator.translate('name', ctx), value=ctx.guild.name, inline=True)
embed.add_field(name=Translator.translate('id', ctx), value=ctx.guild.id, inline=True)
embed.add_field(name=Translator.translate('owner', ctx), value=ctx.guild.owner, inline=True)
embed.add_field(name=Translator.translate('members', ctx), value=ctx.guild.member_count, inline=True)
embed.add_field(name=Translator.translate('text_channels', ctx), value=str(len(ctx.guild.text_channels)), inline=True)
embed.add_field(name=Translator.translate('voice_channels', ctx), value=str(len(ctx.guild.voice_channels)), inline=True)
embed.add_field(name=Translator.translate('total_channel', ctx), value=str(len(ctx.guild.text_channels) + len(ctx.guild.voice_channels)),
inline=True)
embed.add_field(name=Translator.translate('created_at', ctx),
value=f"{guild_made} ({(ctx.message.created_at - ctx.guild.created_at).days} days ago)",
inline=True)
embed.add_field(name=Translator.translate('vip_features', ctx), value=guild_features, inline=True)
if ctx.guild.icon_url != "":
embed.add_field(name=Translator.translate('server_icon', ctx), value=f"[{Translator.translate('server_icon', ctx)}]({ctx.guild.icon_url})", inline=True)
embed.add_field(name=Translator.translate('all_roles', ctx), value=", ".join(role_list), inline=True) #todo paginate
await ctx.send(embed=embed)
@commands.group()
@commands.bot_has_permissions(attach_files=True)
async def archive(self, ctx):
await ctx.trigger_typing()
@archive.command()
async def channel(self, ctx, channel:discord.TextChannel=None, amount=100):
if amount > 5000:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_too_much', ctx)}")
return
if channel is None:
channel = ctx.message.channel
if Configuration.get_var(ctx.guild.id, "EDIT_LOGS"):
permissions = channel.permissions_for(ctx.author)
if permissions.read_messages and permissions.read_message_history:
messages = LoggedMessage.select().where((LoggedMessage.server == ctx.guild.id) & (LoggedMessage.channel == channel.id)).order_by(LoggedMessage.messageid.desc()).limit(amount)
await Archive.ship_messages(ctx, messages)
else:
ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_denied_read_perms')}")
else:
await ctx.send("Not implemented, please enable edit logs to be able to use archiving")
@archive.command()
async def user(self, ctx, user:UserID, amount=100):
if amount > 5000:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_too_much', ctx)}")
return
if Configuration.get_var(ctx.guild.id, "EDIT_LOGS"):
messages = LoggedMessage.select().where(
(LoggedMessage.server == ctx.guild.id) & (LoggedMessage.author == user)).order_by(LoggedMessage.messageid.desc()).limit(amount)
await Archive.ship_messages(ctx, messages)
else:
await ctx.send("Please enable edit logs so i can archive users")
async def on_guild_channel_create(self, channel: discord.abc.GuildChannel):
guild: discord.Guild = channel.guild
roleid = Configuration.get_var(guild.id, "MUTE_ROLE")
        if roleid != 0:
role = discord.utils.get(guild.roles, id=roleid)
if role is not None and channel.permissions_for(guild.me).manage_channels:
if isinstance(channel, discord.TextChannel):
await channel.set_permissions(role, reason=Translator.translate('mute_setup', guild.id), send_messages=False,
add_reactions=False)
else:
await channel.set_permissions(role, reason=Translator.translate('mute_setup', guild.id), speak=False, connect=False)
async def on_member_join(self, member: discord.Member):
if str(member.guild.id) in self.mutes and member.id in self.mutes[str(member.guild.id)]:
roleid = Configuration.get_var(member.guild.id, "MUTE_ROLE")
            if roleid != 0:
role = discord.utils.get(member.guild.roles, id=roleid)
if role is not None:
if member.guild.me.guild_permissions.manage_roles:
await member.add_roles(role, reason=Translator.translate('mute_reapply_reason', member.guild.id))
await GearbotLogging.log_to(member.guild.id, "MOD_ACTIONS",f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_reapply_log', member.guild.id, user=Utils.clean_user(member), user_id=member.id)}")
else:
await GearbotLogging.log_to(member.guild.id, "MOD_ACTIONS", Translator.translate('mute_reapply_failed_log', member.build.id))
async def on_guild_remove(self, guild: discord.Guild):
        if str(guild.id) in self.mutes:
            del self.mutes[str(guild.id)]
Utils.saveToDisk("mutes", self.mutes)
def setup(bot):
bot.add_cog(Moderation(bot))
async def unmuteTask(modcog: Moderation):
GearbotLogging.info("Started unmute background task")
skips = []
updated = False
while modcog.running:
userid = 0
guildid = 0
try:
guildstoremove = []
for guildid, list in modcog.mutes.items():
guild: discord.Guild = modcog.bot.get_guild(int(guildid))
toremove = []
                if Configuration.get_var(int(guildid), "MUTE_ROLE") == 0:
guildstoremove.append(guildid)
for userid, until in list.items():
if time.time() > until and userid not in skips:
member = guild.get_member(int(userid))
role = discord.utils.get(guild.roles, id=Configuration.get_var(int(guildid), "MUTE_ROLE"))
if guild.me.guild_permissions.manage_roles:
await member.remove_roles(role, reason="Mute expired")
await GearbotLogging.log_to(guild.id, "MOD_ACTIONS",
f"<:gearInnocent:465177981287923712> {member.name}#{member.discriminator} (`{member.id}`) has automaticaly been unmuted")
else:
await GearbotLogging.log_to(guild.id, "MOD_ACTIONS",
f":no_entry: ERROR: {member.name}#{member.discriminator} (`{member.id}`) was muted earlier but I no longer have the permissions needed to unmute this person, please remove the role manually!")
updated = True
toremove.append(userid)
for todo in toremove:
del list[todo]
await asyncio.sleep(0)
if updated:
Utils.saveToDisk("mutes", modcog.mutes)
updated = False
for id in guildstoremove:
del modcog.mutes[id]
await asyncio.sleep(10)
except CancelledError:
pass # bot shutdown
except Exception as ex:
GearbotLogging.error("Something went wrong in the unmute task")
GearbotLogging.error(traceback.format_exc())
skips.append(userid)
embed = discord.Embed(colour=discord.Colour(0xff0000),
timestamp=datetime.datetime.utcfromtimestamp(time.time()))
embed.set_author(name="Something went wrong in the unmute task:")
embed.add_field(name="Current guildid", value=guildid)
embed.add_field(name="Current userid", value=userid)
embed.add_field(name="Exception", value=ex)
v = ""
for line in traceback.format_exc().splitlines():
if len(v) + len(line) > 1024:
embed.add_field(name="Stacktrace", value=v)
v = ""
v = f"{v}\n{line}"
if len(v) > 0:
embed.add_field(name="Stacktrace", value=v)
await GearbotLogging.bot_log(embed=embed)
await asyncio.sleep(10)
GearbotLogging.info("Unmute background task terminated")
| 60.841986 | 348 | 0.627314 |
fc1a61e1c549812888870f868695ee461c905230 | 27,541 | py | Python | envoy.base.runner/tests/test_runner.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 1 | 2021-12-09T19:24:48.000Z | 2021-12-09T19:24:48.000Z | envoy.base.runner/tests/test_runner.py | envoyproxy/pytooling | db8b60184f8a61b3184a111b0cfaff4780511b46 | [
"Apache-2.0"
] | 392 | 2021-08-24T15:55:32.000Z | 2022-03-28T14:26:22.000Z | envoy.base.runner/tests/test_runner.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 3 | 2021-10-06T13:43:11.000Z | 2021-11-29T13:48:56.000Z |
import logging
import sys
from unittest.mock import AsyncMock, MagicMock, patch, PropertyMock
import pytest
from envoy.base import runner
class DummyRunner(runner.BaseRunner):
def __init__(self):
self.args = PropertyMock()
class DummyForkingRunner(runner.ForkingRunner):
def __init__(self):
self.args = PropertyMock()
class Error1(Exception):
    def __str__(self):
        return ""
class Error2(Exception):
pass
def _failing_runner(errors):
class DummyFailingRunner:
# this dummy runner calls the _runner mock
# when its run/run_async methods are called
# and optionally raises some type of error
# to ensure they are caught as expected
log = PropertyMock()
_runner = MagicMock()
def __init__(self, raises=None):
self.raises = raises
@runner.catches(errors)
def run(self, *args, **kwargs):
result = self._runner(*args, **kwargs)
if self.raises:
raise self.raises("AN ERROR OCCURRED")
return result
@runner.catches(errors)
async def run_async(self, *args, **kwargs):
result = self._runner(*args, **kwargs)
if self.raises:
raise self.raises("AN ERROR OCCURRED")
return result
return DummyFailingRunner
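# Illustrative sketch only: the rough shape of a `catches` decorator matching
# the contract exercised below (log and swallow the listed errors, return 1,
# expose the errors on the wrapped function). The real implementation lives in
# envoy.base.runner and may differ; this sync-only sketch assumes functools.
#   def catches(errors):
#       def wrapper(fun):
#           fun.__catches__ = errors
#           @functools.wraps(fun)
#           def wrapped(self, *args, **kwargs):
#               try:
#                   return fun(self, *args, **kwargs)
#               except errors as e:
#                   self.log.error(str(e) or repr(e))
#                   return 1
#           return wrapped
#       return wrapper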
def test_base_log_filter():
filter = runner.runner.BaseLogFilter("APP_LOGGER")
assert isinstance(filter, logging.Filter)
assert filter.app_logger == "APP_LOGGER"
@pytest.mark.parametrize("name", ["APP_LOGGER", "SOMETHING_ELSE"])
def test_app_log_filter(name):
app_logger = MagicMock()
app_logger.name = "APP_LOGGER"
filter = runner.runner.AppLogFilter(app_logger)
assert isinstance(filter, runner.runner.BaseLogFilter)
assert filter.app_logger == app_logger
record = MagicMock()
record.name = name
assert (
filter.filter(record)
== (name == "APP_LOGGER"))
@pytest.mark.parametrize("name", ["APP_LOGGER", "SOMETHING_ELSE"])
def test_root_log_filter(name):
app_logger = MagicMock()
app_logger.name = "APP_LOGGER"
filter = runner.runner.RootLogFilter(app_logger)
assert isinstance(filter, runner.runner.BaseLogFilter)
assert filter.app_logger == app_logger
record = MagicMock()
record.name = name
assert (
filter.filter(record)
== (name != "APP_LOGGER"))
@pytest.mark.parametrize("async_fun", [True, False])
@pytest.mark.parametrize(
"errors",
[Error1, (Error1, Error2)])
@pytest.mark.parametrize(
"raises",
[None, Error1, Error2])
@pytest.mark.parametrize(
"args",
[(), ("ARG1", "ARG2")])
@pytest.mark.parametrize(
"kwargs",
[{}, dict(key1="VAL1", key2="VAL2")])
async def test_catches(errors, async_fun, raises, args, kwargs):
run = _failing_runner(errors)(raises)
should_fail = (
raises
and not (
raises == errors
or (isinstance(errors, tuple)
and raises in errors)))
assert run.run.__wrapped__.__catches__ == errors
assert run.run_async.__wrapped__.__catches__ == errors
if should_fail:
result = 1
with pytest.raises(raises):
(run.run(*args, **kwargs)
if not async_fun
else await run.run_async(*args, **kwargs))
else:
result = (
run.run(*args, **kwargs)
if not async_fun
else await run.run_async(*args, **kwargs))
assert (
list(run._runner.call_args)
== [args, kwargs])
if not should_fail and raises:
assert result == 1
error = run.log.error.call_args[0][0]
_error = raises("AN ERROR OCCURRED")
assert (
error
== (str(_error) or repr(_error)))
assert (
list(run.log.error.call_args)
== [(error,), {}])
else:
assert not run.log.error.called
if raises:
assert result == 1
else:
assert result == run._runner.return_value
def _cleanup_runner(async_fun, raises):
class DummyCleanupRunner:
# this dummy runner calls the _runner mock
# when its run/async_fun methods are called
# and optionally raises some type of error
# to ensure they are caught as expected
log = PropertyMock()
_runner = MagicMock()
@runner.cleansup
def run(self, *args, **kwargs):
result = self._runner(*args, **kwargs)
if raises:
raise Exception("AN ERROR OCCURRED")
return result
@runner.cleansup
async def run_async(self, *args, **kwargs):
result = self._runner(*args, **kwargs)
if raises:
raise Exception("AN ERROR OCCURRED")
return result
return DummyCleanupRunner()
@pytest.mark.parametrize("async_fun", [True, False])
@pytest.mark.parametrize("raises", [True, False])
async def test_cleansup(async_fun, raises):
run = _cleanup_runner(async_fun, raises)
args = [f"ARG{i}" for i in range(0, 3)]
kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)}
assert run.run.__wrapped__.__cleansup__ is True
assert run.run_async.__wrapped__.__cleansup__ is True
if async_fun:
run.cleanup = AsyncMock()
if raises:
with pytest.raises(Exception):
await run.run_async(*args, **kwargs)
else:
assert (
await run.run_async(*args, **kwargs)
== run._runner.return_value)
else:
run.cleanup = MagicMock()
if raises:
with pytest.raises(Exception):
run.run(*args, **kwargs)
else:
assert (
run.run(*args, **kwargs)
== run._runner.return_value)
assert (
list(run._runner.call_args)
== [tuple(args), kwargs])
assert (
list(run.cleanup.call_args)
== [(), {}])
def test_base_runner_constructor(patches):
patched = patches(
"BaseRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_setup, ):
run = runner.BaseRunner("path1", "path2", "path3")
assert (
m_setup.call_args
== [(), {}])
assert run._args == ("path1", "path2", "path3")
assert run.log_field_styles == runner.runner.LOG_FIELD_STYLES
assert run.log_level_styles == runner.runner.LOG_LEVEL_STYLES
assert run.log_fmt == runner.runner.LOG_FMT
def test_base_runner_args(patches):
patched = patches(
("BaseRunner.parser",
dict(new_callable=PropertyMock)),
"BaseRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_parser, m_setup):
run = runner.BaseRunner('path1', 'path2', 'path3')
known_args = m_parser.return_value.parse_known_args
assert (
run.args
== known_args.return_value.__getitem__.return_value)
assert (
list(known_args.call_args)
== [(('path1', 'path2', 'path3'),), {}])
assert (
list(known_args.return_value.__getitem__.call_args)
== [(0,), {}])
assert "args" in run.__dict__
def test_base_runner_extra_args(patches):
patched = patches(
("BaseRunner.parser",
dict(new_callable=PropertyMock)),
"BaseRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_parser, m_setup):
run = runner.BaseRunner('path1', 'path2', 'path3')
known_args = m_parser.return_value.parse_known_args
assert (
run.extra_args
== known_args.return_value.__getitem__.return_value)
assert (
list(known_args.call_args)
== [(('path1', 'path2', 'path3'),), {}])
assert (
list(known_args.return_value.__getitem__.call_args)
== [(1,), {}])
assert "extra_args" in run.__dict__
def test_base_runner_log(patches):
patched = patches(
"coloredlogs",
"verboselogs",
("BaseRunner.log_field_styles",
dict(new_callable=PropertyMock)),
("BaseRunner.log_fmt",
dict(new_callable=PropertyMock)),
("BaseRunner.log_level_styles",
dict(new_callable=PropertyMock)),
("BaseRunner.name",
dict(new_callable=PropertyMock)),
("BaseRunner.verbosity",
dict(new_callable=PropertyMock)),
"BaseRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as patchy:
(m_color, m_verb, m_fstyle, m_fmt,
m_lstyle, m_name, m_verbosity, m_setup) = patchy
run = runner.BaseRunner('path1', 'path2', 'path3')
assert run.log == m_verb.VerboseLogger.return_value
assert (
m_verb.VerboseLogger.call_args
== [(m_name.return_value, ), {}])
assert (
m_color.install.call_args
== [(),
{'fmt': m_fmt.return_value,
'isatty': True,
'field_styles': m_fstyle.return_value,
'level': m_verbosity.return_value,
'level_styles': m_lstyle.return_value,
'logger': m_verb.VerboseLogger.return_value}])
assert "log" in run.__dict__
def test_base_runner_log_level(patches):
run = DummyRunner()
patched = patches(
"dict",
("BaseRunner.args", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_dict, m_args):
assert run.log_level == m_dict.return_value.__getitem__.return_value
assert (
list(m_dict.call_args)
== [(runner.runner.LOG_LEVELS, ), {}])
assert (
list(m_dict.return_value.__getitem__.call_args)
== [(m_args.return_value.log_level,), {}])
assert "log_level" in run.__dict__
def test_base_runner_name():
run = DummyRunner()
assert run.name == run.__class__.__name__
assert "name" not in run.__dict__
def test_base_runner_parser(patches):
run = DummyRunner()
patched = patches(
"argparse",
"BaseRunner.add_arguments",
prefix="envoy.base.runner.runner")
with patched as (m_parser, m_add_args):
assert run.parser == m_parser.ArgumentParser.return_value
assert (
list(m_parser.ArgumentParser.call_args)
== [(), {"allow_abbrev": False}])
assert (
list(m_add_args.call_args)
== [(m_parser.ArgumentParser.return_value,), {}])
assert "parser" in run.__dict__
def test_base_runner_path(patches):
run = DummyRunner()
patched = patches(
"pathlib",
prefix="envoy.base.runner.runner")
with patched as (m_plib, ):
assert run.path == m_plib.Path.return_value
assert (
list(m_plib.Path.call_args)
== [(".", ), {}])
def test_base_runner_root_log_format(patches):
run = DummyRunner()
patched = patches(
"logging",
prefix="envoy.base.runner.runner")
with patched as (m_logging, ):
assert run.root_log_format == m_logging.Formatter.return_value
assert (
m_logging.Formatter.call_args
== [("%(name)s: %(levelname)s %(message)s", ), {}])
assert "root_log_format" not in run.__dict__
def test_base_runner_root_log_handler(patches):
run = DummyRunner()
patched = patches(
"logging",
"RootLogFilter",
("BaseRunner.log", dict(new_callable=PropertyMock)),
("BaseRunner.log_level", dict(new_callable=PropertyMock)),
("BaseRunner.root_log_format", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_logging, m_filter, m_log, m_level, m_format):
assert run.root_log_handler == m_logging.StreamHandler.return_value
assert (
m_logging.StreamHandler.call_args
== [(), {}])
assert (
m_logging.StreamHandler.return_value.setLevel.call_args
== [(m_level.return_value, ), {}])
assert (
m_logging.StreamHandler.return_value.addFilter.call_args
== [(m_filter.return_value, ), {}])
assert (
m_filter.call_args
== [(m_log.return_value, ), {}])
assert (
m_logging.StreamHandler.return_value.setFormatter.call_args
== [(m_format.return_value, ), {}])
assert "root_log_handler" in run.__dict__
def test_base_runner_root_logger(patches):
run = DummyRunner()
patched = patches(
"logging",
"AppLogFilter",
("BaseRunner.log", dict(new_callable=PropertyMock)),
("BaseRunner.root_log_handler", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_logging, m_filter, m_log, m_handler):
assert run.root_logger == m_logging.getLogger.return_value
assert (
m_logging.getLogger.call_args
== [(), {}])
assert (
m_logging.getLogger.return_value.handlers.__getitem__.call_args
== [(0, ), {}])
assert (
m_logging.getLogger.return_value
.handlers.__getitem__.return_value
.addFilter.call_args
== [(m_filter.return_value, ), {}])
assert (
m_filter.call_args
== [(m_log.return_value, ), {}])
assert (
m_logging.getLogger.return_value.addHandler.call_args
== [(m_handler.return_value, ), {}])
assert "root_logger" in run.__dict__
def test_base_runner_stdout(patches):
run = DummyRunner()
patched = patches(
"logging",
("BaseRunner.log_level", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_log, m_level):
assert run.stdout == m_log.getLogger.return_value
assert (
list(m_log.getLogger.call_args)
== [('stdout',), {}])
assert (
list(m_log.getLogger.return_value.setLevel.call_args)
== [(m_level.return_value,), {}])
assert (
list(m_log.StreamHandler.call_args)
== [(sys.stdout,), {}])
assert (
list(m_log.Formatter.call_args)
== [('%(message)s',), {}])
assert (
list(m_log.StreamHandler.return_value.setFormatter.call_args)
== [(m_log.Formatter.return_value,), {}])
assert (
list(m_log.getLogger.return_value.addHandler.call_args)
== [(m_log.StreamHandler.return_value,), {}])
@pytest.mark.parametrize("missing", [True, False])
def test_base_runner_tempdir(patches, missing):
run = DummyRunner()
patched = patches(
"tempfile",
("BaseRunner.log", dict(new_callable=PropertyMock)),
("BaseRunner._missing_cleanup", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_tmp, m_log, m_missing):
m_missing.return_value = missing
assert run.tempdir == m_tmp.TemporaryDirectory.return_value
if missing:
assert (
list(m_log.return_value.warning.call_args)
== [(("Tempdir created but instance has a `run` method "
"which is not decorated with `@runner.cleansup`"), ), {}])
else:
assert not m_log.called
assert (
list(m_tmp.TemporaryDirectory.call_args)
== [(), {}])
assert "tempdir" in run.__dict__
def test_base_runner_verbosity(patches):
run = DummyRunner()
patched = patches(
"dict",
("BaseRunner.args", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_dict, m_args):
assert run.verbosity == m_dict.return_value.__getitem__.return_value
assert (
list(m_dict.call_args)
== [(runner.runner.LOG_LEVELS, ), {}])
assert (
list(m_dict.return_value.__getitem__.call_args)
== [(m_args.return_value.verbosity,), {}])
assert "verbosity" in run.__dict__
def test_base_runner_add_arguments():
run = DummyRunner()
parser = MagicMock()
assert run.add_arguments(parser) is None
assert (
list(list(c) for c in parser.add_argument.call_args_list)
== [[('--verbosity',
'-v'),
{'choices': ['debug',
'info',
'warn',
'error'],
'default': 'info',
'help': 'Application log level'}],
[('--log-level', '-l'),
{'choices': ['debug', 'info', 'warn', 'error'],
'default': 'warn',
'help': 'Log level for non-application logs'}]])
def test_runner_setup_logging(patches):
run = DummyRunner()
patched = patches(
"logging",
("BaseRunner.log",
dict(new_callable=PropertyMock)),
("BaseRunner.log_level",
dict(new_callable=PropertyMock)),
("BaseRunner.root_logger",
dict(new_callable=PropertyMock)),
("BaseRunner.verbosity",
dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_logging, m_log, m_level, m_root, m_verb):
assert not run.setup_logging()
assert (
m_logging.basicConfig.call_args
== [(), dict(level=m_level.return_value)])
assert (
m_root.return_value.setLevel.call_args
== [(m_level.return_value, ), {}])
assert (
m_log.return_value.setLevel.call_args
== [(m_verb.return_value, ), {}])
@pytest.mark.parametrize("has_fun", [True, False])
@pytest.mark.parametrize("is_wrapped", [True, False])
@pytest.mark.parametrize("cleansup", [True, False])
def test_base_runner__missing_cleanup(has_fun, is_wrapped, cleansup):
def _runner_factory():
if not has_fun:
return DummyRunner()
class _Wrap:
if cleansup:
__cleansup__ = True
class _Wrapper:
if is_wrapped:
__wrapped__ = _Wrap()
class DummyRunner2(DummyRunner):
run = _Wrapper()
return DummyRunner2()
run = _runner_factory()
assert (
run._missing_cleanup
== (has_fun
and not (is_wrapped and cleansup)))
assert "_missing_cleanup" not in run.__dict__
@pytest.mark.parametrize("cached", [True, False])
def test_base_runner__cleanup_tempdir(patches, cached):
run = DummyRunner()
patched = patches(
("BaseRunner.tempdir", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
if cached:
run.__dict__["tempdir"] = "TEMPDIR"
with patched as (m_temp, ):
assert not run._cleanup_tempdir()
if cached:
assert (
list(m_temp.return_value.cleanup.call_args)
== [(), {}])
else:
assert not m_temp.called
assert "tempdir" not in run.__dict__
def test_runner_constructor(patches):
patched = patches(
"BaseRunner.__init__",
prefix="envoy.base.runner.runner")
args = [f"ARG{i}" for i in range(0, 3)]
kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)}
with patched as (m_super, ):
m_super.return_value = None
run = runner.Runner(*args, **kwargs)
assert isinstance(run, runner.BaseRunner)
assert (
list(m_super.call_args)
== [tuple(args), kwargs])
def test_runner_dunder_call(patches):
patched = patches(
"Runner.run",
"Runner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_run, m_setup):
run = runner.Runner()
assert run() == m_run.return_value
assert (
list(m_run.call_args)
== [(), {}])
def test_runner_cleanup(patches):
patched = patches(
"Runner._cleanup_tempdir",
"Runner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_temp, m_setup):
run = runner.Runner()
assert not run.cleanup()
assert (
list(m_temp.call_args)
== [(), {}])
def test_async_runner_constructor(patches):
patched = patches(
"BaseRunner.__init__",
prefix="envoy.base.runner.runner")
args = [f"ARG{i}" for i in range(0, 3)]
kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)}
with patched as (m_super, ):
m_super.return_value = None
run = runner.AsyncRunner(*args, **kwargs)
assert isinstance(run, runner.BaseRunner)
assert (
list(m_super.call_args)
== [tuple(args), kwargs])
@pytest.mark.parametrize("raises", [None, KeyboardInterrupt])
def test_async_runner_dunder_call(patches, raises):
patched = patches(
"asyncio",
("AsyncRunner.log", dict(new_callable=MagicMock)),
("AsyncRunner.run", dict(new_callable=MagicMock)),
"AsyncRunner.setup_logging",
prefix="envoy.base.runner.runner")
# TODO: TEST LOG
with patched as (m_asyncio, m_log, m_run, m_setup):
run = runner.AsyncRunner()
if raises:
m_run.side_effect = raises("DIE")
assert (
run()
== (m_asyncio.run.return_value
if not raises
else 1))
if not raises:
assert (
list(m_asyncio.run.call_args)
== [(m_run.return_value, ), {}])
else:
assert not m_asyncio.run.called
assert (
list(m_run.call_args)
== [(), {}])
async def test_async_runner_cleanup(patches):
patched = patches(
"AsyncRunner._cleanup_tempdir",
"AsyncRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_temp, m_setup):
run = runner.AsyncRunner()
assert not await run.cleanup()
assert (
list(m_temp.call_args)
== [(), {}])
# BazelAdapter tests
def test_bazeladapter_constructor():
run = DummyRunner()
adapter = runner.BazelAdapter(run)
assert adapter.context == run
@pytest.mark.parametrize("query_returns", [0, 1])
def test_bazeladapter_query(query_returns):
run = DummyForkingRunner()
adapter = runner.BazelAdapter(run)
fork_mock = patch("envoy.base.runner.runner.ForkingAdapter.subproc_run")
with fork_mock as m_fork:
m_fork.return_value.returncode = query_returns
stdout = m_fork.return_value.stdout.decode
if query_returns:
with pytest.raises(runner.BazelRunError) as result:
adapter.query("BAZEL QUERY")
else:
result = adapter.query("BAZEL QUERY")
assert (
list(m_fork.call_args)
== [(['bazel', 'query', "'BAZEL QUERY'"],), {}])
if query_returns:
assert result.errisinstance(runner.BazelRunError)
assert (
result.value.args
== (f"Bazel query failed: {m_fork.return_value}",))
assert not stdout.called
else:
assert (
result
== stdout.return_value.split.return_value)
assert (
list(stdout.call_args)
== [('utf-8',), {}])
assert (
list(stdout.return_value.split.call_args)
== [('\n',), {}])
@pytest.mark.parametrize("cwd", [None, "", "SOMEPATH"])
@pytest.mark.parametrize("raises", [None, True, False])
@pytest.mark.parametrize("capture_output", [None, True, False])
@pytest.mark.parametrize("run_returns", [0, 1])
@pytest.mark.parametrize("args", [(), ("foo",), ("foo", "bar")])
def test_bazeladapter_run(
patches, run_returns, cwd, raises, args, capture_output):
run = DummyForkingRunner()
adapter = runner.BazelAdapter(run)
patched = patches(
"ForkingAdapter.subproc_run",
("ForkingRunner.path", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
adapter_args = ("BAZEL RUN",) + args
kwargs = {}
if raises is not None:
kwargs["raises"] = raises
if cwd is not None:
kwargs["cwd"] = cwd
if capture_output is not None:
kwargs["capture_output"] = capture_output
with patched as (m_fork, m_path):
m_fork.return_value.returncode = run_returns
if run_returns and (raises is not False):
with pytest.raises(runner.BazelRunError) as result:
adapter.run(*adapter_args, **kwargs)
else:
result = adapter.run(*adapter_args, **kwargs)
call_args = (("--",) + args) if args else args
bazel_args = ("bazel", "run", "BAZEL RUN") + call_args
bazel_kwargs = {}
bazel_kwargs["capture_output"] = (
True
if capture_output is True
else False)
bazel_kwargs["cwd"] = (
cwd
if cwd
else m_path.return_value)
assert (
list(m_fork.call_args)
== [(bazel_args,), bazel_kwargs])
if run_returns and (raises is not False):
assert result.errisinstance(runner.BazelRunError)
assert (
result.value.args
== (f"Bazel run failed: {m_fork.return_value}",))
else:
assert result == m_fork.return_value
# ForkingAdapter tests
def test_forkingadapter_constructor():
run = DummyRunner()
adapter = runner.ForkingAdapter(run)
assert adapter.context == run
def test_forkingadapter_call():
run = DummyRunner()
adapter = runner.ForkingAdapter(run)
fork_mock = patch("envoy.base.runner.runner.ForkingAdapter.subproc_run")
with fork_mock as m_fork:
assert (
adapter(
"arg1", "arg2", "arg3",
kwa1="foo",
kwa2="bar",
kwa3="baz")
== m_fork.return_value)
assert (
list(m_fork.call_args)
== [('arg1', 'arg2', 'arg3'),
{'kwa1': 'foo', 'kwa2': 'bar', 'kwa3': 'baz'}])
@pytest.mark.parametrize("args", [(), ("a", "b")])
@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"])
@pytest.mark.parametrize("capture_output", ["NONE", True, False])
def test_forkingadapter_subproc_run(patches, args, cwd, capture_output):
adapter = runner.ForkingAdapter(DummyRunner())
patched = patches(
"subprocess.run",
("BaseRunner.path", dict(new_callable=PropertyMock)),
prefix="envoy.base.runner.runner")
with patched as (m_run, m_path):
kwargs = {}
if cwd != "NONE":
kwargs["cwd"] = cwd
if capture_output != "NONE":
kwargs["capture_output"] = capture_output
assert adapter.subproc_run(*args, **kwargs) == m_run.return_value
expected = {'capture_output': True, 'cwd': cwd}
if capture_output is False:
expected["capture_output"] = False
if cwd == "NONE":
expected["cwd"] = m_path.return_value
assert (
list(m_run.call_args)
== [args, expected])
# ForkingRunner tests
def test_forkingrunner_fork(patches):
patched = patches(
"ForkingAdapter",
"ForkingRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_fork, m_setup):
run = runner.ForkingRunner("path1", "path2", "path3")
assert run.subproc_run == m_fork.return_value
assert (
list(m_fork.call_args)
== [(run,), {}])
assert "subproc_run" in run.__dict__
# BazelRunner tests
def test_bazelrunner_bazel(patches):
patched = patches(
"BazelAdapter",
"BazelRunner.setup_logging",
prefix="envoy.base.runner.runner")
with patched as (m_bazel, m_setup):
run = runner.BazelRunner("path1", "path2", "path3")
assert run.bazel == m_bazel.return_value
assert (
list(m_bazel.call_args)
== [(run,), {}])
assert "bazel" in run.__dict__
| 29.424145 | 76 | 0.605316 |
7475365537c7906a3cd5b899fbdb31404de8e118 | 13,422 | py | Python | src/test/clustering.py | fermi-lat/CalRecon | 69e123b523770baa1fc9e8f3b78e211b1064b0c0 | [
"BSD-3-Clause"
] | null | null | null | src/test/clustering.py | fermi-lat/CalRecon | 69e123b523770baa1fc9e8f3b78e211b1064b0c0 | [
"BSD-3-Clause"
] | null | null | null | src/test/clustering.py | fermi-lat/CalRecon | 69e123b523770baa1fc9e8f3b78e211b1064b0c0 | [
"BSD-3-Clause"
] | null | null | null |
from ReconReader import *
ROOT.gStyle.SetOptStat(111111)
ROOT.gStyle.SetPalette(1)
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-c', '--skim-cut', type = str, dest = 'c',
default = '1',
help = 'a cut to filter the events')
parser.add_option('-s', '--save-canvas', type = str, dest = 's',
default = None,
help = 'a path to save all canvas in .pdf format')
(opts, args) = parser.parse_args()
if len(args) == 0:
sys.exit('Please provide a recon input root file.')
elif len(args) > 2:
sys.exit('Too many arguments.')
reconFilePath = args[0]
try:
meritFilePath = args[1]
except IndexError:
meritFilePath = None
SaveAllCanvas = False
if opts.s != None:
SaveAllCanvas = True
savedCanvasPath = opts.s
ANALYSIS_BIN_LIST = ['McEnergy < 100',
'McEnergy >= 100 && McEnergy<500',
'McEnergy >= 500 && McEnergy<1000',
'McEnergy >= 1000 && McEnergy<5000',
'McEnergy >= 5000 && McEnergy<20000',
'McEnergy >= 20000'
]
reader = ReconReader(reconFilePath, meritFilePath, None, opts.c)
numEvents = min(10000, reader.getEntries())
# TTreeFormula tagging events where the main cluster solution is BAD
BAD_ANGLE_VALUE = -0.1
UberSolutionIsBetter = ROOT.TTreeFormula('UberSolutionIsBetter',
'(acos(-(Tkr1ZDir*CalUberZDir + Tkr1YDir*CalUberYDir + Tkr1XDir*CalUberXDir))-CalTrackAngle) < %f' % BAD_ANGLE_VALUE ,
reader.MeritChain)
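# The formula is true when the uber-cluster axis lies closer to the Tkr1 track
# direction than the first-cluster solution (CalTrackAngle) by more than
# |BAD_ANGLE_VALUE| radians, i.e. the uber solution would have been better.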
# Create TTreeFormulas for the analysis bins.
treeFormulaList = []
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
treeFormula = ROOT.TTreeFormula('analysisBin%d' % i, cut,
reader.MeritChain)
treeFormulaList.append(treeFormula)
# Create the histograms.
hNumCluList = []
hNumIsolatedCluList = []
hDistIsolatedCluList = []
hDistSecondCluList = []
hFirstCluFracEneList = []
hSecondCluFracEneList = []
hFirstAndSecondCluFracEneList = []
hSecondCluEneList = []
hFirstCluFracXtalsList = []
hDist_vs_EnergyList = []
hCluAngle_vs_UberAngle = []
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
hTitle = cut
hName = 'NumClu_%d' % i
h = ROOT.TH1F(hName, hTitle, 20, 0, 20)
h.GetXaxis().SetTitle('Number of clusters')
hNumCluList.append(h)
hName = 'NumIsolatedClu_%d' % i
h = ROOT.TH1F(hName, hTitle, 20, 0, 20)
h.GetXaxis().SetTitle('Number of isolated clusters')
hNumIsolatedCluList.append(h)
hName = 'DistIsolatedClu_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0, 500)
h.GetXaxis().SetTitle('Distance of isolated clusters from the main axis')
hDistIsolatedCluList.append(h)
hName = 'DistSecondClu_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0, 500)
h.GetXaxis().SetTitle('Distance of the 2nd cluster from the main axis')
hDistSecondCluList.append(h)
hName = 'FirstCluFracEne_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0.0, 1.0)
h.GetXaxis().SetTitle('Fraction of energy in the first cluster')
hFirstCluFracEneList.append(h)
hName = 'SecondCluFracEne_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0.0, 1.0)
h.GetXaxis().SetTitle('Fraction of energy in the second cluster')
hSecondCluFracEneList.append(h)
hName = 'FirstAndSecondCluFracEne_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0.0, 1.0)
h.GetXaxis().SetTitle('Fraction of energy in the 1st+2nd (non isolated) cluster')
hFirstAndSecondCluFracEneList.append(h)
hName = 'SecondCluEne_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0.0, 100)
h.GetXaxis().SetTitle('Energy in the second cluster')
hSecondCluEneList.append(h)
hName = 'FirstCluFracXtals_%d' % i
h = ROOT.TH1F(hName, hTitle, 100, 0.0, 1.0)
h.GetXaxis().SetTitle('Fraction of xtals in the first cluster')
hFirstCluFracXtalsList.append(h)
hName = 'Dist_vs_Energy_%d' % i
h = ROOT.TH2F(hName, hTitle, 100, 0.0, 500, 100, 0, 30)
hDist_vs_EnergyList.append(h)
hName = 'CalAngle_vs_UberAngle_%d' % i
h = ROOT.TH2F(hName, hTitle, 50, -3, 2, 50, -3, 2)
h.GetXaxis().SetTitle('log10(First cluster--Tkr1Dir angle)')
h.GetYaxis().SetTitle('log10(Uber cluster--Tkr1Dir angle)')
hCluAngle_vs_UberAngle.append(h)
# Start the event loop.
for event in xrange(numEvents):
if reader.getEntry(event):
numClusters = reader.getNumClusters()
numIsolatedClusters = 0
if numClusters == 0:
firstCluFracEne = -1
secondCluFracEne = -1
firstAndSecondCluFracEne = -1
secondCluEne = -1
firstCluFracXtals = -1
distSecondClu = -1
else:
clusterList = reader.getCalClusterList()
for cluster in clusterList:
if cluster.getTotNumXtals() == 1:
numIsolatedClusters += 1
uberCluster = clusterList[0]
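            # clusterList[0] is the "uber" cluster (all xtals merged); the
            # physical clusters start at index 1.  Out-of-range sentinel
            # values (2 for fractions, -1 otherwise) park degenerate events
            # in the histogram under/overflow bins.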
if numClusters == 1:
firstCluFracEne = 2
secondCluFracEne = -1
firstAndSecondCluFracEne = -1
secondCluEne = -1
firstCluFracXtals = 2
distSecondClu = -1
else:
cluster1 = clusterList[1]
cluster2 = clusterList[2]
firstCluFracEne = cluster1.getEnergy()/uberCluster.getEnergy()
secondCluFracEne = cluster2.getEnergy()/uberCluster.getEnergy()
if cluster2.getTotNumXtals() == 1:
firstAndSecondCluFracEne = 2
else:
firstAndSecondCluFracEne = (cluster1.getEnergy() + cluster2.getEnergy())/uberCluster.getEnergy()
secondCluEne = cluster2.getEnergy()
firstCluFracXtals = float(cluster1.getTotNumXtals())/\
uberCluster.getTotNumXtals()
distSecondClu = cluster2.distToAxis(cluster1)
RefXDir = reader.getMeritVariable("Tkr1XDir")
RefYDir = reader.getMeritVariable("Tkr1YDir")
RefZDir = reader.getMeritVariable("Tkr1ZDir")
CalXDir = reader.getMeritVariable("CalXDir")
CalYDir = reader.getMeritVariable("CalYDir")
CalZDir = reader.getMeritVariable("CalZDir")
CalUberXDir = reader.getMeritVariable("CalUberXDir")
CalUberYDir = reader.getMeritVariable("CalUberYDir")
CalUberZDir = reader.getMeritVariable("CalUberZDir")
CluAngle = log10(acos(-(RefZDir*CalZDir+\
RefYDir*CalYDir+\
RefXDir*CalXDir)))
UberAngle = log10(acos(-(RefZDir*CalUberZDir+\
RefYDir*CalUberYDir+\
RefXDir*CalUberXDir)))
for (i, treeFormula) in enumerate(treeFormulaList):
if treeFormula.EvalInstance():
hNumCluList[i].Fill(numClusters - int(numClusters > 1))
hNumIsolatedCluList[i].Fill(numIsolatedClusters)
hFirstCluFracEneList[i].Fill(firstCluFracEne)
hSecondCluFracEneList[i].Fill(secondCluFracEne)
hFirstAndSecondCluFracEneList[i].Fill(firstAndSecondCluFracEne)
hSecondCluEneList[i].Fill(secondCluEne)
hFirstCluFracXtalsList[i].Fill(firstCluFracXtals)
hDistSecondCluList[i].Fill(distSecondClu)
if numClusters > 0:
for cluster in clusterList:
if cluster.getTotNumXtals() == 1:
dist = cluster.distToAxis(cluster1)
energy = cluster.getEnergy()
hDistIsolatedCluList[i].Fill(dist)
hDist_vs_EnergyList[i].Fill(dist, energy)
# if UberSolutionIsBetter.EvalInstance():
hCluAngle_vs_UberAngle[i].Fill(CluAngle, UberAngle)
# And eventually draw stuff.
cFirstCluFracEne = ROOT.TCanvas('FirstCluFracEne', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cFirstCluFracEne.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cFirstCluFracEne.cd(i + 1)
ROOT.gPad.SetLogy(True)
hFirstCluFracEneList[i].Draw()
cFirstCluFracEne.cd()
cFirstCluFracEne.Update()
if SaveAllCanvas:
cFirstCluFracEne.Print(os.path.join(savedCanvasPath,
cFirstCluFracEne.GetName()+".pdf"))
cSecondCluFracEne = ROOT.TCanvas('SecondCluFracEne', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cSecondCluFracEne.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cSecondCluFracEne.cd(i + 1)
ROOT.gPad.SetLogy(True)
hSecondCluFracEneList[i].Draw()
cSecondCluFracEne.cd()
cSecondCluFracEne.Update()
if SaveAllCanvas:
cSecondCluFracEne.Print(os.path.join(savedCanvasPath,
cSecondCluFracEne.GetName()+".pdf"))
cFirstAndSecondCluFracEne = ROOT.TCanvas('FirstAndSecondCluFracEne', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cFirstAndSecondCluFracEne.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cFirstAndSecondCluFracEne.cd(i + 1)
ROOT.gPad.SetLogy(True)
hFirstAndSecondCluFracEneList[i].Draw()
cFirstAndSecondCluFracEne.cd()
cFirstAndSecondCluFracEne.Update()
if SaveAllCanvas:
cFirstAndSecondCluFracEne.Print(os.path.join(savedCanvasPath,
cFirstAndSecondCluFracEne.GetName()+".pdf"))
cSecondCluEne = ROOT.TCanvas('SecondCluEne', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cSecondCluEne.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cSecondCluEne.cd(i + 1)
ROOT.gPad.SetLogy(True)
hSecondCluEneList[i].Draw()
cSecondCluEne.cd()
cSecondCluEne.Update()
if SaveAllCanvas:
cSecondCluEne.Print(os.path.join(savedCanvasPath,
cSecondCluEne.GetName()+".pdf"))
cFirstCluFracXtals = ROOT.TCanvas('FirstCluFracXtals', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cFirstCluFracXtals.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cFirstCluFracXtals.cd(i + 1)
ROOT.gPad.SetLogy(True)
hFirstCluFracXtalsList[i].Draw()
cFirstCluFracXtals.cd()
cFirstCluFracXtals.Update()
if SaveAllCanvas:
cFirstCluFracXtals.Print(os.path.join(savedCanvasPath,
cFirstCluFracXtals.GetName()+".pdf"))
cNumClu = ROOT.TCanvas('NumClu', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cNumClu.Divide(3, 2)
labIsolated = ROOT.TLatex(0.4, 0.82, 'Isolated clusters')
labIsolated.SetTextColor(ROOT.kRed)
labIsolated.SetNDC()
labAll = ROOT.TLatex(0.4, 0.75, 'All clusters')
labAll.SetTextColor(ROOT.kBlack)
labAll.SetNDC()
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cNumClu.cd(i + 1)
ROOT.gPad.SetLogy(True)
hNumIsolatedCluList[i].SetLineColor(ROOT.kRed)
hNumIsolatedCluList[i].Draw()
hNumCluList[i].Draw('sames')
labIsolated.Draw()
labAll.Draw()
cNumClu.cd()
cNumClu.Update()
if SaveAllCanvas:
cNumClu.Print(os.path.join(savedCanvasPath,
cNumClu.GetName()+".pdf"))
cDistIsolatedClu = ROOT.TCanvas('DistIsolatedClu', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cDistIsolatedClu.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cDistIsolatedClu.cd(i + 1)
ROOT.gPad.SetLogy(True)
hDistIsolatedCluList[i].Draw()
cDistIsolatedClu.cd()
cDistIsolatedClu.Update()
if SaveAllCanvas:
cDistIsolatedClu.Print(os.path.join(savedCanvasPath,
cDistIsolatedClu.GetName()+".pdf"))
cDistSecondClu = ROOT.TCanvas('DistSecondClu', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cDistSecondClu.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cDistSecondClu.cd(i + 1)
ROOT.gPad.SetLogy(True)
hDistSecondCluList[i].Draw()
cDistSecondClu.cd()
cDistSecondClu.Update()
if SaveAllCanvas:
cDistSecondClu.Print(os.path.join(savedCanvasPath,
cDistSecondClu.GetName()+".pdf"))
cDist_vs_Energy = ROOT.TCanvas('Dist_vs_Energy', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cDist_vs_Energy.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cDist_vs_Energy.cd(i + 1)
hDist_vs_EnergyList[i].Draw()
cDist_vs_Energy.cd()
cDist_vs_Energy.Update()
if SaveAllCanvas:
cDist_vs_Energy.Print(os.path.join(savedCanvasPath,
cDist_vs_Energy.GetName()+".pdf"))
#f = ROOT.TF1("f", "x", -5, 5)
cCluAngle_vs_UberAngle = ROOT.TCanvas('CluAngle_vs_UberAngle', '', 1100, 600)
ROOT.gPad.SetTitle(ROOT.gPad.GetName())
cCluAngle_vs_UberAngle.Divide(3, 2)
for (i, cut) in enumerate(ANALYSIS_BIN_LIST):
cCluAngle_vs_UberAngle.cd(i + 1)
hCluAngle_vs_UberAngle[i].Draw('colz')
#f.Draw("same")
cCluAngle_vs_UberAngle.cd()
cCluAngle_vs_UberAngle.Update()
if SaveAllCanvas:
cCluAngle_vs_UberAngle.Print(os.path.join(savedCanvasPath,
cCluAngle_vs_UberAngle.GetName()+".pdf"))
| 39.476471 | 160 | 0.626807 |
98b6f12fbd89732708260071d99695e847c1927b | 3,995 | py | Python | syntropy_sdk/models/check_mfa_for_new_social_account_response.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | 1 | 2020-12-17T17:30:12.000Z | 2020-12-17T17:30:12.000Z | syntropy_sdk/models/check_mfa_for_new_social_account_response.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | syntropy_sdk/models/check_mfa_for_new_social_account_response.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
syntropy-auth-service
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CheckMFAForNewSocialAccountResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {"secret": "str", "user_id": "str"}
attribute_map = {"secret": "secret", "user_id": "userId"}
def __init__(self, secret=None, user_id=None): # noqa: E501
"""CheckMFAForNewSocialAccountResponse - a model defined in Swagger""" # noqa: E501
self._secret = None
self._user_id = None
self.discriminator = None
if secret is not None:
self.secret = secret
if user_id is not None:
self.user_id = user_id
@property
def secret(self):
"""Gets the secret of this CheckMFAForNewSocialAccountResponse. # noqa: E501
:return: The secret of this CheckMFAForNewSocialAccountResponse. # noqa: E501
:rtype: str
"""
return self._secret
@secret.setter
def secret(self, secret):
"""Sets the secret of this CheckMFAForNewSocialAccountResponse.
:param secret: The secret of this CheckMFAForNewSocialAccountResponse. # noqa: E501
:type: str
"""
self._secret = secret
@property
def user_id(self):
"""Gets the user_id of this CheckMFAForNewSocialAccountResponse. # noqa: E501
:return: The user_id of this CheckMFAForNewSocialAccountResponse. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this CheckMFAForNewSocialAccountResponse.
:param user_id: The user_id of this CheckMFAForNewSocialAccountResponse. # noqa: E501
:type: str
"""
self._user_id = user_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(CheckMFAForNewSocialAccountResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CheckMFAForNewSocialAccountResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
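# Illustrative usage (not part of the generated code; values are placeholders):
#
#     resp = CheckMFAForNewSocialAccountResponse(secret="JBSWY3DP", user_id="42")
#     resp.to_dict()   # -> {'secret': 'JBSWY3DP', 'user_id': '42'}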
| 29.592593 | 119 | 0.582478 |
27a9a72bad8a0eb188dfba50afaf87c929935c16 | 20,906 | py | Python | astropy/time/tests/test_methods.py | jbkalmbach/astropy | 88ae8c615533efd1e60de4aded204943f66f881c | [
"BSD-3-Clause"
] | null | null | null | astropy/time/tests/test_methods.py | jbkalmbach/astropy | 88ae8c615533efd1e60de4aded204943f66f881c | [
"BSD-3-Clause"
] | 11 | 2017-12-18T16:27:29.000Z | 2018-08-29T14:54:22.000Z | astropy/time/tests/test_methods.py | jbkalmbach/astropy | 88ae8c615533efd1e60de4aded204943f66f881c | [
"BSD-3-Clause"
] | 1 | 2018-08-02T09:33:21.000Z | 2018-08-02T09:33:21.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import copy
import pytest
import numpy as np
from .. import Time
@pytest.fixture(scope="module", params=[True, False])
def masked(request):
# Could not figure out a better way to parametrize the setup method
global use_masked_data
use_masked_data = request.param
yield use_masked_data
class TestManipulation():
"""Manipulation of Time objects, ensuring attributes are done correctly."""
def setup(self):
mjd = np.arange(50000, 50010)
frac = np.arange(0., 0.999, 0.2)
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t0 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
self.t1 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
# Note: location is along last axis only.
self.t2 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
def test_ravel(self, masked):
t0_ravel = self.t0.ravel()
assert t0_ravel.shape == (self.t0.size,)
assert np.all(t0_ravel.jd1 == self.t0.jd1.ravel())
assert np.may_share_memory(t0_ravel.jd1, self.t0.jd1)
assert t0_ravel.location is None
t1_ravel = self.t1.ravel()
assert t1_ravel.shape == (self.t1.size,)
assert np.all(t1_ravel.jd1 == self.t1.jd1.ravel())
assert np.may_share_memory(t1_ravel.jd1, self.t1.jd1)
assert t1_ravel.location is self.t1.location
t2_ravel = self.t2.ravel()
assert t2_ravel.shape == (self.t2.size,)
assert np.all(t2_ravel.jd1 == self.t2.jd1.ravel())
assert np.may_share_memory(t2_ravel.jd1, self.t2.jd1)
assert t2_ravel.location.shape == t2_ravel.shape
# Broadcasting and ravelling cannot be done without a copy.
assert not np.may_share_memory(t2_ravel.location, self.t2.location)
def test_flatten(self, masked):
t0_flatten = self.t0.flatten()
assert t0_flatten.shape == (self.t0.size,)
assert t0_flatten.location is None
# Flatten always makes a copy.
assert not np.may_share_memory(t0_flatten.jd1, self.t0.jd1)
t1_flatten = self.t1.flatten()
assert t1_flatten.shape == (self.t1.size,)
assert not np.may_share_memory(t1_flatten.jd1, self.t1.jd1)
assert t1_flatten.location is not self.t1.location
assert t1_flatten.location == self.t1.location
t2_flatten = self.t2.flatten()
assert t2_flatten.shape == (self.t2.size,)
assert not np.may_share_memory(t2_flatten.jd1, self.t2.jd1)
assert t2_flatten.location.shape == t2_flatten.shape
assert not np.may_share_memory(t2_flatten.location, self.t2.location)
def test_transpose(self, masked):
t0_transpose = self.t0.transpose()
assert t0_transpose.shape == (5, 10)
assert np.all(t0_transpose.jd1 == self.t0.jd1.transpose())
assert np.may_share_memory(t0_transpose.jd1, self.t0.jd1)
assert t0_transpose.location is None
t1_transpose = self.t1.transpose()
assert t1_transpose.shape == (5, 10)
assert np.all(t1_transpose.jd1 == self.t1.jd1.transpose())
assert np.may_share_memory(t1_transpose.jd1, self.t1.jd1)
assert t1_transpose.location is self.t1.location
t2_transpose = self.t2.transpose()
assert t2_transpose.shape == (5, 10)
assert np.all(t2_transpose.jd1 == self.t2.jd1.transpose())
assert np.may_share_memory(t2_transpose.jd1, self.t2.jd1)
assert t2_transpose.location.shape == t2_transpose.shape
assert np.may_share_memory(t2_transpose.location, self.t2.location)
# Only one check on T, since it just calls transpose anyway.
t2_T = self.t2.T
assert t2_T.shape == (5, 10)
assert np.all(t2_T.jd1 == self.t2.jd1.T)
assert np.may_share_memory(t2_T.jd1, self.t2.jd1)
assert t2_T.location.shape == t2_T.location.shape
assert np.may_share_memory(t2_T.location, self.t2.location)
def test_diagonal(self, masked):
t0_diagonal = self.t0.diagonal()
assert t0_diagonal.shape == (5,)
assert np.all(t0_diagonal.jd1 == self.t0.jd1.diagonal())
assert t0_diagonal.location is None
assert np.may_share_memory(t0_diagonal.jd1, self.t0.jd1)
t1_diagonal = self.t1.diagonal()
assert t1_diagonal.shape == (5,)
assert np.all(t1_diagonal.jd1 == self.t1.jd1.diagonal())
assert t1_diagonal.location is self.t1.location
assert np.may_share_memory(t1_diagonal.jd1, self.t1.jd1)
t2_diagonal = self.t2.diagonal()
assert t2_diagonal.shape == (5,)
assert np.all(t2_diagonal.jd1 == self.t2.jd1.diagonal())
assert t2_diagonal.location.shape == t2_diagonal.shape
assert np.may_share_memory(t2_diagonal.jd1, self.t2.jd1)
assert np.may_share_memory(t2_diagonal.location, self.t2.location)
def test_swapaxes(self, masked):
t0_swapaxes = self.t0.swapaxes(0, 1)
assert t0_swapaxes.shape == (5, 10)
assert np.all(t0_swapaxes.jd1 == self.t0.jd1.swapaxes(0, 1))
assert np.may_share_memory(t0_swapaxes.jd1, self.t0.jd1)
assert t0_swapaxes.location is None
t1_swapaxes = self.t1.swapaxes(0, 1)
assert t1_swapaxes.shape == (5, 10)
assert np.all(t1_swapaxes.jd1 == self.t1.jd1.swapaxes(0, 1))
assert np.may_share_memory(t1_swapaxes.jd1, self.t1.jd1)
assert t1_swapaxes.location is self.t1.location
t2_swapaxes = self.t2.swapaxes(0, 1)
assert t2_swapaxes.shape == (5, 10)
assert np.all(t2_swapaxes.jd1 == self.t2.jd1.swapaxes(0, 1))
assert np.may_share_memory(t2_swapaxes.jd1, self.t2.jd1)
assert t2_swapaxes.location.shape == t2_swapaxes.shape
assert np.may_share_memory(t2_swapaxes.location, self.t2.location)
def test_reshape(self, masked):
t0_reshape = self.t0.reshape(5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert np.may_share_memory(t0_reshape.jd1, self.t0.jd1)
assert np.may_share_memory(t0_reshape.jd2, self.t0.jd2)
assert t0_reshape.location is None
t1_reshape = self.t1.reshape(2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
assert np.may_share_memory(t1_reshape.jd1, self.t1.jd1)
assert t1_reshape.location is self.t1.location
# For reshape(5, 2, 5), the location array can remain the same.
t2_reshape = self.t2.reshape(5, 2, 5)
assert t2_reshape.shape == (5, 2, 5)
assert np.all(t2_reshape.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_reshape.jd1, self.t2.jd1)
assert t2_reshape.location.shape == t2_reshape.shape
assert np.may_share_memory(t2_reshape.location, self.t2.location)
# But for reshape(5, 5, 2), location has to be broadcast and copied.
t2_reshape2 = self.t2.reshape(5, 5, 2)
assert t2_reshape2.shape == (5, 5, 2)
assert np.all(t2_reshape2.jd1 == self.t2.jd1.reshape(5, 5, 2))
assert np.may_share_memory(t2_reshape2.jd1, self.t2.jd1)
assert t2_reshape2.location.shape == t2_reshape2.shape
assert not np.may_share_memory(t2_reshape2.location, self.t2.location)
t2_reshape_t = self.t2.reshape(10, 5).T
assert t2_reshape_t.shape == (5, 10)
assert np.may_share_memory(t2_reshape_t.jd1, self.t2.jd1)
assert t2_reshape_t.location.shape == t2_reshape_t.shape
assert np.may_share_memory(t2_reshape_t.location, self.t2.location)
# Finally, reshape in a way that cannot be a view.
t2_reshape_t_reshape = t2_reshape_t.reshape(10, 5)
assert t2_reshape_t_reshape.shape == (10, 5)
assert not np.may_share_memory(t2_reshape_t_reshape.jd1, self.t2.jd1)
assert (t2_reshape_t_reshape.location.shape ==
t2_reshape_t_reshape.shape)
assert not np.may_share_memory(t2_reshape_t_reshape.location,
t2_reshape_t.location)
def test_shape_setting(self, masked):
t0_reshape = self.t0.copy()
mjd = t0_reshape.mjd # Creates a cache of the mjd attribute
t0_reshape.shape = (5, 2, 5)
assert t0_reshape.shape == (5, 2, 5)
assert mjd.shape != t0_reshape.mjd.shape # Cache got cleared
assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
assert t0_reshape.location is None
# But if the shape doesn't work, one should get an error.
t0_reshape_t = t0_reshape.T
with pytest.raises(AttributeError):
t0_reshape_t.shape = (10, 5)
# check no shape was changed.
assert t0_reshape_t.shape == t0_reshape.T.shape
assert t0_reshape_t.jd1.shape == t0_reshape.T.shape
assert t0_reshape_t.jd2.shape == t0_reshape.T.shape
t1_reshape = self.t1.copy()
t1_reshape.shape = (2, 5, 5)
assert t1_reshape.shape == (2, 5, 5)
assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
# location is a single element, so its shape should not change.
assert t1_reshape.location.shape == ()
# For reshape(5, 2, 5), the location array can remain the same.
# Note that we need to work directly on self.t2 here, since any
# copy would cause location to have the full shape.
self.t2.shape = (5, 2, 5)
assert self.t2.shape == (5, 2, 5)
assert self.t2.jd1.shape == (5, 2, 5)
assert self.t2.jd2.shape == (5, 2, 5)
assert self.t2.location.shape == (5, 2, 5)
assert self.t2.location.strides == (0, 0, 24)
# But for reshape(50), location would need to be copied, so this
# should fail.
oldshape = self.t2.shape
with pytest.raises(AttributeError):
self.t2.shape = (50,)
# check no shape was changed.
assert self.t2.jd1.shape == oldshape
assert self.t2.jd2.shape == oldshape
assert self.t2.location.shape == oldshape
# reset t2 to its original.
self.setup()
def test_squeeze(self, masked):
t0_squeeze = self.t0.reshape(5, 1, 2, 1, 5).squeeze()
assert t0_squeeze.shape == (5, 2, 5)
assert np.all(t0_squeeze.jd1 == self.t0.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t0_squeeze.jd1, self.t0.jd1)
assert t0_squeeze.location is None
t1_squeeze = self.t1.reshape(1, 5, 1, 2, 5).squeeze()
assert t1_squeeze.shape == (5, 2, 5)
assert np.all(t1_squeeze.jd1 == self.t1.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t1_squeeze.jd1, self.t1.jd1)
assert t1_squeeze.location is self.t1.location
t2_squeeze = self.t2.reshape(1, 1, 5, 2, 5, 1, 1).squeeze()
assert t2_squeeze.shape == (5, 2, 5)
assert np.all(t2_squeeze.jd1 == self.t2.jd1.reshape(5, 2, 5))
assert np.may_share_memory(t2_squeeze.jd1, self.t2.jd1)
assert t2_squeeze.location.shape == t2_squeeze.shape
assert np.may_share_memory(t2_squeeze.location, self.t2.location)
def test_add_dimension(self, masked):
t0_adddim = self.t0[:, np.newaxis, :]
assert t0_adddim.shape == (10, 1, 5)
assert np.all(t0_adddim.jd1 == self.t0.jd1[:, np.newaxis, :])
assert np.may_share_memory(t0_adddim.jd1, self.t0.jd1)
assert t0_adddim.location is None
t1_adddim = self.t1[:, :, np.newaxis]
assert t1_adddim.shape == (10, 5, 1)
assert np.all(t1_adddim.jd1 == self.t1.jd1[:, :, np.newaxis])
assert np.may_share_memory(t1_adddim.jd1, self.t1.jd1)
assert t1_adddim.location is self.t1.location
t2_adddim = self.t2[:, :, np.newaxis]
assert t2_adddim.shape == (10, 5, 1)
assert np.all(t2_adddim.jd1 == self.t2.jd1[:, :, np.newaxis])
assert np.may_share_memory(t2_adddim.jd1, self.t2.jd1)
assert t2_adddim.location.shape == t2_adddim.shape
assert np.may_share_memory(t2_adddim.location, self.t2.location)
def test_take(self, masked):
t0_take = self.t0.take((5, 2))
assert t0_take.shape == (2,)
assert np.all(t0_take.jd1 == self.t0._time.jd1.take((5, 2)))
assert t0_take.location is None
t1_take = self.t1.take((2, 4), axis=1)
assert t1_take.shape == (10, 2)
assert np.all(t1_take.jd1 == self.t1.jd1.take((2, 4), axis=1))
assert t1_take.location is self.t1.location
t2_take = self.t2.take((1, 3, 7), axis=0)
assert t2_take.shape == (3, 5)
assert np.all(t2_take.jd1 == self.t2.jd1.take((1, 3, 7), axis=0))
assert t2_take.location.shape == t2_take.shape
t2_take2 = self.t2.take((5, 15))
assert t2_take2.shape == (2,)
assert np.all(t2_take2.jd1 == self.t2.jd1.take((5, 15)))
assert t2_take2.location.shape == t2_take2.shape
def test_broadcast(self, masked):
"""Test using a callable method."""
t0_broadcast = self.t0._apply(np.broadcast_to, shape=(3, 10, 5))
assert t0_broadcast.shape == (3, 10, 5)
assert np.all(t0_broadcast.jd1 == self.t0.jd1)
assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
assert t0_broadcast.location is None
t1_broadcast = self.t1._apply(np.broadcast_to, shape=(3, 10, 5))
assert t1_broadcast.shape == (3, 10, 5)
assert np.all(t1_broadcast.jd1 == self.t1.jd1)
assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
assert t1_broadcast.location is self.t1.location
t2_broadcast = self.t2._apply(np.broadcast_to, shape=(3, 10, 5))
assert t2_broadcast.shape == (3, 10, 5)
assert np.all(t2_broadcast.jd1 == self.t2.jd1)
assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
assert t2_broadcast.location.shape == t2_broadcast.shape
assert np.may_share_memory(t2_broadcast.location, self.t2.location)
class TestArithmetic():
"""Arithmetic on Time objects, using both doubles."""
kwargs = ({}, {'axis': None}, {'axis': 0}, {'axis': 1}, {'axis': 2})
functions = ('min', 'max', 'sort')
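    # The parametrized tests below sweep the full grid of axis keyword
    # choices against each of these reduction/sort functions.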
def setup(self):
mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
frac = np.array([0.1, 0.1+1.e-15, 0.1-1.e-15, 0.9+2.e-16, 0.9])
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t0 = Time(mjd, frac, format='mjd', scale='utc')
# Define arrays with same ordinal properties
frac = np.array([1, 2, 0, 4, 3])
if use_masked_data:
frac = np.ma.array(frac)
frac[1] = np.ma.masked
self.t1 = Time(mjd + frac, format='mjd', scale='utc')
self.jd = mjd + frac
@pytest.mark.parametrize('kw, func', itertools.product(kwargs, functions))
def test_argfuncs(self, kw, func, masked):
"""
Test that np.argfunc(jd, **kw) is the same as t0.argfunc(**kw) where
jd is a similarly shaped array with the same ordinal properties but
all integer values. Also test the same for t1 which has the same
integral values as jd.
"""
t0v = getattr(self.t0, 'arg' + func)(**kw)
t1v = getattr(self.t1, 'arg' + func)(**kw)
jdv = getattr(np, 'arg' + func)(self.jd, **kw)
if self.t0.masked and kw == {'axis': None} and func == 'sort':
t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])
assert np.all(t0v == jdv)
assert np.all(t1v == jdv)
assert t0v.shape == jdv.shape
assert t1v.shape == jdv.shape
@pytest.mark.parametrize('kw, func', itertools.product(kwargs, functions))
def test_funcs(self, kw, func, masked):
"""
Test that np.func(jd, **kw) is the same as t1.func(**kw) where
jd is a similarly shaped array and the same integral values.
"""
t1v = getattr(self.t1, func)(**kw)
jdv = getattr(np, func)(self.jd, **kw)
assert np.all(t1v.value == jdv)
assert t1v.shape == jdv.shape
def test_argmin(self, masked):
assert self.t0.argmin() == 2
assert np.all(self.t0.argmin(axis=0) == 0)
assert np.all(self.t0.argmin(axis=1) == 0)
assert np.all(self.t0.argmin(axis=2) == 2)
def test_argmax(self, masked):
assert self.t0.argmax() == self.t0.size - 2
if masked:
# The 0 is where all entries are masked in that axis
assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
else:
assert np.all(self.t0.argmax(axis=0) == 1)
assert np.all(self.t0.argmax(axis=1) == 4)
assert np.all(self.t0.argmax(axis=2) == 3)
def test_argsort(self, masked):
order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
assert np.all(self.t0.argsort() == np.array(order))
assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
assert np.all(self.t0.argsort(axis=2) == np.array(order))
ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
if masked:
t0v = self.t0.argsort(axis=None)
# Manually remove elements in ravel that correspond to masked
# entries in self.t0. This removes the 10 entries that are masked
# which show up at the end of the list.
mask = self.t0.mask.ravel()[ravel]
ravel = ravel[~mask]
assert np.all(t0v[:-10] == ravel)
else:
assert np.all(self.t0.argsort(axis=None) == ravel)
def test_min(self, masked):
assert self.t0.min() == self.t0[0, 0, 2]
assert np.all(self.t0.min(0) == self.t0[0])
assert np.all(self.t0.min(1) == self.t0[:, 0])
assert np.all(self.t0.min(2) == self.t0[:, :, 2])
assert self.t0.min(0).shape == (5, 5)
assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
assert self.t0.min(1).shape == (2, 5)
assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
assert self.t0.min(2).shape == (2, 5)
assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)
def test_max(self, masked):
assert self.t0.max() == self.t0[-1, -1, -2]
assert np.all(self.t0.max(0) == self.t0[1])
assert np.all(self.t0.max(1) == self.t0[:, 4])
assert np.all(self.t0.max(2) == self.t0[:, :, 3])
assert self.t0.max(0).shape == (5, 5)
assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)
def test_ptp(self, masked):
assert self.t0.ptp() == self.t0.max() - self.t0.min()
assert np.all(self.t0.ptp(0) == self.t0.max(0) - self.t0.min(0))
assert self.t0.ptp(0).shape == (5, 5)
assert self.t0.ptp(0, keepdims=True).shape == (1, 5, 5)
def test_sort(self, masked):
order = [2, 0, 4, 3, 1] if masked else [2, 0, 1, 4, 3]
assert np.all(self.t0.sort() == self.t0[:, :, order])
assert np.all(self.t0.sort(0) == self.t0)
assert np.all(self.t0.sort(1) == self.t0)
assert np.all(self.t0.sort(2) == self.t0[:, :, order])
if not masked:
assert np.all(self.t0.sort(None) ==
self.t0[:, :, order].ravel())
# Bit superfluous, but good to check.
assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))
def test_regression():
# For #5225, where a time with a single-element delta_ut1_utc could not
# be copied, flattened, or ravelled. (For copy, it is in test_basic.)
t = Time(49580.0, scale='tai', format='mjd')
t_ut1 = t.ut1
t_ut1_copy = copy.deepcopy(t_ut1)
assert type(t_ut1_copy.delta_ut1_utc) is np.ndarray
t_ut1_flatten = t_ut1.flatten()
assert type(t_ut1_flatten.delta_ut1_utc) is np.ndarray
t_ut1_ravel = t_ut1.ravel()
assert type(t_ut1_ravel.delta_ut1_utc) is np.ndarray
assert t_ut1_copy.delta_ut1_utc == t_ut1.delta_ut1_utc
| 48.05977 | 79 | 0.618913 |
47eecd07df4512f1d7f8ed4ba8b39648a1ea70fc | 6,079 | py | Python | tasks/infer_engine.py | harisankarh/IndianNLP-Transliteration | 0e0dd8139c75477346c985201b51315b3a4e4f48 | [
"Apache-2.0"
] | 31 | 2020-09-24T04:32:47.000Z | 2022-02-24T06:12:39.000Z | tasks/infer_engine.py | harisankarh/IndianNLP-Transliteration | 0e0dd8139c75477346c985201b51315b3a4e4f48 | [
"Apache-2.0"
] | 4 | 2021-05-26T11:38:36.000Z | 2022-01-27T12:28:21.000Z | tasks/infer_engine.py | harisankarh/IndianNLP-Transliteration | 0e0dd8139c75477346c985201b51315b3a4e4f48 | [
"Apache-2.0"
] | 8 | 2020-09-04T12:33:30.000Z | 2022-01-12T05:43:54.000Z | import os
import sys
import torch
import utilities.lang_data_utils as lutl
import utilities.running_utils as rutl
from utilities.logging_utils import LOG2CSV
''' VocabSanitizer usage
voc_sanitize = lutl.VocabSanitizer("data/X_word_list.json")
result = voc_sanitize.reposition(result)
'''
tgt_glyph = lutl.GlyphStrawboss(glyphs = "data/hindi/hi_scripts.json")
en_glyph = lutl.GlyphStrawboss("en")
voc_sanitize = lutl.VocabSanitizer("data/hindi/mono/hi_words_sorted.json")
device = "cpu"
##=============== Models =======================================================
from tasks.rnn_xlit_runner import model
weight_path = "hypotheses/Training_hi_110/weights/Training_hi_110_model.pth"
weights = torch.load( weight_path, map_location=torch.device(device))
model.to(device)
model.load_state_dict(weights)
model.eval()
def inferencer(word, topk = 5):
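    # Encode the Latin-script word, run beam search on the seq2seq model,
    # decode every beam into the target script, then re-rank the candidates
    # with the vocabulary sanitizer.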
in_vec = torch.from_numpy(en_glyph.word2xlitvec(word)).to(device)
## change to active or passive beam
p_out_list = model.active_beam_inference(in_vec, beam_width = topk)
p_result = [ tgt_glyph.xlitvec2word(out.cpu().numpy()) for out in p_out_list]
r_result = voc_sanitize.reposition(p_result)
return p_result, r_result
##=============== Corr/ Emb Stacked
# ------------- Correction model -----------------------------------------------
''' Multinominal
from tasks.corr_xlit_runner import corr_model
corr_weight_path = "hypotheses/Training_mai_116_corr3_a/weights/Training_mai_116_corr3_a_corrnet.pth"
corr_weights = torch.load( corr_weight_path, map_location=torch.device(device))
corr_model.load_state_dict(corr_weights)
corr_model.eval()
hi_vocab = lutl.VocableStrawboss("data/konkani/gom_all_words_sorted.json")
'''
### -------------- Annoy based correction --------------------------------------
'''
import utilities.embed_utils as eutl
from tasks.emb_xlit_runner import emb_model
emb_weight_path = "hypotheses/Training_gom_emb5/weights/Training_gom_emb5_embnet.pth"
emb_weights = torch.load( emb_weight_path, map_location=torch.device(device))
emb_model.load_state_dict(emb_weights)
emb_model.eval()
## To Create fresh
# eutl.create_annoy_index_from_model(
# voc_json_file = "data/konkani/gom_all_words_sorted.json",
# glyph_obj = hi_glyph,
# model_func = emb_model.get_word_embedding,
# vec_sz = 512,
# save_prefix= 'hypotheses/Training_gom_emb6/Gom_emb6')
# sys.exit()
annoy_obj = eutl.AnnoyStrawboss(
voc_json_file = "data/konkani/gom_all_words_sorted.json",
annoy_tree_path = "hypotheses/Training_gom_emb5/Gom_emb5_word_vec.annoy",
vec_sz = 1024)
'''
def pred_contrive(corr_lst, pred_lst):
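    # Merge correction-model candidates with raw beam predictions: keep the
    # corrected words first, drop duplicates and "<UNK>", and cap the output
    # at the length of the corrected list.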
out =[]
for l in corr_lst:
if (l not in out) and (l != "<UNK>"):
out.append(l)
for l in pred_lst:
if l not in out:
out.append(l)
return out[:len(corr_lst)]
'''
def inferencer(word, topk = 5, knear = 1):
in_vec = torch.from_numpy(en_glyph.word2xlitvec(word)).to(device)
## change to active or passive beam
p_out_list = model.active_beam_inference(in_vec, beam_width = topk)
p_result = [ hi_glyph.xlitvec2word(out.cpu().numpy()) for out in p_out_list]
emb_list = [ emb_model.get_word_embedding(out) for out in p_out_list]
c_result = [annoy_obj.get_nearest_vocab(emb, count = knear) for emb in emb_list ]
c_result = sum(c_result, []) # delinieate 2d list
#c_out_list = [ corr_model.inference(out) for out in out_list]
#c_result = [ hi_vocab.get_word(out.cpu().numpy()) for out in c_out_list]
result = pred_contrive(c_result, p_result)
return result
'''
##=============== For Fused Variant
'''
from tasks.lm_fusion_runner import model
model.eval()
def inferencer(word, topk = 5):
in_vec = torch.from_numpy(en_glyph.word2xlitvec(word)).to(device)
p_out_list = model.basenet_inference(in_vec, beam_width = topk)
# p_out_list.sort(reverse=True, key=model.lm_heuristics)
p_result = [ hi_glyph.xlitvec2word(out.cpu().numpy()) for out in p_out_list]
result = p_result
return result
def lambda_experimenter(word, topk = 10):
in_vec = torch.from_numpy(en_glyph.word2xlitvec(word)).to(device)
## [0]log_smx [0]pred_tnsrs
p_out_list = model.basenet_inference(in_vec, beam_width = topk, heuristics = True)
p_out_heur = []
for out in p_out_list:
prd_prob = float( out[0] )
lm_prob = float( model.lm_heuristics(out[1]) )
word = hi_glyph.xlitvec2word(out[1].cpu().numpy())
p_out_heur.append( (word, prd_prob, lm_prob) )
return p_out_heur
'''
##==================
def infer_analytics(word):
"""Analytics by ploting values
"""
save_path = os.path.dirname(weight_path) + "/viz_log/"
if not os.path.exists(save_path): os.makedirs(save_path)
in_vec = torch.from_numpy(en_glyph.word2xlitvec(word))
out, aw = model.inference(in_vec, debug=1)
    result = tgt_glyph.xlitvec2word(out.numpy())  # tgt_glyph is the active glyph object (hi_glyph exists only in the commented-out blocks)
rutl.attention_weight_plotter(result, word, aw.detach().numpy()[:len(result)],
save_path=save_path )
return result
def infer_annoy_analytics(word, topk = 1, knear = 1):
''' Analytics with respect to Annoy usage
'''
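    # Note: depends on emb_model, annoy_obj and hi_glyph, which are defined
    # only inside the commented-out Annoy/embedding sections above; enable
    # those blocks before calling this helper.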
in_vec = torch.from_numpy(en_glyph.word2xlitvec(word)).to(device)
## change to active or passive beam
p_out_list = model.active_beam_inference(in_vec, beam_width = topk)
p_result = [ hi_glyph.xlitvec2word(out.cpu().numpy()) for out in p_out_list]
emb_list = [ emb_model.get_word_embedding(out) for out in p_out_list]
c_result = []
for i, emb in enumerate(emb_list):
c_res, c_val = annoy_obj.get_nearest_vocab_details(emb, count = knear)
c_result.append(c_res)
LOG2CSV([word, i+1, p_result[i], c_res[0], c_val[0]], csv_file="Annoy_115e5_setup.csv")
c_result = sum(c_result, []) # delinieate 2d list
result = pred_contrive(c_result, p_result)
return result
if __name__ == "__main__":
while(1):
a = input()
result = inferencer(a)
        print(result)
| 31.994737 | 101 | 0.681856 |
d1553d9108e407df986bef665d2e767a658d904c | 73,859 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_segment_routing_ms_oper.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_segment_routing_ms_oper.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_segment_routing_ms_oper.py | tkamata-test/ydk-py | b637e7853a8edbbd31fbc05afa3aa4110b31c5f9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """ Cisco_IOS_XR_segment_routing_ms_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR segment\-routing\-ms package operational data.
This module contains definitions
for the following management objects\:
srms\: Segment Routing Mapping Server operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class SrmsMiAfEBEnum(Enum):
"""
SrmsMiAfEBEnum
Srms mi af e b
.. data:: none = 0
None
.. data:: ipv4 = 1
IPv4
.. data:: ipv6 = 2
IPv6
"""
none = 0
ipv4 = 1
ipv6 = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['SrmsMiAfEBEnum']
class SrmsMiFlagEBEnum(Enum):
"""
SrmsMiFlagEBEnum
Srms mi flag e b
.. data:: false = 0
False
.. data:: true = 1
True
"""
false = 0
true = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['SrmsMiFlagEBEnum']
class SrmsMiSrcEBEnum(Enum):
"""
SrmsMiSrcEBEnum
Srms mi src e b
.. data:: none = 0
None
.. data:: local = 1
Local
.. data:: remote = 2
Remote
"""
none = 0
local = 1
remote = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['SrmsMiSrcEBEnum']
class Srms(object):
"""
Segment Routing Mapping Server operational data
.. attribute:: mapping
IP prefix to SID mappings
**type**\: :py:class:`Mapping <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping>`
.. attribute:: policy
Policy operational data
**type**\: :py:class:`Policy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.mapping = Srms.Mapping()
self.mapping.parent = self
self.policy = Srms.Policy()
self.policy.parent = self
class Mapping(object):
"""
IP prefix to SID mappings
.. attribute:: mapping_ipv4
IPv4 prefix to SID mappings
**type**\: :py:class:`MappingIpv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping.MappingIpv4>`
.. attribute:: mapping_ipv6
IPv6 prefix to SID mappings
**type**\: :py:class:`MappingIpv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping.MappingIpv6>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mapping_ipv4 = Srms.Mapping.MappingIpv4()
self.mapping_ipv4.parent = self
self.mapping_ipv6 = Srms.Mapping.MappingIpv6()
self.mapping_ipv6.parent = self
class MappingIpv4(object):
"""
IPv4 prefix to SID mappings
.. attribute:: mapping_mi
IP prefix to SID mapping item. It's not possible to list all of the IP prefix to SID mappings, as the set of valid prefixes could be very large. Instead, SID map information must be retrieved individually for each prefix of interest
**type**\: list of :py:class:`MappingMi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping.MappingIpv4.MappingMi>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mapping_mi = YList()
self.mapping_mi.parent = self
self.mapping_mi.name = 'mapping_mi'
class MappingMi(object):
"""
IP prefix to SID mapping item. It's not possible
to list all of the IP prefix to SID mappings, as
the set of valid prefixes could be very large.
Instead, SID map information must be retrieved
individually for each prefix of interest.
.. attribute:: addr
addr
**type**\: :py:class:`Addr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping.MappingIpv4.MappingMi.Addr>`
.. attribute:: area
Area (OSPF) or Level (ISIS)
**type**\: str
**length:** 0..30
.. attribute:: flag_attached
Attached flag
**type**\: :py:class:`SrmsMiFlagEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiFlagEBEnum>`
.. attribute:: ip
IP
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: last_prefix
Last IP Prefix
**type**\: str
**length:** 0..50
.. attribute:: last_sid_index
Last SID Index
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix
Prefix
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: prefix_xr
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: router
Router ID
**type**\: str
**length:** 0..30
.. attribute:: sid_count
SID range
**type**\: int
**range:** 0..4294967295
.. attribute:: sid_start
Starting SID
**type**\: int
**range:** 0..4294967295
.. attribute:: src
src
**type**\: :py:class:`SrmsMiSrcEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiSrcEBEnum>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.addr = Srms.Mapping.MappingIpv4.MappingMi.Addr()
self.addr.parent = self
self.area = None
self.flag_attached = None
self.ip = None
self.last_prefix = None
self.last_sid_index = None
self.prefix = None
self.prefix_xr = None
self.router = None
self.sid_count = None
self.sid_start = None
self.src = None
class Addr(object):
"""
addr
.. attribute:: af
AF
**type**\: :py:class:`SrmsMiAfEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiAfEBEnum>`
.. attribute:: ipv4
IPv4
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = None
self.ipv4 = None
self.ipv6 = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping/Cisco-IOS-XR-segment-routing-ms-oper:mapping-ipv4/Cisco-IOS-XR-segment-routing-ms-oper:mapping-mi/Cisco-IOS-XR-segment-routing-ms-oper:addr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af is not None:
return True
if self.ipv4 is not None:
return True
if self.ipv6 is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping.MappingIpv4.MappingMi.Addr']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping/Cisco-IOS-XR-segment-routing-ms-oper:mapping-ipv4/Cisco-IOS-XR-segment-routing-ms-oper:mapping-mi'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.addr is not None and self.addr._has_data():
return True
if self.area is not None:
return True
if self.flag_attached is not None:
return True
if self.ip is not None:
return True
if self.last_prefix is not None:
return True
if self.last_sid_index is not None:
return True
if self.prefix is not None:
return True
if self.prefix_xr is not None:
return True
if self.router is not None:
return True
if self.sid_count is not None:
return True
if self.sid_start is not None:
return True
if self.src is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping.MappingIpv4.MappingMi']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping/Cisco-IOS-XR-segment-routing-ms-oper:mapping-ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mapping_mi is not None:
for child_ref in self.mapping_mi:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping.MappingIpv4']['meta_info']
class MappingIpv6(object):
"""
IPv6 prefix to SID mappings
.. attribute:: mapping_mi
IP prefix to SID mapping item. It's not possible to list all of the IP prefix to SID mappings, as the set of valid prefixes could be very large. Instead, SID map information must be retrieved individually for each prefix of interest
**type**\: list of :py:class:`MappingMi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping.MappingIpv6.MappingMi>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mapping_mi = YList()
self.mapping_mi.parent = self
self.mapping_mi.name = 'mapping_mi'
class MappingMi(object):
"""
IP prefix to SID mapping item. It's not possible
to list all of the IP prefix to SID mappings, as
the set of valid prefixes could be very large.
Instead, SID map information must be retrieved
individually for each prefix of interest.
.. attribute:: addr
addr
**type**\: :py:class:`Addr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Mapping.MappingIpv6.MappingMi.Addr>`
.. attribute:: area
Area (OSPF) or Level (ISIS)
**type**\: str
**length:** 0..30
.. attribute:: flag_attached
Attached flag
**type**\: :py:class:`SrmsMiFlagEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiFlagEBEnum>`
.. attribute:: ip
IP
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: last_prefix
Last IP Prefix
**type**\: str
**length:** 0..50
.. attribute:: last_sid_index
Last SID Index
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix
Prefix
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: prefix_xr
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: router
Router ID
**type**\: str
**length:** 0..30
.. attribute:: sid_count
SID range
**type**\: int
**range:** 0..4294967295
.. attribute:: sid_start
Starting SID
**type**\: int
**range:** 0..4294967295
.. attribute:: src
src
**type**\: :py:class:`SrmsMiSrcEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiSrcEBEnum>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.addr = Srms.Mapping.MappingIpv6.MappingMi.Addr()
self.addr.parent = self
self.area = None
self.flag_attached = None
self.ip = None
self.last_prefix = None
self.last_sid_index = None
self.prefix = None
self.prefix_xr = None
self.router = None
self.sid_count = None
self.sid_start = None
self.src = None
class Addr(object):
"""
addr
.. attribute:: af
AF
**type**\: :py:class:`SrmsMiAfEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiAfEBEnum>`
.. attribute:: ipv4
IPv4
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = None
self.ipv4 = None
self.ipv6 = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping/Cisco-IOS-XR-segment-routing-ms-oper:mapping-ipv6/Cisco-IOS-XR-segment-routing-ms-oper:mapping-mi/Cisco-IOS-XR-segment-routing-ms-oper:addr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af is not None:
return True
if self.ipv4 is not None:
return True
if self.ipv6 is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping.MappingIpv6.MappingMi.Addr']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping/Cisco-IOS-XR-segment-routing-ms-oper:mapping-ipv6/Cisco-IOS-XR-segment-routing-ms-oper:mapping-mi'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.addr is not None and self.addr._has_data():
return True
if self.area is not None:
return True
if self.flag_attached is not None:
return True
if self.ip is not None:
return True
if self.last_prefix is not None:
return True
if self.last_sid_index is not None:
return True
if self.prefix is not None:
return True
if self.prefix_xr is not None:
return True
if self.router is not None:
return True
if self.sid_count is not None:
return True
if self.sid_start is not None:
return True
if self.src is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping.MappingIpv6.MappingMi']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping/Cisco-IOS-XR-segment-routing-ms-oper:mapping-ipv6'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mapping_mi is not None:
for child_ref in self.mapping_mi:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping.MappingIpv6']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:mapping'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mapping_ipv4 is not None and self.mapping_ipv4._has_data():
return True
if self.mapping_ipv6 is not None and self.mapping_ipv6._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Mapping']['meta_info']
class Policy(object):
"""
Policy operational data
.. attribute:: policy_ipv4
IPv4 policy operational data
**type**\: :py:class:`PolicyIpv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4>`
.. attribute:: policy_ipv6
IPv6 policy operational data
**type**\: :py:class:`PolicyIpv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_ipv4 = Srms.Policy.PolicyIpv4()
self.policy_ipv4.parent = self
self.policy_ipv6 = Srms.Policy.PolicyIpv6()
self.policy_ipv6.parent = self
class PolicyIpv4(object):
"""
IPv4 policy operational data
.. attribute:: policy_ipv4_active
IPv4 active policy operational data
**type**\: :py:class:`PolicyIpv4Active <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4.PolicyIpv4Active>`
.. attribute:: policy_ipv4_backup
IPv4 backup policy operational data
**type**\: :py:class:`PolicyIpv4Backup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4.PolicyIpv4Backup>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_ipv4_active = Srms.Policy.PolicyIpv4.PolicyIpv4Active()
self.policy_ipv4_active.parent = self
self.policy_ipv4_backup = Srms.Policy.PolicyIpv4.PolicyIpv4Backup()
self.policy_ipv4_backup.parent = self
class PolicyIpv4Backup(object):
"""
IPv4 backup policy operational data
.. attribute:: policy_mi
Mapping Item
**type**\: list of :py:class:`PolicyMi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4.PolicyIpv4Backup.PolicyMi>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_mi = YList()
self.policy_mi.parent = self
self.policy_mi.name = 'policy_mi'
class PolicyMi(object):
"""
Mapping Item
.. attribute:: mi_id <key>
Mapping Item ID (0, 1, 2, ...)
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: addr
addr
**type**\: :py:class:`Addr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4.PolicyIpv4Backup.PolicyMi.Addr>`
.. attribute:: area
Area (OSPF) or Level (ISIS)
**type**\: str
**length:** 0..30
.. attribute:: flag_attached
Attached flag
**type**\: :py:class:`SrmsMiFlagEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiFlagEBEnum>`
.. attribute:: last_prefix
Last IP Prefix
**type**\: str
**length:** 0..50
.. attribute:: last_sid_index
Last SID Index
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix_xr
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: router
Router ID
**type**\: str
**length:** 0..30
.. attribute:: sid_count
SID range
**type**\: int
**range:** 0..4294967295
.. attribute:: sid_start
Starting SID
**type**\: int
**range:** 0..4294967295
.. attribute:: src
src
**type**\: :py:class:`SrmsMiSrcEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiSrcEBEnum>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mi_id = None
self.addr = Srms.Policy.PolicyIpv4.PolicyIpv4Backup.PolicyMi.Addr()
self.addr.parent = self
self.area = None
self.flag_attached = None
self.last_prefix = None
self.last_sid_index = None
self.prefix_xr = None
self.router = None
self.sid_count = None
self.sid_start = None
self.src = None
class Addr(object):
"""
addr
.. attribute:: af
AF
**type**\: :py:class:`SrmsMiAfEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiAfEBEnum>`
.. attribute:: ipv4
IPv4
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = None
self.ipv4 = None
self.ipv6 = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-segment-routing-ms-oper:addr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af is not None:
return True
if self.ipv4 is not None:
return True
if self.ipv6 is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4.PolicyIpv4Backup.PolicyMi.Addr']['meta_info']
@property
def _common_path(self):
if self.mi_id is None:
raise YPYModelError('Key property mi_id is None')
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4-backup/Cisco-IOS-XR-segment-routing-ms-oper:policy-mi[Cisco-IOS-XR-segment-routing-ms-oper:mi-id = ' + str(self.mi_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mi_id is not None:
return True
if self.addr is not None and self.addr._has_data():
return True
if self.area is not None:
return True
if self.flag_attached is not None:
return True
if self.last_prefix is not None:
return True
if self.last_sid_index is not None:
return True
if self.prefix_xr is not None:
return True
if self.router is not None:
return True
if self.sid_count is not None:
return True
if self.sid_start is not None:
return True
if self.src is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4.PolicyIpv4Backup.PolicyMi']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4-backup'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_mi is not None:
for child_ref in self.policy_mi:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4.PolicyIpv4Backup']['meta_info']
class PolicyIpv4Active(object):
"""
IPv4 active policy operational data
.. attribute:: policy_mi
Mapping Item
**type**\: list of :py:class:`PolicyMi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4.PolicyIpv4Active.PolicyMi>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_mi = YList()
self.policy_mi.parent = self
self.policy_mi.name = 'policy_mi'
class PolicyMi(object):
"""
Mapping Item
.. attribute:: mi_id <key>
Mapping Item ID (0, 1, 2, ...)
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: addr
addr
**type**\: :py:class:`Addr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv4.PolicyIpv4Active.PolicyMi.Addr>`
.. attribute:: area
Area (OSPF) or Level (ISIS)
**type**\: str
**length:** 0..30
.. attribute:: flag_attached
Attached flag
**type**\: :py:class:`SrmsMiFlagEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiFlagEBEnum>`
.. attribute:: last_prefix
Last IP Prefix
**type**\: str
**length:** 0..50
.. attribute:: last_sid_index
Last SID Index
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix_xr
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: router
Router ID
**type**\: str
**length:** 0..30
.. attribute:: sid_count
SID range
**type**\: int
**range:** 0..4294967295
.. attribute:: sid_start
Starting SID
**type**\: int
**range:** 0..4294967295
.. attribute:: src
src
**type**\: :py:class:`SrmsMiSrcEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiSrcEBEnum>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mi_id = None
self.addr = Srms.Policy.PolicyIpv4.PolicyIpv4Active.PolicyMi.Addr()
self.addr.parent = self
self.area = None
self.flag_attached = None
self.last_prefix = None
self.last_sid_index = None
self.prefix_xr = None
self.router = None
self.sid_count = None
self.sid_start = None
self.src = None
class Addr(object):
"""
addr
.. attribute:: af
AF
**type**\: :py:class:`SrmsMiAfEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiAfEBEnum>`
.. attribute:: ipv4
IPv4
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = None
self.ipv4 = None
self.ipv6 = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-segment-routing-ms-oper:addr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af is not None:
return True
if self.ipv4 is not None:
return True
if self.ipv6 is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4.PolicyIpv4Active.PolicyMi.Addr']['meta_info']
@property
def _common_path(self):
if self.mi_id is None:
raise YPYModelError('Key property mi_id is None')
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4-active/Cisco-IOS-XR-segment-routing-ms-oper:policy-mi[Cisco-IOS-XR-segment-routing-ms-oper:mi-id = ' + str(self.mi_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mi_id is not None:
return True
if self.addr is not None and self.addr._has_data():
return True
if self.area is not None:
return True
if self.flag_attached is not None:
return True
if self.last_prefix is not None:
return True
if self.last_sid_index is not None:
return True
if self.prefix_xr is not None:
return True
if self.router is not None:
return True
if self.sid_count is not None:
return True
if self.sid_start is not None:
return True
if self.src is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4.PolicyIpv4Active.PolicyMi']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4-active'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_mi is not None:
for child_ref in self.policy_mi:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4.PolicyIpv4Active']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_ipv4_active is not None and self.policy_ipv4_active._has_data():
return True
if self.policy_ipv4_backup is not None and self.policy_ipv4_backup._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv4']['meta_info']
class PolicyIpv6(object):
"""
IPv6 policy operational data
.. attribute:: policy_ipv6_active
IPv6 active policy operational data
**type**\: :py:class:`PolicyIpv6Active <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6.PolicyIpv6Active>`
.. attribute:: policy_ipv6_backup
IPv6 backup policy operational data
**type**\: :py:class:`PolicyIpv6Backup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6.PolicyIpv6Backup>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_ipv6_active = Srms.Policy.PolicyIpv6.PolicyIpv6Active()
self.policy_ipv6_active.parent = self
self.policy_ipv6_backup = Srms.Policy.PolicyIpv6.PolicyIpv6Backup()
self.policy_ipv6_backup.parent = self
class PolicyIpv6Backup(object):
"""
IPv6 backup policy operational data
.. attribute:: policy_mi
Mapping Item
**type**\: list of :py:class:`PolicyMi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6.PolicyIpv6Backup.PolicyMi>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_mi = YList()
self.policy_mi.parent = self
self.policy_mi.name = 'policy_mi'
class PolicyMi(object):
"""
Mapping Item
.. attribute:: mi_id <key>
Mapping Item ID (0, 1, 2, ...)
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: addr
addr
**type**\: :py:class:`Addr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6.PolicyIpv6Backup.PolicyMi.Addr>`
.. attribute:: area
Area (OSPF) or Level (ISIS)
**type**\: str
**length:** 0..30
.. attribute:: flag_attached
Attached flag
**type**\: :py:class:`SrmsMiFlagEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiFlagEBEnum>`
.. attribute:: last_prefix
Last IP Prefix
**type**\: str
**length:** 0..50
.. attribute:: last_sid_index
Last SID Index
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix_xr
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: router
Router ID
**type**\: str
**length:** 0..30
.. attribute:: sid_count
SID range
**type**\: int
**range:** 0..4294967295
.. attribute:: sid_start
Starting SID
**type**\: int
**range:** 0..4294967295
.. attribute:: src
src
**type**\: :py:class:`SrmsMiSrcEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiSrcEBEnum>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mi_id = None
self.addr = Srms.Policy.PolicyIpv6.PolicyIpv6Backup.PolicyMi.Addr()
self.addr.parent = self
self.area = None
self.flag_attached = None
self.last_prefix = None
self.last_sid_index = None
self.prefix_xr = None
self.router = None
self.sid_count = None
self.sid_start = None
self.src = None
class Addr(object):
"""
addr
.. attribute:: af
AF
**type**\: :py:class:`SrmsMiAfEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiAfEBEnum>`
.. attribute:: ipv4
IPv4
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = None
self.ipv4 = None
self.ipv6 = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-segment-routing-ms-oper:addr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af is not None:
return True
if self.ipv4 is not None:
return True
if self.ipv6 is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6.PolicyIpv6Backup.PolicyMi.Addr']['meta_info']
@property
def _common_path(self):
if self.mi_id is None:
raise YPYModelError('Key property mi_id is None')
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6-backup/Cisco-IOS-XR-segment-routing-ms-oper:policy-mi[Cisco-IOS-XR-segment-routing-ms-oper:mi-id = ' + str(self.mi_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mi_id is not None:
return True
if self.addr is not None and self.addr._has_data():
return True
if self.area is not None:
return True
if self.flag_attached is not None:
return True
if self.last_prefix is not None:
return True
if self.last_sid_index is not None:
return True
if self.prefix_xr is not None:
return True
if self.router is not None:
return True
if self.sid_count is not None:
return True
if self.sid_start is not None:
return True
if self.src is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6.PolicyIpv6Backup.PolicyMi']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6-backup'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_mi is not None:
for child_ref in self.policy_mi:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6.PolicyIpv6Backup']['meta_info']
class PolicyIpv6Active(object):
"""
IPv6 active policy operational data
.. attribute:: policy_mi
Mapping Item
**type**\: list of :py:class:`PolicyMi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6.PolicyIpv6Active.PolicyMi>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.policy_mi = YList()
self.policy_mi.parent = self
self.policy_mi.name = 'policy_mi'
class PolicyMi(object):
"""
Mapping Item
.. attribute:: mi_id <key>
Mapping Item ID (0, 1, 2, ...)
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: addr
addr
**type**\: :py:class:`Addr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.Srms.Policy.PolicyIpv6.PolicyIpv6Active.PolicyMi.Addr>`
.. attribute:: area
Area (OSPF) or Level (ISIS)
**type**\: str
**length:** 0..30
.. attribute:: flag_attached
Attached flag
**type**\: :py:class:`SrmsMiFlagEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiFlagEBEnum>`
.. attribute:: last_prefix
Last IP Prefix
**type**\: str
**length:** 0..50
.. attribute:: last_sid_index
Last SID Index
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix_xr
Prefix length
**type**\: int
**range:** 0..255
.. attribute:: router
Router ID
**type**\: str
**length:** 0..30
.. attribute:: sid_count
SID range
**type**\: int
**range:** 0..4294967295
.. attribute:: sid_start
Starting SID
**type**\: int
**range:** 0..4294967295
.. attribute:: src
src
**type**\: :py:class:`SrmsMiSrcEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiSrcEBEnum>`
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.mi_id = None
self.addr = Srms.Policy.PolicyIpv6.PolicyIpv6Active.PolicyMi.Addr()
self.addr.parent = self
self.area = None
self.flag_attached = None
self.last_prefix = None
self.last_sid_index = None
self.prefix_xr = None
self.router = None
self.sid_count = None
self.sid_start = None
self.src = None
class Addr(object):
"""
addr
.. attribute:: af
AF
**type**\: :py:class:`SrmsMiAfEBEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper.SrmsMiAfEBEnum>`
.. attribute:: ipv4
IPv4
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6
IPv6
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'segment-routing-ms-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = None
self.ipv4 = None
self.ipv6 = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-segment-routing-ms-oper:addr'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af is not None:
return True
if self.ipv4 is not None:
return True
if self.ipv6 is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6.PolicyIpv6Active.PolicyMi.Addr']['meta_info']
@property
def _common_path(self):
if self.mi_id is None:
raise YPYModelError('Key property mi_id is None')
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6-active/Cisco-IOS-XR-segment-routing-ms-oper:policy-mi[Cisco-IOS-XR-segment-routing-ms-oper:mi-id = ' + str(self.mi_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mi_id is not None:
return True
if self.addr is not None and self.addr._has_data():
return True
if self.area is not None:
return True
if self.flag_attached is not None:
return True
if self.last_prefix is not None:
return True
if self.last_sid_index is not None:
return True
if self.prefix_xr is not None:
return True
if self.router is not None:
return True
if self.sid_count is not None:
return True
if self.sid_start is not None:
return True
if self.src is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6.PolicyIpv6Active.PolicyMi']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6-active'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_mi is not None:
for child_ref in self.policy_mi:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6.PolicyIpv6Active']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy/Cisco-IOS-XR-segment-routing-ms-oper:policy-ipv6'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_ipv6_active is not None and self.policy_ipv6_active._has_data():
return True
if self.policy_ipv6_backup is not None and self.policy_ipv6_backup._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy.PolicyIpv6']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms/Cisco-IOS-XR-segment-routing-ms-oper:policy'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.policy_ipv4 is not None and self.policy_ipv4._has_data():
return True
if self.policy_ipv6 is not None and self.policy_ipv6._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms.Policy']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-segment-routing-ms-oper:srms'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mapping is not None and self.mapping._has_data():
return True
if self.policy is not None and self.policy._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_segment_routing_ms_oper as meta
return meta._meta_table['Srms']['meta_info']
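# Usage sketch: the attribute docstrings above note that SID-map information
# is retrieved per prefix rather than listed wholesale.  A minimal sketch of
# reading this operational model with ydk-py might look as follows; the device
# address and credentials are placeholders and the exact provider arguments
# depend on the ydk release in use.
#
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#     from ydk.models.cisco_ios_xr.Cisco_IOS_XR_segment_routing_ms_oper import Srms
#
#     provider = NetconfServiceProvider(address='192.0.2.1', port=830,
#                                       username='admin', password='admin')
#     srms = CRUDService().read(provider, Srms())   # read operational data
#     for mi in srms.mapping.mapping_ipv4.mapping_mi:
#         print(mi.prefix, mi.sid_start, mi.sid_count)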
| 36.782371 | 341 | 0.425703 |
94b2a563540154605983e19030545d645dc2443c | 1,165 | py | Python | cctbx_website/run_tests.py | whart222/cctbx_project | 32bb901af1431f845143eac06c244f20b1fbc26a | [
"BSD-3-Clause-LBNL"
] | null | null | null | cctbx_website/run_tests.py | whart222/cctbx_project | 32bb901af1431f845143eac06c244f20b1fbc26a | [
"BSD-3-Clause-LBNL"
] | 170 | 2020-09-26T19:17:07.000Z | 2022-03-31T21:32:41.000Z | cctbx_website/run_tests.py | whart222/cctbx_project | 32bb901af1431f845143eac06c244f20b1fbc26a | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
from libtbx import test_utils
import libtbx.load_env
#tst_list = [
# "$D/regression/tst_py_from_html.py"
# ]
tst_list = [
"$D/regression/tst_1_template.py",
"$D/regression/tst_2_doc_hlo_intro.py",
"$D/regression/tst_3_doc_hlo_model_manager.py",
"$D/regression/tst_4_doc_hlo_data_manager.py",
"$D/regression/tst_5_doc_hlo_map_manager.py",
"$D/regression/tst_6_doc_hlo_model_map_manager.py",
"$D/regression/tst_7_doc_low_flex_advanced.py",
"$D/regression/tst_8_doc_maps_intro.py",
"$D/regression/tst_9_doc_maps_boxing.py",
"$D/regression/tst_10_doc_programming_tips.py",
"$D/regression/tst_11_script_1.py",
"$D/regression/tst_12_script_compare_ss.py",
"$D/regression/tst_13_script_ideal_ss.py",
"$D/regression/tst_14_script_lbfgs_no_curvature.py",
"$D/regression/tst_15_doc_models_hierarchy.py",
"$D/regression/tst_16_script_lbfgs_with_curvature.py",
]
def run():
build_dir = libtbx.env.under_build("cctbx_website")
dist_dir = libtbx.env.dist_path("cctbx_website")
test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
run()
| 31.486486 | 64 | 0.771674 |
68a8b55d16b8f8c8374a2457142211d79c703d9d | 4,399 | py | Python | python/perspective/perspective/client/table_api.py | shinny-yangyang/perspective | 91ade3c19bf9cdd39ce2d019cb92c6fa0d31d724 | [
"Apache-2.0"
] | 1,821 | 2017-12-08T22:38:48.000Z | 2019-04-29T19:29:31.000Z | python/perspective/perspective/client/table_api.py | shinny-yangyang/perspective | 91ade3c19bf9cdd39ce2d019cb92c6fa0d31d724 | [
"Apache-2.0"
] | 278 | 2018-01-19T22:27:09.000Z | 2019-04-27T00:16:00.000Z | python/perspective/perspective/client/table_api.py | shinny-yangyang/perspective | 91ade3c19bf9cdd39ce2d019cb92c6fa0d31d724 | [
"Apache-2.0"
] | 125 | 2017-12-08T20:57:50.000Z | 2019-04-23T07:57:05.000Z | ################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import tornado
from functools import partial
from .dispatch import async_queue, subscribe, unsubscribe
from .view_api import view as make_view
def table(client, data, name, index=None, limit=None):
"""Create a Perspective `Table` by posting a message to a Perspective
server implementation through `client`, returning a `PerspectiveTableProxy`
object whose API is entirely async and must be called with `await` or
in a `yield`-based generator."""
options = {}
if index:
options["index"] = index
elif limit:
options["limit"] = limit
msg = {"cmd": "table", "name": name, "args": [data], "options": options}
future = tornado.concurrent.Future()
client.post(msg, future)
return future
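# Usage sketch: `client` is assumed to be an already-connected PerspectiveClient
# implementation, and the calls must run inside a coroutine, because `table()`
# returns a Future that resolves to a PerspectiveTableProxy whose methods are
# themselves awaitable (update()/remove() below are fire-and-forget posts).
#
#     async def example(client):
#         proxy = await table(client, {"x": [1, 2, 3]}, name="t1", index="x")
#         print(await proxy.size())      # -> 3
#         proxy.update({"x": [4]})       # no await; posts without a Future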
class PerspectiveTableProxy(object):
def __init__(self, client, name):
"""A proxy for a Perspective `Table` object elsewhere, i.e. on a remote
server accessible through a Websocket.
All public API methods on this proxy are async, and must be called
with `await` or a `yield`-based coroutine.
Args:
client (:obj:`PerspectiveClient`): A `PerspectiveClient` that is
set up to send messages to a Perspective server implementation
elsewhere.
name (:obj:`str`): a `str` name for the Table. Automatically
generated if using the `table` function defined above.
"""
self._client = client
self._name = name
self._async_queue = partial(async_queue, self._client, self._name)
self._subscribe = partial(subscribe, self._client, self._name)
self._unsubscribe = partial(unsubscribe, self._client, self._name)
def make_port(self):
return self._async_queue("make_port", "table_method")
def remove_port(self):
return self._async_queue("remove_port", "table_method")
def get_index(self):
return self._async_queue("get_index", "table_method")
def get_limit(self):
return self._async_queue("get_limit", "table_method")
def clear(self):
return self._async_queue("clear", "table_method")
def replace(self, data):
return self._async_queue("replace", "table_method", data)
def size(self):
return self._async_queue("size", "table_method")
def schema(self, as_string=False):
return self._async_queue("schema", "table_method", as_string=as_string)
def expression_schema(self, expressions, **kwargs):
return self._async_queue(
"expression_schema", "table_method", expressions, **kwargs
)
def columns(self):
return self._async_queue("columns", "table_method")
def is_valid_filter(self, filter):
return self._async_queue("is_valid_filter", "table_method", filter)
def on_delete(self, callback):
return self._subscribe("on_delete", "table_method", callback)
def remove_delete(self, callback):
return self._unsubscribe("remove_delete", "table_method", callback)
def delete(self):
return self._async_queue("delete", "table_method")
def view(
self,
columns=None,
group_by=None,
split_by=None,
aggregates=None,
sort=None,
filter=None,
expressions=None,
):
return make_view(
self._client,
self._name,
columns,
group_by,
split_by,
aggregates,
sort,
filter,
expressions,
)
def update(self, data, port_id=0):
msg = {
"name": self._name,
"cmd": "table_method",
"method": "update",
"args": [data, {"port_id": port_id}],
"subscribe": False,
}
self._client.post(msg)
def remove(self, pkeys, port_id=0):
msg = {
"name": self._name,
"cmd": "table_method",
"method": "remove",
"args": [pkeys, {"port_id": port_id}],
"subscribe": False,
}
self._client.post(msg)
| 30.978873 | 80 | 0.604228 |
97dc988bf9a61942a4ac395141b4c293c7ae790e | 974 | py | Python | myproject/myproject/urls.py | ObukhovVladislav/mysite-django | 715a3793351c19a0e7b052d2711796cafee4d5a9 | [
"Apache-2.0"
] | null | null | null | myproject/myproject/urls.py | ObukhovVladislav/mysite-django | 715a3793351c19a0e7b052d2711796cafee4d5a9 | [
"Apache-2.0"
] | null | null | null | myproject/myproject/urls.py | ObukhovVladislav/mysite-django | 715a3793351c19a0e7b052d2711796cafee4d5a9 | [
"Apache-2.0"
] | null | null | null | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import myapp.views as myapp
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('myapp.urls', namespace='my')),
path('auth/', include('authapp.urls', namespace='auth')),
path('basket/', include('basketapp.urls', namespace='basket')),
path('admin/', admin.site.urls),
]
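# Sketch of a matching app-level URLconf (hypothetical myapp/urls.py): with
# include(..., namespace=...) as used above, Django 2.x expects the included
# module to declare app_name; `views.index` here is only a placeholder.
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'myapp'
#     urlpatterns = [
#         path('', views.index, name='index'),
#     ]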
| 33.586207 | 77 | 0.696099 |
251b9d76ec9342a5e9b9de515c92fbd245e7dbae | 12,034 | py | Python | wz/ui_modules/tab_grade_editor.py | gradgrind/WZ | 672d93a3c9d7806194d16d6d5b9175e4046bd068 | [
"Apache-2.0"
] | null | null | null | wz/ui_modules/tab_grade_editor.py | gradgrind/WZ | 672d93a3c9d7806194d16d6d5b9175e4046bd068 | [
"Apache-2.0"
] | null | null | null | wz/ui_modules/tab_grade_editor.py | gradgrind/WZ | 672d93a3c9d7806194d16d6d5b9175e4046bd068 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
ui/tab_grade_editor.py
Last updated: 2021-04-06
Editor for grades.
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
### Messages
_NOT_INTERRUPTABLE = "+++ Der Prozess kann nicht unterbrochen werden +++"
_MUST_SAVE_CHANGES = "Die Änderungen müssen zuerst gespeichert werden."
_TITLE_TABLE_REPLACE = "Neue Tabelle speichern"
# Would need to be a bit different for individual pupils:
_TABLE_REPLACE = "Die neue Tabelle wird die alte ersetzen.\n" \
"Soll sie jetzt gespeichert werden?"
_TABLE_OVERWRITE = "{n} Noten werden geändert. Übernehmen?"
_NOT_SAVED = "Änderungen nicht gespeichert"
_PDF_TABLE_SAVED = "Notentabelle gespeichert:\n {path}"
### Labels, etc.
_EDIT_GRADES = "Noten verwalten"
_TERM = "Anlass:"
_GROUP = "Klasse/Gruppe:"
_SAVE = "Änderungen speichern"
_TABLE_XLSX = "Noteneingabe-Tabelle\nerstellen"
_TT_TABLE_XLSX = "Tabelle der unterrichteten Fächer als xlsx-Datei erstellen"
_TABLE_PDF = "Tabelle als PDF"
_REPORT_PDF = "Zeugnis(se) erstellen"
_TABLE_IN1 = "Notentabelle ersetzen,\n externe einlesen"
_TT_TABLE_IN1 = "Ersetze die Notentabelle durch die gewählte Datei" \
" (xlsx, ods, tsv)"
_TABLE_IN_DIR = "Noten aktualisieren,\n von externem Ordner"
_TT_TABLE_IN_DIR = "Aktualisiere die Notentabelle von den Dateien" \
" (xlsx, ods, tsv) im gewählten Ordner"
_TAG_ENTER = "Geben Sie eine Bezeichnung für diesen Datensatz an.\n" \
"Buchstaben, Ziffern, '~' und '-' sind zulässig, andere Zeichen" \
" werden ersetzt."
_TABLE_FILE = "Tabellendatei (*.xlsx *.ods *.tsv)"
_PDF_FILE = "PDF-Datei (*.pdf)"
#####################################################
import os, glob
from qtpy.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, \
QPushButton, QFileDialog
from qtpy.QtCore import SIGNAL, QObject
from ui.grid import EditableGridView
from ui.grade_grid import GradeGrid
from ui.abitur_pupil_view import AbiPupilView
from ui.ui_support import VLine, KeySelect, TabPage, openDialog, \
QuestionDialog, dirDialog, saveDialog, LineDialog
###
class GView(EditableGridView):
def __init__(self, tab_widget):
self._tab = tab_widget
super().__init__()
#
def set_changed(self, show):
self._tab.enable('SAVE', show)
###
class GradeEdit(TabPage):
def __init__(self):
self._widgets = {}
super().__init__(_EDIT_GRADES)
topbox = QHBoxLayout()
self.vbox.addLayout(topbox)
#*********** The "main" widget ***********
self.gradeView = GView(self)
self.grade_scene = None
topbox.addWidget(self.gradeView)
topbox.addWidget(VLine())
cbox = QVBoxLayout()
topbox.addLayout(cbox)
### Select "term" (to which occasion the reports are to appear)
### That might be a term or semester, it might be a special
### unscheduled report, or a scheduled test (possibly no report)
### or something specific to the school form.
self.term_select = KeySelect(changed_callback = self.term_changed)
cbox.addWidget(QLabel(_TERM))
cbox.addWidget(self.term_select)
### Select group (might be just one entry ... perhaps even none)
self.group_select = KeySelect(changed_callback = self.group_changed)
cbox.addWidget(QLabel(_GROUP))
cbox.addWidget(self.group_select)
### Subselection: e.g. tags/dates/pupils
self.subselect = KeySelect(changed_callback = self.sub_changed)
cbox.addWidget(self.subselect)
cbox.addSpacing(30)
### Save button (active when there are unsaved modifications)
_w = QPushButton(_SAVE)
self._widgets['SAVE'] = _w
cbox.addWidget(_w)
_w.clicked.connect(self.save)
cbox.addStretch(1)
### Generate grade table (for inputting)
pbTable = QPushButton(_TABLE_XLSX)
pbTable.setToolTip(_TT_TABLE_XLSX)
cbox.addWidget(pbTable)
pbTable.clicked.connect(self.make_table)
cbox.addSpacing(10)
### Import grade table (replace internal one)
pbTableIn1 = QPushButton(_TABLE_IN1)
pbTableIn1.setToolTip(_TT_TABLE_IN1)
cbox.addWidget(pbTableIn1)
pbTableIn1.clicked.connect(self.input_table)
### Import grade tables (adding to internal one)
pbTableInDir = QPushButton(_TABLE_IN_DIR)
pbTableInDir.setToolTip(_TT_TABLE_IN_DIR)
cbox.addWidget(pbTableInDir)
pbTableInDir.clicked.connect(self.input_tables)
cbox.addSpacing(30)
### Produce a pdf of the grade table
pbPdf = QPushButton(_TABLE_PDF)
cbox.addWidget(pbPdf)
pbPdf.clicked.connect(self.print_table)
cbox.addSpacing(10)
### Produce the reports
pbReport = QPushButton(_REPORT_PDF)
cbox.addWidget(pbReport)
pbReport.clicked.connect(self.make_reports)
#
def enable(self, tag, on):
"""Enable or disable the widget with given tag.
"""
self._widgets[tag].setEnabled(on)
#
def is_modified(self):
if self.grade_scene:
return bool(self.grade_scene.changes())
return False
#
def set_scene(self, scene):
self.grade_scene = scene
self.gradeView.set_scene(scene)
#
def clear(self):
"""Check for changes in the current "scene", allowing these to
be discarded if desired. If accepted (or no changes), clear the
"scene" and return <True>, otherwise leave the display unaffected
and return <False>.
"""
if self.leave_ok():
self.set_scene(None)
return True
return False
#
def year_change_ok(self):
return self.clear()
#
def enter(self):
BACKEND('GRADES_init')
#
def leave(self):
# Drop the data structures associated with the grade view
self.set_scene(None)
#
def SET_TERMS(self, terms, term):
"""CALLBACK: Supplies the terms as a list of "keys" (the display
form substitutes ' ' for '_').
Also the selected term is passed. Set the term selection widget
and trigger a "change of term" signal.
"""
try:
ix = terms.index(term)
except ValueError:
ix = 0
self.term_select.set_items([(t, t.replace('_', ' ')) for t in terms],
index = ix)
self.term_select.trigger()
return True
#
def term_changed(self, key):
if not self.clear():
return False
BACKEND('GRADES_set_term', term = key)
self.term = key
return True
#
#TODO: group to set?
def SET_GROUPS(self, groups):
glist = [(grp, grp) for grp in groups]
self.group_select.set_items(glist)
self.group_select.trigger()
#
def group_changed(self, group):
if not self.clear():
return False
BACKEND('GRADES_set_group', group = group)
return True
#
def sub_changed(self, itemtag):
# For real terms there is no subselect, so this method will not
# be called.
if not self.clear():
return False
if self.term == 'Abitur':
# This is a special case ...
# Switch to/from individual pupil display.
# <itemtag> is the pid, empty to select the group.
if itemtag:
self.set_scene(AbiPupilView(self.gradeView))
BACKEND('ABITUR_set_pupil', pid = itemtag)
return True
BACKEND('GRADES_subselect', tag = itemtag)
return True
#
def SET_PUPILS_OR_TAGS(self, termx, group, select_list, pid_or_tag):
self.subselect.set_items(select_list)
if select_list:
self.subselect.reset(pid_or_tag)
#? self.subselect.trigger()
#
def SET_GRID(self, **parms):
self.set_scene(GradeGrid(self.gradeView, **parms))
#
def SET_GRADES(self, grades):
"""<grades> is a list: [[pid, sid, val], ... ]
"""
self.grade_scene.set_grades(grades)
#
def abitur_INIT_CELLS(self, data):
self.grade_scene.init_cells(data)
#
def abitur_SET_CELLS(self, data):
self.grade_scene.set_cells(data)
#
def save(self):
self.grade_scene.save_data()
#
def make_table(self):
"""Generate input table (xlsx) for the grades.
"""
if self.grade_scene.changes():
SHOW_WARNING(_MUST_SAVE_CHANGES)
return
BACKEND('GRADES_make_table')
#
def input_table(self):
"""Import a single grade table, replacing the internal table.
"""
fpath = openDialog(_TABLE_FILE)
if fpath:
if QuestionDialog(_TITLE_TABLE_REPLACE, _TABLE_REPLACE):
BACKEND('GRADES_load_table', filepath = fpath)
# On success, the table must be redisplayed
#
def input_tables(self):
"""Import a folder of grade tables, collate the contents and
update the internal table.
Only non-empty cells in the imported tables are taken into
consideration and only one imported table may supply the
value for a given cell.
The "information" fields are not affected.
"""
#TODO: At present only empty cells may be updated, but it may be better
# to allow grades to be updated (only by one of the input tables, though!).
# See gradetable.py: integrate_partial_data
if not self.clear():
return False
dpath = dirDialog()
if dpath:
BACKEND('GRADES_update_table', dirpath = dpath)
BACKEND('GRADES_save_new')
# On success, the table must be redisplayed
#
def QUESTION_UPDATE(self, n):
if QuestionDialog(_TITLE_TABLE_REPLACE, _TABLE_OVERWRITE.format(
n = n)):
BACKEND('GRADES_save_new')
# The table must be redisplayed
#
def make_reports(self):
"""Generate the grade report(s).
"""
if self.grade_scene.changes():
SHOW_WARNING(_MUST_SAVE_CHANGES)
return
BACKEND('GRADES_make_reports')
#
def print_table(self):
"""Output the table as pdf.
"""
if self.grade_scene.changes():
SHOW_WARNING(_MUST_SAVE_CHANGES)
return
BACKEND('GRADES_print_table')
#
def PDF_NAME(self, filename):
fpath = saveDialog(_PDF_FILE, filename)
if fpath:
SHOW_INFO(_PDF_TABLE_SAVED.format(
path = self.grade_scene.to_pdf(fpath)))
#
def GET_TAG(self):
tag = LineDialog(_TAG_ENTER)
if tag:
BACKEND('GRADES_save', tag = tag)
else:
SHOW_WARNING(_NOT_SAVED)
###
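# Note: BACKEND, SHOW_WARNING, SHOW_INFO, TABS and FUNCTIONS are used above
# without being imported; they appear to be injected as globals by the hosting
# application before this module is loaded.  The FUNCTIONS entries registered
# below are the callbacks that the backend dispatches to (e.g.
# 'grades_SET_TERMS' -> GradeEdit.SET_TERMS).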
tab_grade_editor = GradeEdit()
TABS.append(tab_grade_editor)
FUNCTIONS['grades_SET_TERMS'] = tab_grade_editor.SET_TERMS
FUNCTIONS['grades_SET_GROUPS'] = tab_grade_editor.SET_GROUPS
FUNCTIONS['grades_SET_PUPILS_OR_TAGS'] = tab_grade_editor.SET_PUPILS_OR_TAGS
FUNCTIONS['grades_SET_GRADES'] = tab_grade_editor.SET_GRADES
FUNCTIONS['grades_SET_GRID'] = tab_grade_editor.SET_GRID
FUNCTIONS['grades_QUESTION_UPDATE'] = tab_grade_editor.QUESTION_UPDATE
FUNCTIONS['grades_PDF_NAME'] = tab_grade_editor.PDF_NAME
FUNCTIONS['grades_GET_TAG'] = tab_grade_editor.GET_TAG
FUNCTIONS['abitur_INIT_CELLS'] = tab_grade_editor.abitur_INIT_CELLS
FUNCTIONS['abitur_SET_CELLS'] = tab_grade_editor.abitur_SET_CELLS
| 33.99435 | 77 | 0.644424 |
465580a27506d20652f78949924c284c914a2235 | 660 | py | Python | Sistema/API/app/application/Controller/complejoController.py | francoo27/TPI | 53b7a88a491ef785046c208625c745de80200945 | [
"MIT"
] | 1 | 2021-04-27T21:22:30.000Z | 2021-04-27T21:22:30.000Z | Sistema/API/app/application/Controller/complejoController.py | francoo27/TPI | 53b7a88a491ef785046c208625c745de80200945 | [
"MIT"
] | null | null | null | Sistema/API/app/application/Controller/complejoController.py | francoo27/TPI | 53b7a88a491ef785046c208625c745de80200945 | [
"MIT"
] | null | null | null | from ..Model.ClasificacionModel import ClasificacionSchema
from ..Model.ComplejoModel import ComplejoSchema
from flask import Blueprint, Response, jsonify, current_app as app
from flask.globals import request
from ..Logic import complejoService
from marshmallow import Schema, fields, ValidationError
# Blueprint Configuration
complejo_bp = Blueprint(
'complejo_bp', __name__
)
complejoSchema = ComplejoSchema()
complejosSchema = ComplejoSchema(many=True)
@complejo_bp.route('/api/complejo', methods=['GET'])
def query_complejo():
complejo = complejoService.query_complejo()
output = complejosSchema.dump(complejo)
return jsonify(output)
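# Registration sketch (hypothetical application factory elsewhere in the
# project) showing how this blueprint is presumably wired into the Flask app;
# the import path is an assumption based on this file's location:
#
#     from flask import Flask
#     from .Controller.complejoController import complejo_bp
#
#     app = Flask(__name__)
#     app.register_blueprint(complejo_bp)   # exposes GET /api/complejo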
| 30 | 68 | 0.795455 |
5ec09aee03166ff42fcdeee8e2b432bd6f22c254 | 987 | py | Python | setup.py | mtrovo/python-clewareampel | 48cdec263d63b8a8549773d5fd787a1c326e7a9e | [
"MIT"
] | 1 | 2022-01-29T15:41:52.000Z | 2022-01-29T15:41:52.000Z | setup.py | mtrovo/python-clewareampel | 48cdec263d63b8a8549773d5fd787a1c326e7a9e | [
"MIT"
] | null | null | null | setup.py | mtrovo/python-clewareampel | 48cdec263d63b8a8549773d5fd787a1c326e7a9e | [
"MIT"
] | 1 | 2021-10-03T23:05:53.000Z | 2021-10-03T23:05:53.000Z | #!/usr/bin/env python
from setuptools import setup
import clewareampel
setup(
name='clewareampel',
version=clewareampel.__version__,
description='Control the Cleware USB Ampel (traffic lights) with Python.',
long_description='Control the Cleware USB Ampel (traffic lights) with '
'Python.',
author='Roderick Baier',
author_email='[email protected]',
license='MIT',
url='https://github.com/rbaier/python-clewareampel',
py_modules=['clewareampel'],
install_requires=[
'pyusb'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Hardware :: Hardware Drivers',
'Topic :: Utilities'
],
entry_points={
'console_scripts': [
'clewareampel=clewareampel:main',
]
}
)
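# Note: after `pip install .`, the entry_points declaration above installs a
# `clewareampel` console command that dispatches to clewareampel.main().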
| 27.416667 | 78 | 0.614995 |
7cd286be5cd36f375d155442d95a5dbaf97b922a | 4,557 | py | Python | third_party/webports/src/src/lib/naclports/tests/test_main.py | ayaNader/chromeos_smart_card_connector | 78502e328634a210e27ef897405b66844ebefe62 | [
"Apache-2.0"
] | 79 | 2017-09-22T05:09:54.000Z | 2022-03-13T01:11:06.000Z | lib/naclports/tests/test_main.py | yeyus/naclports | ceb194315915c69a7266d695259e2c204f9cbbaf | [
"BSD-3-Clause"
] | 191 | 2017-10-23T22:34:58.000Z | 2022-03-05T18:10:06.000Z | lib/naclports/tests/test_main.py | yeyus/naclports | ceb194315915c69a7266d695259e2c204f9cbbaf | [
"BSD-3-Clause"
] | 32 | 2017-10-21T07:39:59.000Z | 2021-11-10T22:55:32.000Z | # Copyright 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import mock
from mock import patch, Mock
import StringIO
import common
import naclports.__main__
from naclports import error
from naclports.configuration import Configuration
# pylint: disable=no-self-use
class TestMain(common.NaclportsTest):
def setUp(self):
super(TestMain, self).setUp()
self.AddPatch(patch('naclports.util.CheckSDKRoot'))
@patch('naclports.util.Log', Mock())
@patch('naclports.util.RemoveTree')
def testCleanAll(self, mock_rmtree):
config = Configuration()
naclports.__main__.CleanAll(config)
mock_rmtree.assert_any_call('/package/install/path')
@patch('naclports.__main__.RunMain', Mock(side_effect=error.Error('oops')))
def testErrorReport(self):
# Verify that exceptions of the type error.Error are printed
# to stderr and result in a return code of 1
with patch('sys.stderr', new_callable=StringIO.StringIO) as stderr:
self.assertEqual(naclports.__main__.main(None), 1)
self.assertRegexpMatches(stderr.getvalue(), '^naclports: oops')
@patch('naclports.__main__.CmdPkgClean')
def testMainCommandDispatch(self, cmd_pkg_clean):
mock_pkg = Mock()
with patch('naclports.source_package.CreatePackage',
Mock(return_value=mock_pkg)):
naclports.__main__.RunMain(['clean', 'foo'])
cmd_pkg_clean.assert_called_once_with(mock_pkg, mock.ANY)
@patch('naclports.__main__.CmdPkgClean',
Mock(side_effect=error.DisabledError()))
def testMainHandlePackageDisabled(self):
mock_pkg = Mock()
with patch('naclports.source_package.CreatePackage',
Mock(return_value=mock_pkg)):
with self.assertRaises(error.DisabledError):
naclports.__main__.RunMain(['clean', 'foo'])
@patch('naclports.__main__.CleanAll')
def testMainCleanAll(self, clean_all_mock):
naclports.__main__.RunMain(['clean', '--all'])
clean_all_mock.assert_called_once_with(Configuration())
class TestCommands(common.NaclportsTest):
def testListCommand(self):
config = Configuration()
pkg = Mock(NAME='foo', VERSION='0.1')
with patch('naclports.package.InstalledPackageIterator',
Mock(return_value=[pkg])):
with patch('sys.stdout', new_callable=StringIO.StringIO) as stdout:
options = Mock(all=False)
naclports.__main__.CmdList(config, options, [])
lines = stdout.getvalue().splitlines()
self.assertRegexpMatches(lines[0], '^foo\\s+0.1$')
self.assertEqual(len(lines), 1)
def testListCommandVerbose(self):
config = Configuration()
pkg = Mock(NAME='foo', VERSION='0.1')
with patch('naclports.package.InstalledPackageIterator',
Mock(return_value=[pkg])):
with patch('sys.stdout', new_callable=StringIO.StringIO) as stdout:
options = Mock(verbose=False, all=False)
naclports.__main__.CmdList(config, options, [])
lines = stdout.getvalue().splitlines()
self.assertRegexpMatches(lines[0], "^foo$")
self.assertEqual(len(lines), 1)
@patch('naclports.package.CreateInstalledPackage', Mock())
def testInfoCommand(self):
config = Configuration()
options = Mock()
file_mock = common.MockFileObject('FOO=bar\n')
with patch('sys.stdout', new_callable=StringIO.StringIO) as stdout:
with patch('__builtin__.open', Mock(return_value=file_mock), create=True):
naclports.__main__.CmdInfo(config, options, ['foo'])
self.assertRegexpMatches(stdout.getvalue(), "FOO=bar")
def testContentsCommand(self):
file_list = ['foo', 'bar']
options = Mock(verbose=False, all=False)
package = Mock(NAME='test', Files=Mock(return_value=file_list))
expected_output = '\n'.join(file_list) + '\n'
with patch('sys.stdout', new_callable=StringIO.StringIO) as stdout:
naclports.__main__.CmdPkgContents(package, options)
self.assertEqual(stdout.getvalue(), expected_output)
    # when the verbose option is set, expect CmdPkgContents to output full paths.
naclports.util.log_level = naclports.util.LOG_VERBOSE
expected_output = [os.path.join('/package/install/path', f)
for f in file_list]
expected_output = '\n'.join(expected_output) + '\n'
with patch('sys.stdout', new_callable=StringIO.StringIO) as stdout:
naclports.__main__.CmdPkgContents(package, options)
self.assertEqual(stdout.getvalue(), expected_output)
| 38.948718 | 80 | 0.707483 |
3352bef5fbf40d331a92c6522ac7345d3b7b9bb9 | 8,627 | py | Python | beginner_source/blitz/neural_networks_tutorial.py | codewithkaranjeswani/tutorials | ce84ebbb764bef14e982076b5ca7d8906ac0db78 | [
"BSD-3-Clause"
] | null | null | null | beginner_source/blitz/neural_networks_tutorial.py | codewithkaranjeswani/tutorials | ce84ebbb764bef14e982076b5ca7d8906ac0db78 | [
"BSD-3-Clause"
] | null | null | null | beginner_source/blitz/neural_networks_tutorial.py | codewithkaranjeswani/tutorials | ce84ebbb764bef14e982076b5ca7d8906ac0db78 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Neural Networks
===============
Neural networks can be constructed using the ``torch.nn`` package.
Now that you had a glimpse of ``autograd``, ``nn`` depends on
``autograd`` to define models and differentiate them.
An ``nn.Module`` contains layers, and a method ``forward(input)``\ that
returns the ``output``.
For example, look at this network that classifies digit images:
.. figure:: /_static/img/mnist.png
:alt: convnet
convnet
It is a simple feed-forward network. It takes the input, feeds it
through several layers one after the other, and then finally gives the
output.
A typical training procedure for a neural network is as follows:
- Define the neural network that has some learnable parameters (or
weights)
- Iterate over a dataset of inputs
- Process input through the network
- Compute the loss (how far is the output from being correct)
- Propagate gradients back into the network’s parameters
- Update the weights of the network, typically using a simple update rule:
``weight = weight - learning_rate * gradient``
Define the network
------------------
Let’s define this network:
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
# an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 5*5 from image dimension
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
net = Net()
print(net)
########################################################################
# You just have to define the ``forward`` function, and the ``backward``
# function (where gradients are computed) is automatically defined for you
# using ``autograd``.
# You can use any of the Tensor operations in the ``forward`` function.
#
# The learnable parameters of a model are returned by ``net.parameters()``
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1's .weight
########################################################################
# Let's try a random 32x32 input.
# Note: expected input size of this net (LeNet) is 32x32. To use this net on
# the MNIST dataset, please resize the images from the dataset to 32x32.
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
########################################################################
# Zero the gradient buffers of all parameters and backprop with random
# gradients:
net.zero_grad()
out.backward(torch.randn(1, 10))
########################################################################
# .. note::
#
# ``torch.nn`` only supports mini-batches. The entire ``torch.nn``
# package only supports inputs that are a mini-batch of samples, and not
# a single sample.
#
# For example, ``nn.Conv2d`` will take in a 4D Tensor of
# ``nSamples x nChannels x Height x Width``.
#
# If you have a single sample, just use ``input.unsqueeze(0)`` to add
# a fake batch dimension.
#
# Before proceeding further, let's recap all the classes you’ve seen so far.
#
# **Recap:**
# - ``torch.Tensor`` - A *multi-dimensional array* with support for autograd
# operations like ``backward()``. Also *holds the gradient* w.r.t. the
# tensor.
# - ``nn.Module`` - Neural network module. *Convenient way of
# encapsulating parameters*, with helpers for moving them to GPU,
# exporting, loading, etc.
# - ``nn.Parameter`` - A kind of Tensor, that is *automatically
# registered as a parameter when assigned as an attribute to a*
# ``Module``.
# - ``autograd.Function`` - Implements *forward and backward definitions
# of an autograd operation*. Every ``Tensor`` operation creates at
# least a single ``Function`` node that connects to functions that
# created a ``Tensor`` and *encodes its history*.
#
# **At this point, we covered:**
# - Defining a neural network
# - Processing inputs and calling backward
#
# **Still Left:**
# - Computing the loss
# - Updating the weights of the network
#
# Loss Function
# -------------
# A loss function takes the (output, target) pair of inputs, and computes a
# value that estimates how far away the output is from the target.
#
# There are several different
# `loss functions <https://pytorch.org/docs/nn.html#loss-functions>`_ under the
# nn package .
# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error
# between the input and the target.
#
# For example:
output = net(input)
target = torch.randn(10) # a dummy target, for example
target = target.view(1, -1) # make it the same shape as output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
########################################################################
# Now, if you follow ``loss`` in the backward direction, using its
# ``.grad_fn`` attribute, you will see a graph of computations that looks
# like this:
#
# ::
#
# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
# -> view -> linear -> relu -> linear -> relu -> linear
# -> MSELoss
# -> loss
#
# So, when we call ``loss.backward()``, the whole graph is differentiated
# w.r.t. the loss, and all Tensors in the graph that have ``requires_grad=True``
# will have their ``.grad`` Tensor accumulated with the gradient.
#
# For illustration, let us follow a few steps backward:
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
########################################################################
# Backprop
# --------
# To backpropagate the error all we have to do is to ``loss.backward()``.
# You need to clear the existing gradients though, else gradients will be
# accumulated to existing gradients.
#
#
# Now we shall call ``loss.backward()``, and have a look at conv1's bias
# gradients before and after the backward.
net.zero_grad() # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
########################################################################
# Now, we have seen how to use loss functions.
#
# **Read Later:**
#
# The neural network package contains various modules and loss functions
# that form the building blocks of deep neural networks. A full list with
# documentation is `here <https://pytorch.org/docs/nn>`_.
#
# **The only thing left to learn is:**
#
# - Updating the weights of the network
#
# Update the weights
# ------------------
# The simplest update rule used in practice is the Stochastic Gradient
# Descent (SGD):
#
# ``weight = weight - learning_rate * gradient``
#
# We can implement this using simple Python code:
#
# .. code:: python
#
# learning_rate = 0.01
# for f in net.parameters():
# f.data.sub_(f.grad.data * learning_rate)
#
# However, as you use neural networks, you want to use various different
# update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.
# To enable this, we built a small package: ``torch.optim`` that
# implements all these methods. Using it is very simple:
import torch.optim as optim
# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# in your training loop:
optimizer.zero_grad() # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step() # Does the update
###############################################################
# .. Note::
#
# Observe how gradient buffers had to be manually set to zero using
# ``optimizer.zero_grad()``. This is because gradients are accumulated
# as explained in the `Backprop`_ section.
| 32.927481 | 79 | 0.630115 |
590534b1eb0e72d2f54fc8b3b1b4337a132c97e8 | 39,077 | py | Python | pyopencl_extension/framework.py | piveloper/pyopencl-extension | 0f9fede4cfbb1c3f6d99c5e0aa94feddb23a5d4c | [
"MIT"
] | null | null | null | pyopencl_extension/framework.py | piveloper/pyopencl-extension | 0f9fede4cfbb1c3f6d99c5e0aa94feddb23a5d4c | [
"MIT"
] | null | null | null | pyopencl_extension/framework.py | piveloper/pyopencl-extension | 0f9fede4cfbb1c3f6d99c5e0aa94feddb23a5d4c | [
"MIT"
] | null | null | null | __author__ = "piveloper"
__copyright__ = "26.03.2020, piveloper"
__version__ = "1.0"
__email__ = "[email protected]"
__doc__ = """This script includes helpful functions to extended PyOpenCl functionality."""
import os
import re
import time
from abc import abstractmethod, ABC
from dataclasses import dataclass, field
from enum import Enum
from functools import lru_cache
from pathlib import Path
from typing import Union, Tuple, List, Dict, Callable
import numpy as np
import pyastyle
from mako import exceptions
from mako.template import Template
import pyopencl as cl
from pyopencl._cl import Device
from pyopencl.array import Array as ClArray
from pyopencl_extension import CommandQueue, Array, to_device
from pyopencl_extension.helpers.general import write_string_to_file
from pyopencl_extension.modifications_pyopencl.command_queue import QueueProperties, get_current_queue
from pyopencl_extension.modifications_pyopencl.context import Context, get_devices
from pyopencl_extension.types.auto_gen.cl_types import ClTypesScalar
from pyopencl_extension.types.utilities_np_cl import c_name_from_dtype, scalar_type_from_vec_type, \
get_vec_size, Types, number_vec_elements_of_cl_type, VEC_INDICES
from pyopencl_extension.emulation import create_py_file_and_load_module, unparse_c_code_to_python
@dataclass
class LocalArray:
shape: int
dtype: np.dtype
cl_local_memory: cl.LocalMemory = field(init=False, default=None)
def __post_init__(self):
self.cl_local_memory = cl.LocalMemory(int(self.shape * np.dtype(self.dtype).itemsize))
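# Usage sketch (illustrative only, names as defined in this module): LocalArray(shape=64, dtype=Types.float)
# describes 64 floats of __local scratch memory; passed as a kernel argument value it is wrapped into a
# Local(...) argument by FunctionBase._prepare_arg below.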
TypesClArray = Union[Array, ClArray]
TypesDefines = Union[str, float, int, bool]
TypesReplacement = Union[str, float, int, bool]
TypesArgArrays = Union[np.ndarray, Array, ClArray, LocalArray]
_ = ClTypesScalar
TypesArgScalar = Union[int, float,
_.char, _.short, _.int, _.long, _.uchar, _.ushort, _.uint, _.ulong, _.half, _.float, _.double]
# TypesKernelArg = Union[Array, TypesDefines] # todo: remove?
preamble_activate_double = """
#if defined(cl_khr_fp64) // Khronos extension available?
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#define PYOPENCL_DEFINE_CDOUBLE
#elif defined(cl_amd_fp64) // AMD extension available?
#pragma OPENCL EXTENSION cl_amd_fp64 : enable
#define PYOPENCL_DEFINE_CDOUBLE
#endif
"""
preamble_activate_complex_numbers = """
#include <pyopencl-complex.h>
#define TP_ROOT ${cplx_type}
"""
def preamble_precision(precision: str = 'single'):
"""
    This function generates a preamble to support either single or double precision floating point numbers.
:param precision:
:return:
"""
if precision == 'single':
return """
#define PI 3.14159265359f
"""
elif precision == 'double':
return preamble_activate_double + """\n
#define PI 3.14159265358979323846
"""
else:
raise NotImplementedError()
def preamble_generic_type_operations(number_format: str = 'real', precision: str = 'single'):
"""
    This function returns a preamble which defines how generic operations are executed on the device.
    This becomes especially important when dealing with complex types, which OpenCl does not support out of the box.
    As a solution, pyopencl-complex.h includes several basic functions for complex operations.
    E.g. consider a kernel which adds two numbers, where the input type can be real or complex valued.
    Typically one would implement c = a + b. However, OpenCl does not support the + operation when a and b are complex
    valued. Therefore, using this preamble one can write c = ADD(a, b). ADD acts as a generic operation which supports
    real and complex input, depending on the selection for number_format.
    :param number_format: 'real' or 'complex'
:param precision: 'single' or 'double'
:return: preamble to support generic operations
"""
if number_format == 'complex':
cplx_type_pyopencl = {'single': 'cfloat',
'double': 'cdouble'}[precision]
return preamble_activate_complex_numbers + """
#define MUL ${cplx_type}_mul
#define ADD ${cplx_type}_add
#define SUB ${cplx_type}_sub
#define ABS ${cplx_type}_abs
#define RMUL ${cplx_type}_rmul
#define NEW ${cplx_type}_new
#define CONJ ${cplx_type}_conj
#define REAL(x) x.real
#define IMAG(x) x.imag
""".replace('${cplx_type}', cplx_type_pyopencl)
elif number_format == 'real':
return """
#define MUL(x,y) (x*y)
#define ADD(x,y) (x+y)
#define SUB(x,y) (x-y)
#define ABS(x) (fabs(x))
#define RMUL(x,y) (x*y)
#define NEW(x,y) (x)
#define CONJ(x) (x)
#define REAL(x) (x)
#define IMAG(x) (0)
"""
else:
raise NotImplementedError()
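# Illustrative note (not part of the API above): a kernel body written once with these generic macros
# works for both number formats, e.g. body = 'c[i] = ADD(a[i], b[i]);'. With
# preamble_generic_type_operations('real') ADD(x,y) expands to (x+y); with ('complex', 'single') it
# expands to cfloat_add from pyopencl-complex.h.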
def catch_invalid_argument_name(name: str):
"""
    E.g. when using certain argument names like 'channel' the opencl compiler throws a compilation error, probably
    because channel is a reserved opencl keyword. Therefore this function raises an error when such a name is used.
:param name:
:return:
"""
invalid_names = ['channel']
if name in invalid_names:
raise ValueError('Invalid opencl name: \'{}\' used.'.format(name))
else:
return name
class OrderInMemory(Enum):
C_CONTIGUOUS: str = 'c_contiguous'
F_CONTIGUOUS: str = 'f_contiguous'
@dataclass
class ArgBase(ABC):
# too much restriction, shape of array might change during runtime
# shape: Tuple[int, ...] = (1,) # default: argument is scalar
@property
@abstractmethod
def address_space_qualifier(self) -> str:
# __global, __local, __private, __constant
pass
@property
@abstractmethod
def dtype(self) -> np.dtype:
# __global, __local, __private, __constant
pass
def to_string(self, name):
new_name = catch_invalid_argument_name(name)
if type(self) in [Scalar]: # scalar
return '{} {} {}'.format(self.address_space_qualifier, c_name_from_dtype(self.dtype), new_name)
else: # array
return '{} {} *{}'.format(self.address_space_qualifier, c_name_from_dtype(self.dtype), new_name)
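# For orientation (sketch; assumes Types.float maps to the C name 'float'):
#   Scalar(Types.float).to_string('alpha')  ->  ' float alpha'
#   Global(Types.float).to_string('data')   ->  '__global float *data'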
@dataclass
class Scalar(ArgBase):
dtype: np.dtype = field(default=Types.int)
address_space_qualifier: str = field(default='')
default: np.dtype = field(init=False, default=None)
def __post_init__(self):
if np.isscalar(self.dtype):
self.default = self.dtype
if type(self.default) == float:
self.dtype = Types.double
elif type(self.default) == int:
self.dtype = Types.int
else:
self.dtype = type(self.dtype)
@dataclass
class Pointer(ArgBase, ABC):
dtype: np.dtype = field(default=Types.int)
address_space_qualifier: str = field(init=False, default='__global')
@dataclass
class Private(Pointer):
address_space_qualifier: str = field(init=False, default='__private')
@dataclass
class Local(Pointer):
dtype: Union[np.dtype, LocalArray] = field(default=Types.int)
address_space_qualifier: str = field(init=False, default='__local')
order_in_memory: OrderInMemory = OrderInMemory.C_CONTIGUOUS
default: cl.LocalMemory = field(init=False, default=None)
def __post_init__(self):
if isinstance(self.dtype, LocalArray):
self.default = self.dtype.cl_local_memory
self.dtype = self.dtype.dtype
@dataclass
class Global(Pointer):
dtype: Union[np.dtype, TypesClArray] = field(default=Types.int)
read_only: bool = False # adds 'const' qualifier to let compiler know that global array is never written
order_in_memory: OrderInMemory = OrderInMemory.C_CONTIGUOUS
address_space_qualifier: str = field(init=False, default='__global')
default: TypesClArray = field(init=False, default='')
def __post_init__(self):
if isinstance(self.dtype, TypesClArray.__args__):
self.default = self.dtype
self.dtype = self.dtype.dtype
if self.read_only:
self.address_space_qualifier = 'const __global'
@dataclass
class Constant(Pointer):
"""
const is only a hint for the compiler that the data does not change
__constant leads to usage of very fast constant cache memory which is shared among
    multiple compute units. From the AMD optimization guide, e.g. we can read 4 bytes/cycle.
    Local memory can be read twice as fast with 8 bytes/cycle; however, local memory is an even more scarce resource.
https://stackoverflow.com/questions/17991714/opencl-difference-between-constant-memory-and-const-global-memory/50931783
"""
dtype: Union[np.dtype, TypesClArray] = field(default=Types.int)
order_in_memory: str = OrderInMemory.C_CONTIGUOUS
address_space_qualifier: str = field(init=False, default='__constant')
default: TypesClArray = field(init=False, default='')
def __post_init__(self):
if isinstance(self.dtype, TypesClArray.__args__):
self.default = self.dtype
self.dtype = self.dtype.dtype
def template(func: Union['Kernel', 'Function']) -> str:
body = ''.join(func.body)
tpl = func.header + '\n{' + body + '}\n'
args = [value.to_string(key) + ',' for key, value in func.args.items()]
args = '{}'.format('\n'.join(args))
args = args[:-1] # remove last comma
replacements = {'name': func.name,
'args': args,
'returns': c_name_from_dtype(func.returns)}
for key, value in func.replacements.items():
replacements[key] = str(value)
try: # todo: e.g. if replacement has been forgotten, still save template as file
tpl = Template(tpl).render(**replacements)
except:
raise ValueError(exceptions.text_error_template().render())
defines = '\n'.join(['#define {} {}'.format(key, str(value)) for key, value in func.defines.items()])
tpl_formatted = pyastyle.format('{}\n\n{}'.format(defines, tpl), '--style=allman --indent=spaces=4')
return tpl_formatted
@dataclass
class FunctionBase(ABC):
name: str = 'func'
args: Dict[str, Union[TypesArgArrays, TypesArgScalar, Scalar, Global, Local, Private, Constant]] = \
field(default_factory=lambda: [])
body: Union[List[str], str] = field(default_factory=lambda: [])
replacements: Dict[str, TypesReplacement] = field(default_factory=lambda: {})
type_defs: Dict[str, np.dtype] = field(default_factory=lambda: {}) # todo
defines: Dict[str, TypesDefines] = field(default_factory=lambda: {})
functions: List['Function'] = field(default_factory=lambda: [])
def __post_init__(self):
if isinstance(self.body, str):
self.body = [self.body]
self._prepares_args()
@property
def header(self):
return '${returns} ${name}(${args})'
@property
@abstractmethod
def template(self) -> str:
pass
@staticmethod
def _prepare_arg(v):
"""
        This is a convenience feature. Arguments might be provided as a plain numpy array, python integer or float etc.
        Therefore, this function wraps them in the appropriate argument type (Global, Local, Scalar, ...).
"""
if isinstance(v, np.ndarray):
g_arg = Global(v.dtype)
g_arg.default = v
return g_arg
elif isinstance(v, TypesClArray.__args__):
return Global(v)
elif isinstance(v, LocalArray):
return Local(v)
elif isinstance(v, TypesArgScalar.__args__):
return Scalar(v)
else:
return v
def _prepares_args(self):
self.args = {k: self._prepare_arg(v) for k, v in self.args.items()}
@dataclass
class Function(FunctionBase):
@property
def template(self) -> str:
return template(self)
returns: np.dtype = field(default_factory=lambda: np.dtype(np.void))
def __str__(self) -> str:
return super().__str__() + str(self.returns)
KernelGridType = Union[Tuple[int], Tuple[int, int], Tuple[int, int, int]]
class Compilable:
@abstractmethod
def compile(self, context: Context = None, emulate: bool = False):
pass
@staticmethod
def get_default_dir_pycl_kernels():
return Path(os.getcwd()).joinpath('cl_py_modules')
@dataclass
class Kernel(FunctionBase, Compilable):
def compile(self, context: Context = None, emulate: bool = False, file='$default_path'):
Program(kernels=[self]).compile(context=context, emulate=emulate, file=file)
return self.callable_kernel
global_size: KernelGridType = None
local_size: KernelGridType = None
returns: np.dtype = field(default_factory=lambda: np.dtype(np.void), init=False)
callable_kernel: 'CallableKernel' = field(default_factory=lambda: None, init=False)
def __str__(self) -> str:
return super().__str__()
@property
def template(self) -> str:
return template(self)
@property
def header(self):
return '__kernel ${returns} ${name}(${args})'
def __call__(self, global_size: KernelGridType = None, local_size: KernelGridType = None, **kwargs):
if self.callable_kernel is not None:
return self.callable_kernel(global_size=global_size, local_size=local_size, **kwargs)
else:
raise ValueError('Kernel has not been compiled yet.')
def _get_all_funcs(f: FunctionBase, flat_list=None) -> List[FunctionBase]:
    if flat_list is None:
        flat_list = []
    for sub_f in f.functions:
        _get_all_funcs(sub_f, flat_list)
    flat_list.append(f)
    return flat_list
def _get_list_with_unique_functions(functions, kernels):
functions_in_kernels = [f for k in kernels for f in k.functions]
all_funcs = [_f for f in functions + functions_in_kernels for _f in _get_all_funcs(f)]
all_funcs_unique, _func_names = [], []
for f in all_funcs:
if len(_func_names) == 0 or f.name not in _func_names:
all_funcs_unique.append(f)
_func_names.append(f.name)
return all_funcs_unique
@dataclass
class Program(Compilable):
"""
Models an OpenCl Program containing functions or kernels.
"""
def compile(self, context: Context = None, emulate: bool = False,
file: str = '$default_path') -> 'ProgramContainer':
return compile_cl_program(self, context, emulate, file)
functions: List[Function] = field(default_factory=lambda: [])
kernels: List[Kernel] = field(default_factory=lambda: [])
defines: Dict[str, TypesDefines] = field(default_factory=lambda: {})
type_defs: Dict[str, np.dtype] = field(default_factory=lambda: {})
@staticmethod
def _arg_to_str_for_hash(name, arg: ArgBase):
return name + str(type(arg)) + str(hash(arg.dtype)) + arg.address_space_qualifier
@staticmethod
def _func_to_str_for_hash(func: FunctionBase):
str_args = ''.join(Program._arg_to_str_for_hash(k, v) for k, v in func.args.items())
str_repl = ''.join(k + str(v) for k, v in func.replacements.items())
str_type_defs = ''.join(k + str(v) for k, v in func.type_defs.items())
str_defines = ''.join(k + str(v) for k, v in func.defines.items())
str_body = ''.join(func.body)
return str_type_defs + str_defines + str_repl + func.header + func.name + str_args + str_body
def __hash__(self) -> int:
str_funcs = ''.join(self._func_to_str_for_hash(knl) for knl in self.functions)
str_kernels = ''.join(self._func_to_str_for_hash(knl) for knl in self.kernels)
str_type_defs = ''.join(k + str(v) for k, v in self.type_defs.items())
str_defines = ''.join(k + str(v) for k, v in self.defines.items())
prog_str = str_defines + str_type_defs + str_funcs + str_kernels
return int(str(hash(prog_str)) + str(abs(hash(prog_str + 'something')))) # double hash to avoid collisions
@property
def rendered_template(self):
all_funcs = _get_list_with_unique_functions(self.functions, self.kernels)
functions = [f.template for f in all_funcs] + [k.template for k in self.kernels]
functions = '\n'.join(functions)
if 'double' in functions:
_preamble_precision = preamble_precision('double')
else:
_preamble_precision = preamble_precision('single')
if 'cfloat_t' in functions:
_preamble_generic_operations = preamble_generic_type_operations('complex', 'single')
elif 'cdouble_t' in functions:
_preamble_generic_operations = preamble_generic_type_operations('complex', 'double')
else:
_preamble_generic_operations = preamble_generic_type_operations('real')
preamble_buff_t = f'{_preamble_precision}\n{_preamble_generic_operations}'
# join program typedefs with typedefs from kernels and functions
# todo: consider replacing type strings directly to avoid name conflicts
def update_and_checks_for_duplicates_same_type(items: dict, dic: dict):
for key, value in items.items():
if key in dic:
if not dic[key] == value:
raise ValueError('Same type def name for different types')
else:
dic[key] = value
[update_and_checks_for_duplicates_same_type(func.type_defs, self.type_defs) for func in self.functions]
[update_and_checks_for_duplicates_same_type(func.type_defs, self.type_defs) for func in self.kernels]
# remove since defines are inserted before function/kernels
# [update_and_checks_for_duplicates_same_type(func.defines, self.defines) for func in self.functions]
# [update_and_checks_for_duplicates_same_type(func.defines, self.defines) for func in self.kernels]
defines = '\n'.join(['#define {} {}'.format(key, str(value)) for key, value in self.defines.items()])
type_defs = '\n'.join(
['typedef {c_name} {new_name};\n#define convert_{new_name}(x) convert_{c_name}(x)'.format(
c_name=c_name_from_dtype(value),
new_name=str(key))
for key, value in self.type_defs.items()])
tpl_all = self._get_tpl(preamble_buff_t, defines, type_defs, functions)
tpl_formatted = pyastyle.format(tpl_all, '--style=allman --indent=spaces=4')
return tpl_formatted
def _get_tpl(self, preamble_complex, defines, type_defs, functions):
return '{}\n\n{}\n\n{}\n\n{}\n\n'.format(preamble_complex, defines, type_defs, functions)
def build_for_device(context: Context, template_to_be_compiled: str, file: str = None) -> cl.Program:
if file is not None:
write_string_to_file(template_to_be_compiled, file + '.cl', b_logging=False)
try:
program = cl.Program(context, str(template_to_be_compiled)).build()
except Exception as error:
tpl_line_numbers = '\n'.join(
['{:<4}{}'.format(i + 1, line) for i, line in enumerate(template_to_be_compiled.split('\n'))])
raise ValueError('\n{}\n\n{}'.format(tpl_line_numbers, str(error)))
return program
# Todo: Find good structure for modeling cl and python kernels
@dataclass
class CallableKernel(ABC):
kernel_model: Kernel
def __getattr__(self, name):
if name in self.kernel_model.args.keys():
return self.kernel_model.args[name].default
return super().__getattribute__(name)
@abstractmethod
def __call__(self, global_size: KernelGridType = None,
local_size: KernelGridType = None,
**kwargs):
pass
@staticmethod
def _typing_scalar_argument(arg_model: Union[Scalar, Scalar],
scalar_value_provided: TypesArgScalar):
if get_vec_size(arg_model.dtype) == 1:
return np.dtype(arg_model.dtype).type(scalar_value_provided)
else:
dtype_scalar = scalar_type_from_vec_type(arg_model.dtype)
scalar = np.dtype(dtype_scalar).type(scalar_value_provided) # converts to bytes like object
return scalar.astype(arg_model.dtype) # converts to vector type
@staticmethod
def _prepare_arguments(queue: CommandQueue, knl: Kernel, **kwargs):
global_size = kwargs.pop('global_size', None)
local_size = kwargs.pop('local_size', None)
global_size = knl.global_size if global_size is None else global_size
local_size = knl.local_size if local_size is None else local_size
supported_kws = [k for k in knl.args.keys()]
kw_not_in_kernel_arguments = [kw for kw in kwargs if kw not in supported_kws]
if len(kw_not_in_kernel_arguments) > 0:
raise ValueError(
f'keyword argument {kw_not_in_kernel_arguments} does not exist in kernel argument list {supported_kws}')
# If kernel arguments are of type np.ndarray they are converted to cl arrays here
        # This is done here, since the queue is available at this point for sure.
# todo: deal with case if kwarg is numpy argument
def deal_with_np_arrays(v):
if isinstance(v, Global) and isinstance(v.default, np.ndarray):
v.default = to_device(ary=v.default, queue=queue)
return v
else:
return v
knl.args = {k: deal_with_np_arrays(v) for k, v in knl.args.items()}
# set default arguments. Looping over kernel model forces correct order of arguments
args_call = [kwargs.pop(key, value.default if isinstance(value, (Constant, Global, Scalar, Local)) else None)
for key, value in knl.args.items()]
if any(arg is None for arg in args_call):
raise ValueError('Argument equal to None can lead to system crash')
if global_size is None:
raise ValueError('global_size not provided!')
if global_size == ():
raise ValueError('Global size is empty')
if 0 in global_size:
raise ValueError(f'Parameter in global size {global_size} equal to zero')
if local_size is not None and 0 in local_size:
raise ValueError(f'Parameter in local size {local_size} equal to zero')
# convert scalar argument to correct type. E.g. argument can be python int and is converted to char
args_model = list(knl.args.values())
args_call = [CallableKernel._typing_scalar_argument(args_model[i], arg)
if type(args_model[i]) in [Scalar, Scalar] else arg
for i, arg in enumerate(args_call)]
# if argument of type LocalArray extract cl.LocalMemory instance to be passed as argument
args_call = [arg.cl_local_memory if isinstance(arg, LocalArray) else arg for arg in args_call]
# check if buffer have same type as defined in the kernel function header
b_types_equal = [args_call[i].dtype == v.dtype for i, v in enumerate(args_model) if isinstance(v, Global)]
if not np.all(b_types_equal):
idx_buffer_list = int(np.argmin(b_types_equal))
idx = [i for i, kv in enumerate(knl.args.items()) if isinstance(kv[1], Global)][idx_buffer_list]
buffer_name = [k for k, v in knl.args.items()][idx]
buffer_type_expected = args_model[idx].dtype
buffer_type_call = args_call[idx].dtype
raise ValueError(f'Expected buffer argument \'{buffer_name}\' with type {buffer_type_expected} '
f'but got buffer with type {buffer_type_call}')
# check if buffer elements of array arguments have memory order as expected (c or f contiguous)
def b_array_memory_order_as_expected(ary_model: Global, ary_call: TypesClArray):
if ary_model.order_in_memory == OrderInMemory.C_CONTIGUOUS:
return ary_call.flags.c_contiguous
else: # f_contiguous
return ary_call.flags.f_contiguous
knl_args_invalid_memory_order = [(k, v) for idx, (k, v) in enumerate(knl.args.items())
if isinstance(v, Global) and
not b_array_memory_order_as_expected(v, args_call[idx])]
if len(knl_args_invalid_memory_order) > 0:
msg = '\n'.join([f'Array argument \'{arg[0]}\' is not {arg[1].order_in_memory} (as defined in Kernel)'
for arg in knl_args_invalid_memory_order])
raise ValueError(msg)
non_supported_types = [np.ndarray]
if any(_ := [type(arg) in non_supported_types for arg in args_call]):
raise ValueError(f'Type of argument \'{list(knl.args.items())[np.where(_)[0][0]][0]}\' is not supported in '
f'kernel call')
return global_size, local_size, args_call
@dataclass
class CallableKernelEmulation(CallableKernel):
function: Callable
def __call__(self,
global_size: KernelGridType = None,
local_size: KernelGridType = None,
**kwargs: Union[TypesClArray, object]) -> cl.Event:
        # e.g. if two kernels of a program shall run concurrently, this can be enabled by passing another queue here
queue = kwargs.pop('queue', get_current_queue())
global_size, local_size, args = self._prepare_arguments(queue=queue, knl=self.kernel_model,
global_size=global_size,
local_size=local_size, **kwargs)
self.function(global_size, local_size, *args)
# create user event with context retrieved from first arg of type Array
event = cl.UserEvent([_ for _ in args if isinstance(_, TypesClArray.__args__)][0].context)
event.set_status(cl.command_execution_status.COMPLETE)
return event
@dataclass
class CallableKernelDevice(CallableKernel):
compiled: cl.Kernel
@staticmethod
def check_local_size_not_exceeding_device_limits(device: Device, local_size):
# E.g. on nvidia the local size might be individually limited to be (1024,1024,64).
        # This should trigger an exception when an invalid (too large) local size is provided.
if local_size is not None and any([desired_local_size > device.max_work_item_sizes[dim]
for dim, desired_local_size in enumerate(local_size)]):
raise ValueError(f'Requested local dimensions {local_size} exceed {device.max_work_item_sizes=}')
def __call__(self,
global_size: KernelGridType = None,
local_size: KernelGridType = None,
**kwargs) -> cl.Event:
        # e.g. if two kernels of a program shall run concurrently, this can be enabled by passing another queue here
queue = kwargs.pop('queue', get_current_queue())
assert self.compiled.context.int_ptr == queue.context.int_ptr
global_size, local_size, args = self._prepare_arguments(queue=queue, knl=self.kernel_model,
global_size=global_size,
local_size=local_size, **kwargs)
self.check_local_size_not_exceeding_device_limits(queue.device, local_size)
# extract buffer from cl arrays separate, since in emulation we need cl arrays
args_cl = [arg.data if isinstance(arg, TypesClArray.__args__) else arg for i, arg in enumerate(args)]
event = self.compiled(queue, global_size, local_size, *args_cl)
queue.add_event(event, self.kernel_model.name)
return event
@dataclass
class ProgramContainer:
"""
Responsibility:
A callable kernel is returned with program.kernel_name. Depending on value of b_run_python_emulation a call of this
kernel is executed on device or in emulation.
"""
program_model: Program
file: str
init: CommandQueue
callable_kernels: Dict[str, Union[CallableKernelEmulation, CallableKernelDevice]] = None
def __getattr__(self, name) -> CallableKernel:
if name in self.callable_kernels:
return self.callable_kernels[name]
else:
return super().__getattribute__(name)
# https://stackoverflow.com/questions/1988804/what-is-memoization-and-how-can-i-use-it-in-python
class MemoizeKernelFunctions:
def __init__(self, f):
self.f = f
self.memo = {}
def __call__(self, program_model: Program, context: Context, file: str = None):
# body = ''.join(program_model.rendered_template)
_id = hash(f'{hash(context)}{hash(program_model)}')
if _id not in self.memo:
self.memo[_id] = self.f(program_model, context, file)
return self.memo[_id]
@MemoizeKernelFunctions
def compile_cl_program_device(program_model: Program, context: Context = None, file: str = None) -> Dict[str, Kernel]:
code_cl = program_model.rendered_template
program = build_for_device(context, code_cl, file)
kernels_model = program_model.kernels
# set scalar arguments for each kernel from kernel model
callable_kernels = {knl.function_name: knl
for i, knl in enumerate(program.all_kernels())}
for i, knl in enumerate(kernels_model):
arg_types = [arg.dtype if type(arg) in [Scalar, Scalar] else None
for _, arg in kernels_model[i].args.items()]
callable_kernels[knl.name].set_scalar_arg_dtypes(arg_types)
return callable_kernels
@MemoizeKernelFunctions
def compile_cl_program_emulation(program_model: Program, context: Context, file: str = None,
*args, **kwargs) -> Dict[str, Callable]:
code_py = unparse_c_code_to_python(code_c=program_model.rendered_template)
module = create_py_file_and_load_module(code_py, file)
kernels_model = program_model.kernels
callable_kernels = {knl.name: module.__getattribute__(knl.name) for knl in kernels_model}
return callable_kernels
def compile_cl_program(program_model: Program, context: Context = None, emulate: bool = False,
file: str = '$default_path') -> ProgramContainer:
t_ns_start = time.perf_counter_ns()
# deal with file name
if isinstance(file, Path):
file = str(file)
if file is None and emulate:
raise ValueError('You intended to create no file by setting file=None. '
'However, a file must be created for debugging.') # todo can python debugging run without file?
elif file == '$default_path':
file = str(program_model.get_default_dir_pycl_kernels().joinpath(program_model.kernels[0].name))
if context is None:
context = get_current_queue().context
dict_kernels_program_model = {knl.name: knl for knl in program_model.kernels}
if emulate:
dict_emulation_kernel_functions = compile_cl_program_emulation(program_model, context, file)
callable_kernels = {k: CallableKernelEmulation(kernel_model=dict_kernels_program_model[k], function=v)
for k, v in dict_emulation_kernel_functions.items()}
else:
dict_device_kernel_functions = compile_cl_program_device(program_model, context, file)
callable_kernels = {k: CallableKernelDevice(kernel_model=dict_kernels_program_model[k], compiled=v)
for k, v in dict_device_kernel_functions.items()}
# make callable kernel available in knl model instance
for knl in program_model.kernels:
knl.callable_kernel = callable_kernels[knl.name]
context.add_time_compilation(time.perf_counter_ns() - t_ns_start)
return ProgramContainer(program_model=program_model,
file=file,
init=context,
callable_kernels=callable_kernels)
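# End-to-end sketch (illustrative; assumes a current queue/context has already been set up so that
# get_current_queue() works; the kernel below is an example, not part of this module):
#   a = to_device(ary=np.zeros(16, dtype=Types.float), queue=get_current_queue())
#   knl = Kernel(name='copy_buffer',
#                args={'a': Global(a), 'b': Global(Types.float)},
#                body=['b[get_global_id(0)] = a[get_global_id(0)];'],
#                global_size=a.shape)
#   copy_buffer = knl.compile(emulate=False)  # returns the callable kernel
#   copy_buffer(b=to_device(ary=np.zeros(16, dtype=Types.float), queue=get_current_queue()))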
def int_safe(val: float):
if val.is_integer():
return int(val)
else:
raise ValueError(f'val={val} is no integer')
class HashArray(Array):
def __init__(self, *args, **kwargs):
if isinstance(args[0], TypesClArray.__args__):
a = args[0]
super().__init__(a.queue, a.shape, a.dtype, order="C", allocator=a.allocator,
data=a.data, offset=a.offset, strides=a.strides, events=a.events)
else:
super().__init__(*args, **kwargs)
self.hash = hash(self.get().tobytes())
def __setitem__(self, key, value):
super().__setitem__(key, value)
self.update_hash()
def set(self, ary, queue=None, async_=None, **kwargs):
res = super().set(ary, queue, async_, **kwargs)
self.update_hash()
return res
def update_hash(self):
self.hash = hash(self.get().tobytes())
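# Sketch (illustrative): HashArray wraps an existing cl array and keeps a hash of its host content, e.g.
#   h = HashArray(to_device(ary=np.arange(4, dtype=Types.int), queue=get_current_queue()))
#   h.set(np.ones(4, dtype=Types.int))  # set() refreshes h.hash automatically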
class Helpers:
    # helper methods which can be useful in interplay with this framework
@staticmethod
def _camel_to_snake(name):
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
@staticmethod
def command_compute_address(n_dim: int) -> str:
command = '0'
for i in range(n_dim):
offset = '1'
for j in range(i + 1, n_dim):
offset += '*get_global_size({})'.format(j)
command += '+get_global_id({})*{}'.format(i, offset)
return command
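    # For example, command_compute_address(2) builds the row-major flat index string
    # '0+get_global_id(0)*1*get_global_size(1)+get_global_id(1)*1'.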
# helpers for using vector types
@staticmethod
def get_vec_dtype(dtype_vec: np.dtype, dtype_scalar: np.dtype) -> np.dtype:
if number_vec_elements_of_cl_type(dtype_vec) == 1:
return dtype_scalar
else:
c_name = '{}{}'.format(c_name_from_dtype(dtype_scalar), number_vec_elements_of_cl_type(dtype_vec))
return getattr(Types, c_name)
@staticmethod
def array_indexing_for_vec_type(array: str, index: str, dtype: np.dtype):
"""
https://stackoverflow.com/questions/24746221/using-a-vector-as-an-array-index
e.g.
uchar4 keys = (uchar4)(5, 0, 2, 6);
uint4 results = (uint4)(data[keys.s0], data[keys.s1], data[keys.s2], data[keys.s3]);
:param dtype:
:param array:
:param index:
:return:
"""
if number_vec_elements_of_cl_type(dtype) == 1:
return '{array_name}[{index_name}]'.format(array_name=array, index_name=index)
else:
return '({c_type_name})({vector})'.format(c_type_name=c_name_from_dtype(dtype),
vector=', '.join(
['{array_name}[{index_name}.s{i_vec_element}]'.format(
array_name=array,
index_name=index,
i_vec_element=VEC_INDICES[i])
for i in range(number_vec_elements_of_cl_type(dtype))]))
@staticmethod
def command_const_vec_type(param: Union[str, float, int], dtype: np.dtype) -> str:
"""
param = 1.5, dtype=ClTypes.float -> 'convert_float(1.5)'
param = 1.5, dtype=ClTypes.float2 -> '(float2)(convert_float(1.5), convert_float(1.5))
:param param:
:param dtype:
:return:
"""
if number_vec_elements_of_cl_type(dtype) == 1:
return 'convert_{}({})'.format(c_name_from_dtype(dtype), param)
else:
dtype_c_name = c_name_from_dtype(scalar_type_from_vec_type(dtype))
return '({})(({}))'.format(c_name_from_dtype(dtype),
', '.join(['convert_{}({})'.format(dtype_c_name,
param)] * get_vec_size(dtype)))
@staticmethod
def command_vec_sum(var_name: str, dtype: np.dtype) -> str:
"""
Cases:
float var_name -> return 'var_name'
float4 var_name -> return 'var_name.s0 + var_name.s1 + var_name.s2 + var_name.s3'
:param var_name:
:return:
"""
if get_vec_size(dtype) == 1:
return var_name
else:
return ' + '.join(
['{}.s{}'.format(var_name, VEC_INDICES[i]) for i in range(get_vec_size(dtype))])
# todo: use splay method of pyopencl library instead
# from pyopencl.array import splay
# splay
@staticmethod
def _get_local_size_coalesced_last_dim(global_size, desired_wg_size):
"""
E.g. global_size = (1000, 25) and desired_wg_size=64
Then a local_size=(2,25) is returned for multiple reasons:
The work group size must be equal or smaller than the desired work group size.
        We make the last local dimension as large as possible (it cannot exceed the global size of the last dimension).
If possible the second last dimension is set to a value larger than 1, such that we get close to our desired
work group size.
:param global_size:
:param desired_wg_size:
:return:
"""
local_size = [1] * len(global_size)
for i_dim in range(1, len(global_size) + 1):
if global_size[-i_dim] * local_size[-i_dim + 1] < desired_wg_size:
local_size[-i_dim] = global_size[-i_dim]
else:
local_size[-i_dim] = np.max([i for i in range(1, desired_wg_size + 1)
if (global_size[-i_dim] / i).is_integer() and
i * local_size[-i_dim + 1] <= desired_wg_size])
if np.product(local_size) < desired_wg_size:
pass
# res = inspect.stack()
# logging.info(f'Local size {local_size} is suboptimal for desired work group size of {desired_wg_size}. '
# f'For best performance increase the global size of the most inner dimension, until it is '
# f'divisible by {desired_wg_size}. \n'
# f'More information: '
# f'https://stackoverflow.com/questions/3957125/questions-about-global-and-local-work-size')
return tuple(local_size)
# return None
@staticmethod
def get_local_size_coalesced_last_dim(global_size, context: Context):
"""
        If the global size is not a multiple of the local size, according to the following link it should not work.
        https://community.khronos.org/t/opencl-ndrange-global-size-local-size/4167
        However (at least on AMD GPUs), simple tests have shown that it still works. Therefore this method returns a
        local size where the global size is not necessarily a multiple of it.
:param global_size:
:param context:
:return:
"""
desired_wg_size = 4 * context.devices[0].global_mem_cacheline_size
return Helpers._get_local_size_coalesced_last_dim(global_size, desired_wg_size)
| 42.382863 | 138 | 0.650152 |
717004c16aa303c94b3845706ad4664cb45eb6f9 | 3,602 | py | Python | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/application_properties.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2018-09-22T10:39:43.000Z | 2018-09-22T10:39:43.000Z | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/application_properties.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | null | null | null | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/application_properties.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationProperties(Model):
"""This type describes properties of an application resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param description: User readable description of the application.
:type description: str
:param debug_params: Internal use.
:type debug_params: str
:param services: describes the services in the application.
:type services:
list[~azure.mgmt.servicefabricmesh.models.ServiceResourceDescription]
:ivar health_state: Describes the health state of an application resource.
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
:vartype health_state: str or
~azure.mgmt.servicefabricmesh.models.HealthState
:ivar unhealthy_evaluation: When the application's health state is not
'Ok', this additional details from service fabric Health Manager for the
user to know why the application is marked unhealthy.
:vartype unhealthy_evaluation: str
:ivar status: Status of the application resource. Possible values include:
'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed'
:vartype status: str or
~azure.mgmt.servicefabricmesh.models.ApplicationResourceStatus
:ivar status_details: Gives additional information about the current
status of the application deployment.
:vartype status_details: str
:ivar service_names: Names of the services in the application.
:vartype service_names: list[str]
:param diagnostics: Describes the diagnostics definition and usage for an
application resource.
:type diagnostics:
~azure.mgmt.servicefabricmesh.models.DiagnosticsDescription
"""
_validation = {
'health_state': {'readonly': True},
'unhealthy_evaluation': {'readonly': True},
'status': {'readonly': True},
'status_details': {'readonly': True},
'service_names': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'debug_params': {'key': 'debugParams', 'type': 'str'},
'services': {'key': 'services', 'type': '[ServiceResourceDescription]'},
'health_state': {'key': 'healthState', 'type': 'str'},
'unhealthy_evaluation': {'key': 'unhealthyEvaluation', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'status_details': {'key': 'statusDetails', 'type': 'str'},
'service_names': {'key': 'serviceNames', 'type': '[str]'},
'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsDescription'},
}
def __init__(self, description=None, debug_params=None, services=None, diagnostics=None):
super(ApplicationProperties, self).__init__()
self.description = description
self.debug_params = debug_params
self.services = services
self.health_state = None
self.unhealthy_evaluation = None
self.status = None
self.status_details = None
self.service_names = None
self.diagnostics = diagnostics
| 43.926829 | 93 | 0.659078 |
c8593ebe105e13b970234b2da20db343df69b261 | 9,941 | py | Python | main.py | franciscorpuz/curl_rainbow | e3f6f2a9bee1ed6436e8cc55384b664220c7ab3b | [
"MIT"
] | 38 | 2020-07-07T11:29:18.000Z | 2022-03-28T13:38:04.000Z | main.py | franciscorpuz/curl_rainbow | e3f6f2a9bee1ed6436e8cc55384b664220c7ab3b | [
"MIT"
] | 6 | 2020-08-01T11:44:39.000Z | 2021-06-24T00:15:23.000Z | main.py | franciscorpuz/curl_rainbow | e3f6f2a9bee1ed6436e8cc55384b664220c7ab3b | [
"MIT"
] | 18 | 2020-08-07T04:42:37.000Z | 2021-12-08T22:42:14.000Z | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Kai Arulkumaran
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# ==============================================================================
from __future__ import division
import argparse
import bz2
from datetime import datetime
import os
import pickle
import atari_py
import numpy as np
import torch
from tqdm import trange
from agent import Agent
from env import Env
from memory import ReplayMemory
from test import test
seed = np.random.randint(12345)
# Note that hyperparameters may originally be reported in ATARI game frames instead of agent steps
parser = argparse.ArgumentParser(description='Rainbow')
parser.add_argument('--id', type=str, default='default', help='Experiment ID')
parser.add_argument('--seed', type=int, default=seed, help='Random seed')
parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--game', type=str, default='ms_pacman', choices=atari_py.list_games(), help='ATARI game')
parser.add_argument('--T-max', type=int, default=int(1e5), metavar='STEPS', help='Number of training steps (4x number of frames)')
parser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH', help='Max episode length in game frames (0 to disable)')
parser.add_argument('--history-length', type=int, default=4, metavar='T', help='Number of consecutive states processed')
parser.add_argument('--architecture', type=str, default='data-efficient', choices=['canonical', 'data-efficient'], metavar='ARCH', help='Network architecture')
parser.add_argument('--hidden-size', type=int, default=256, metavar='SIZE', help='Network hidden size')
parser.add_argument('--noisy-std', type=float, default=0.1, metavar='σ', help='Initial standard deviation of noisy linear layers')
parser.add_argument('--atoms', type=int, default=51, metavar='C', help='Discretised size of value distribution')
parser.add_argument('--V-min', type=float, default=-10, metavar='V', help='Minimum of value distribution support')
parser.add_argument('--V-max', type=float, default=10, metavar='V', help='Maximum of value distribution support')
parser.add_argument('--model', type=str, metavar='PARAMS', help='Pretrained model (state dict)')
parser.add_argument('--memory-capacity', type=int, default=int(1e5), metavar='CAPACITY', help='Experience replay memory capacity')
parser.add_argument('--replay-frequency', type=int, default=1, metavar='k', help='Frequency of sampling from memory')
parser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω', help='Prioritised experience replay exponent (originally denoted α)')
parser.add_argument('--priority-weight', type=float, default=0.4, metavar='β', help='Initial prioritised experience replay importance sampling weight')
parser.add_argument('--multi-step', type=int, default=20, metavar='n', help='Number of steps for multi-step return')
parser.add_argument('--discount', type=float, default=0.99, metavar='γ', help='Discount factor')
parser.add_argument('--target-update', type=int, default=int(2e3), metavar='τ', help='Number of steps after which to update target network')
parser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')
parser.add_argument('--learning-rate', type=float, default=0.0001, metavar='η', help='Learning rate')
parser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')
parser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')
parser.add_argument('--norm-clip', type=float, default=10, metavar='NORM', help='Max L2 norm for gradient clipping')
parser.add_argument('--learn-start', type=int, default=int(1600), metavar='STEPS', help='Number of steps before starting training')
parser.add_argument('--evaluate', action='store_true', help='Evaluate only')
parser.add_argument('--evaluation-interval', type=int, default=10000, metavar='STEPS', help='Number of training steps between evaluations')
parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N', help='Number of evaluation episodes to average over')
# TODO: Note that DeepMind's evaluation method is running the latest agent for 500K frames every 1M steps
parser.add_argument('--evaluation-size', type=int, default=500, metavar='N', help='Number of transitions to use for validating Q')
parser.add_argument('--render', action='store_true', help='Display screen (testing only)')
parser.add_argument('--enable-cudnn', action='store_true', help='Enable cuDNN (faster but nondeterministic)')
parser.add_argument('--checkpoint-interval', default=0, help='How often to checkpoint the model, defaults to 0 (never checkpoint)')
parser.add_argument('--memory', help='Path to save/load the memory from')
parser.add_argument('--disable-bzip-memory', action='store_true', help='Don\'t zip the memory file. Not recommended (zipping is a bit slower and much, much smaller)')
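# Example invocation (illustrative; all flags used here are defined above):
#   python main.py --game ms_pacman --T-max 100000 --target-update 2000 --memory results/pacman_mem.bz2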
# Setup
args = parser.parse_args()
xid = 'curl-' + args.game + '-' + str(seed)
args.id = xid
print(' ' * 26 + 'Options')
for k, v in vars(args).items():
print(' ' * 26 + k + ': ' + str(v))
results_dir = os.path.join('results', args.id)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
metrics = {'steps': [], 'rewards': [], 'Qs': [], 'best_avg_reward': -float('inf')}
np.random.seed(args.seed)
torch.manual_seed(np.random.randint(1, 10000))
if torch.cuda.is_available() and not args.disable_cuda:
args.device = torch.device('cuda')
torch.cuda.manual_seed(np.random.randint(1, 10000))
torch.backends.cudnn.enabled = args.enable_cudnn
else:
args.device = torch.device('cpu')
# Simple ISO 8601 timestamped logger
def log(s):
print('[' + str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S')) + '] ' + s)
def load_memory(memory_path, disable_bzip):
if disable_bzip:
with open(memory_path, 'rb') as pickle_file:
return pickle.load(pickle_file)
else:
with bz2.open(memory_path, 'rb') as zipped_pickle_file:
return pickle.load(zipped_pickle_file)
def save_memory(memory, memory_path, disable_bzip):
if disable_bzip:
with open(memory_path, 'wb') as pickle_file:
pickle.dump(memory, pickle_file)
else:
with bz2.open(memory_path, 'wb') as zipped_pickle_file:
pickle.dump(memory, zipped_pickle_file)
# Environment
env = Env(args)
env.train()
action_space = env.action_space()
# Agent
dqn = Agent(args, env)
# If a model is provided, and evaluate is false, presumably we want to resume, so try to load memory
if args.model is not None and not args.evaluate:
if not args.memory:
raise ValueError('Cannot resume training without memory save path. Aborting...')
elif not os.path.exists(args.memory):
raise ValueError('Could not find memory file at {path}. Aborting...'.format(path=args.memory))
mem = load_memory(args.memory, args.disable_bzip_memory)
else:
mem = ReplayMemory(args, args.memory_capacity)
priority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)
# Construct validation memory
val_mem = ReplayMemory(args, args.evaluation_size)
T, done = 0, True
while T < args.evaluation_size:
if done:
state, done = env.reset(), False
next_state, _, done = env.step(np.random.randint(0, action_space))
val_mem.append(state, None, None, done)
state = next_state
T += 1
if args.evaluate:
dqn.eval() # Set DQN (online network) to evaluation mode
avg_reward, avg_Q = test(args, 0, dqn, val_mem, metrics, results_dir, evaluate=True) # Test
print('Avg. reward: ' + str(avg_reward) + ' | Avg. Q: ' + str(avg_Q))
else:
# Training loop
dqn.train()
T, done = 0, True
for T in trange(1, args.T_max + 1):
if done:
state, done = env.reset(), False
if T % args.replay_frequency == 0:
dqn.reset_noise() # Draw a new set of noisy weights
action = dqn.act(state) # Choose an action greedily (with noisy weights)
next_state, reward, done = env.step(action) # Step
if args.reward_clip > 0:
reward = max(min(reward, args.reward_clip), -args.reward_clip) # Clip rewards
mem.append(state, action, reward, done) # Append transition to memory
# Train and test
if T >= args.learn_start:
mem.priority_weight = min(mem.priority_weight + priority_weight_increase, 1) # Anneal importance sampling weight β to 1
if T % args.replay_frequency == 0:
#for _ in range(4):
dqn.learn(mem) # Train with n-step distributional double-Q learning
dqn.update_momentum_net() # MoCo momentum update
if T % args.evaluation_interval == 0:
dqn.eval() # Set DQN (online network) to evaluation mode
avg_reward, avg_Q = test(args, T, dqn, val_mem, metrics, results_dir) # Test
log('T = ' + str(T) + ' / ' + str(args.T_max) + ' | Avg. reward: ' + str(avg_reward) + ' | Avg. Q: ' + str(avg_Q))
dqn.train() # Set DQN (online network) back to training mode
# If memory path provided, save it
if args.memory is not None:
save_memory(mem, args.memory, args.disable_bzip_memory)
# Update target network
if T % args.target_update == 0:
dqn.update_target_net()
# Checkpoint the network
if (args.checkpoint_interval != 0) and (T % args.checkpoint_interval == 0):
dqn.save(results_dir, 'checkpoint.pth')
state = next_state
env.close()
| 49.954774 | 434 | 0.715622 |
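A minimal round-trip sketch of the bzip2-compressed pickling performed by load_memory/save_memory above — here a plain dict stands in for the ReplayMemory object and the file name is illustrative only:

import bz2
import pickle

memory = {'transitions': [(0, 1, 0.5, False)]}  # stand-in for a ReplayMemory instance

# Equivalent of save_memory(mem, path, disable_bzip=False)
with bz2.open('replay_memory.bz2', 'wb') as zipped_pickle_file:
    pickle.dump(memory, zipped_pickle_file)

# Equivalent of load_memory(path, disable_bzip=False)
with bz2.open('replay_memory.bz2', 'rb') as zipped_pickle_file:
    restored = pickle.load(zipped_pickle_file)

assert restored == memory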
8cfd8e2575e7866e00b1ce9e159b6b15f15ffd69 | 10,070 | py | Python | contrib/spendfrom/spendfrom.py | Babacoins/Babacoin-v2 | 126ad0e4a744a1d7ae1629cf414ae6b033d59c9f | [
"MIT"
] | 1 | 2018-08-13T15:47:53.000Z | 2018-08-13T15:47:53.000Z | contrib/spendfrom/spendfrom.py | Babacoins/Babacoin-v2 | 126ad0e4a744a1d7ae1629cf414ae6b033d59c9f | [
"MIT"
] | null | null | null | contrib/spendfrom/spendfrom.py | Babacoins/Babacoin-v2 | 126ad0e4a744a1d7ae1629cf414ae6b033d59c9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Use the raw transactions API to spend BABAs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a babacoind or babacoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the babacoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Babacoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Babacoin")
return os.path.expanduser("~/.babacoin")
def read_bitcoin_config(dbdir):
"""Read the babacoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "babacoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a babacoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18887 if testnet else 8887
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the babacoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(babacoind):
info = babacoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
babacoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = babacoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(babacoind):
address_summary = dict()
address_to_account = dict()
for info in babacoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = babacoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = babacoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-babacoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(babacoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(babacoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to babacoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = babacoind.createrawtransaction(inputs, outputs)
signed_rawtx = babacoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(babacoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = babacoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(babacoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = babacoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(babacoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in - total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in - total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get BABAs from")
parser.add_option("--to", dest="to", default=None,
help="address to send BABAs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of babacoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
babacoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(babacoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(babacoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(babacoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(babacoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = babacoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 37.574627 | 111 | 0.633863 |
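A standalone sketch of the greedy selection that select_coins above performs, using Decimal amounts and made-up unspent outputs; it takes inputs in order until the target amount plus fee is covered and reports the leftover change:

from decimal import Decimal

def greedy_select(needed, inputs):
    # Mirrors select_coins() from the script above.
    outputs = []
    have = Decimal("0.0")
    n = 0
    while have < needed and n < len(inputs):
        outputs.append({"txid": inputs[n]["txid"], "vout": inputs[n]["vout"]})
        have += inputs[n]["amount"]
        n += 1
    return (outputs, have - needed)

unspent = [
    {"txid": "aa" * 32, "vout": 0, "amount": Decimal("0.7")},
    {"txid": "bb" * 32, "vout": 1, "amount": Decimal("0.5")},
]
inputs, change = greedy_select(Decimal("1.0") + Decimal("0.001"), unspent)
print(len(inputs), change)  # -> 2 0.199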
5cedbcc3ff41c810fc57341964a7f174ecfe3834 | 6,301 | py | Python | CustomExtension.extension/STVTools.tab/Modify.panel/Modify1.stack3/Replace.pulldown/Batch parameter change.pushbutton/script.py | Melca-G/Aeolus | e014cdbbffc1c650d569efd8750480bc5a4cdc3b | [
"MIT"
] | null | null | null | CustomExtension.extension/STVTools.tab/Modify.panel/Modify1.stack3/Replace.pulldown/Batch parameter change.pushbutton/script.py | Melca-G/Aeolus | e014cdbbffc1c650d569efd8750480bc5a4cdc3b | [
"MIT"
] | null | null | null | CustomExtension.extension/STVTools.tab/Modify.panel/Modify1.stack3/Replace.pulldown/Batch parameter change.pushbutton/script.py | Melca-G/Aeolus | e014cdbbffc1c650d569efd8750480bc5a4cdc3b | [
"MIT"
] | null | null | null | import sys
import ConfigParser
from os.path import expanduser
import csv
# Set system path
home = expanduser("~")
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
# Master Path
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
# Built Path
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
from pyrevit.framework import List
from pyrevit import revit, DB, forms
import re, clr, os, threading
import EAMQcUtils
import xlsxwriter
clr.AddReference('RevitAPI')
clr.AddReference("System")
from Autodesk.Revit.DB import FilteredElementCollector, Transaction, ImportInstance, \
OpenOptions,WorksetConfiguration, WorksetConfigurationOption, DetachFromCentralOption,\
ModelPathUtils, SaveAsOptions, WorksharingSaveAsOptions, RevitLinkType, ViewFamilyType, \
ViewFamily, View3D, IndependentTag, ElementId, StorageType
from System.Collections.Generic import List
from Autodesk.Revit.UI.Events import DialogBoxShowingEventArgs
from Autodesk.Revit.UI import UIApplication
from Autodesk.Revit.ApplicationServices import Application
clr.AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
clr.AddReference('RevitAPIUI')
# Collect Save location and Rvt Files
def Importcsv(Filename):
flat_list = []
with open(Filename, 'r') as f:
reader = csv.reader(f)
Lst = list(reader)
for sublist in Lst:
flat_list.append(sublist)
#for item in sublist:
#flat_list.append(item)
return flat_list
class ChangeElement:
def ChangeParameter(self, id, parameter, value):
self.Id = id
self.Parameter = parameter
self.Value = value
return self
def OpenFiles(oFile, app, audit):
openOpt = OpenOptions()
if audit == True:
openOpt.Audit = True
else:
openOpt.Audit = False
openOpt.DetachFromCentralOption = DetachFromCentralOption.DetachAndPreserveWorksets
wsopt = WorksetConfiguration(WorksetConfigurationOption.CloseAllWorksets)
# wsopt.Open(worksetList)
openOpt.SetOpenWorksetsConfiguration(wsopt)
modelPath = ModelPathUtils.ConvertUserVisiblePathToModelPath(oFile)
currentdoc = app.OpenDocumentFile(modelPath, openOpt)
try:
DialogBoxShowingEventArgs.OverrideResult(1)
except:
pass
return currentdoc
collectorCSVFile = forms.pick_file(file_ext='csv', multi_file=False, unc_paths=False)
collectorFiles = forms.pick_file(file_ext='rvt', multi_file=True, unc_paths=False)
destinationFolder = forms.pick_folder()
# Main
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
__doc__ = 'Extract information from a csv file and batch apply parameter value changes. '\
'Format of csv: "model name, element Id, parameter name, new parameter value". '\
'Step 1: Select the csv file. '\
'Step 2: Select the Revit files. '\
'Step 3: Select the directory where the new models will be placed.'
# set the first row to header to avoid the unicode issue
uiapp = UIApplication(doc.Application)
application = uiapp.Application
if len(collectorFiles) > 0:
for aDoc in collectorFiles:
openedDoc = OpenFiles(aDoc, application, audit=False)
t = Transaction(openedDoc, "Apply Parameters")
t.Start()
print(str(openedDoc.Title) + ' Opened')
workshareOp = WorksharingSaveAsOptions()
# Define the name and location of excel file
rawTitle = re.split('detached', openedDoc.Title)[0]
#title = rawTitle[0:len(rawTitle) -1]
title = rawTitle[0:len(rawTitle) -1]
print(str(title) + ' is being modified:')
for line in Importcsv(collectorCSVFile):
modelName = line[0]
id = line[1]
parameterName = line[2]
parameterValue = line[3]
v1 = ()
print(modelName, title)
if modelName == title + '.rvt':
element = ()
try:
element = openedDoc.GetElement(ElementId(int(id)))
except:
print("ElementId {0} Does not Exist".format(str(id)))
try:
v1 = element.LookupParameter(parameterName)
except:
print("Error finding the value to {0} for {1}".format(parameterName, str(id)))
if v1:
if v1.StorageType == StorageType.Integer:
try:
element.LookupParameter(parameterName).Set(int(parameterValue))
print("Applied change {0} as {1}".format(id, parameterValue))
except:
print("Error Applying the value to {0} ".format(str(id)) + parameterName + " as integer")
elif v1.StorageType == StorageType.String:
try:
element.LookupParameter(parameterName).Set(str(parameterValue))
print("Applied change {0} as {1}".format(id, parameterValue))
except:
print("Error Applying the value to {0} ".format(str(id)) + parameterName + " as string")
elif v1.StorageType == StorageType.Double:
try:
element.LookupParameter(parameterName).Set(float(parameterValue))
print("Applied change {0} as {1}".format(id, parameterValue))
except:
print("Error Applying the value to {0} ".format(str(id)) + parameterName + " as double")
else:
print("Error Applying the value to {0} ".format(str(id)) + parameterName + " format error")
t.Commit()
saveOp = SaveAsOptions()
workOp = WorksharingSaveAsOptions()
workOp.SaveAsCentral = True
saveOp.SetWorksharingOptions(workOp)
saveAsTitle = openedDoc.Title
openedDoc.SaveAs(destinationFolder + '\\' + saveAsTitle, saveOp)
openedDoc.Close(False)
print("--------------------------")
| 42.288591 | 117 | 0.634026 |
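The script above expects each csv row to hold "model name, element Id, parameter name, new parameter value". A small sketch that writes such a file with the standard csv module (file name, element ids and values are illustrative only):

import csv

rows = [
    ["Building_A.rvt", "123456", "Comments", "Reviewed"],
    ["Building_A.rvt", "123457", "Mark", "D-101"],
]
with open("parameter_changes.csv", "w") as f:
    csv.writer(f, lineterminator="\n").writerows(rows)
# Importcsv() in the script reads this back as a list of 4-item lists.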
deb13753ffcdf7bf53566168da155940070cac1f | 1,601 | py | Python | deepocr/models/detection/predictor/pytorch.py | das-projects/deepOCR | ffc6db691605b7b4837da9619ab6e918fa1c18de | [
"Apache-2.0"
] | 1 | 2022-01-28T09:48:34.000Z | 2022-01-28T09:48:34.000Z | deepocr/models/detection/predictor/pytorch.py | das-projects/deepOCR | ffc6db691605b7b4837da9619ab6e918fa1c18de | [
"Apache-2.0"
] | null | null | null | deepocr/models/detection/predictor/pytorch.py | das-projects/deepOCR | ffc6db691605b7b4837da9619ab6e918fa1c18de | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2022, Arijit Das.
# Code adapted from doctr and huggingface
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Any, List, Union
import numpy as np
import torch
from torch import nn
from deepocr.models.preprocessor import PreProcessor
__all__ = ['DetectionPredictor']
class DetectionPredictor(nn.Module):
    """Implements an object able to localize text elements in a document

    Args:
        pre_processor: transform inputs for easier batched model inference
        model: core detection architecture
    """

    def __init__(
        self,
        pre_processor: PreProcessor,
        model: nn.Module,
    ) -> None:
        super().__init__()
        self.pre_processor = pre_processor
        self.model = model.eval()

    @torch.no_grad()
    def forward(
        self,
        pages: List[Union[np.ndarray, torch.Tensor]],
        **kwargs: Any,
    ) -> List[np.ndarray]:
        # Dimension check
        if any(page.ndim != 3 for page in pages):
            raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")

        processed_batches = self.pre_processor(pages)
        _device = next(self.model.parameters()).device
        predicted_batches = [
            self.model(batch.to(device=_device), return_preds=True, **kwargs)['preds']  # type:ignore[operator]
            for batch in processed_batches
        ]
        return [pred for batch in predicted_batches for pred in batch]
| 30.207547 | 111 | 0.665209 |
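A usage sketch for the predictor above, run next to the class definition. The real PreProcessor and detection model come from elsewhere in the package, so tiny stand-ins are used here only to show the contract forward() relies on (batched tensors in, a dict with a 'preds' list out):

import numpy as np
import torch
from torch import nn

class FakePreProcessor:
    # list of H x W x C pages -> list of batched N x C x H x W tensors
    def __call__(self, pages):
        return [torch.stack([torch.from_numpy(p).permute(2, 0, 1).float() / 255 for p in pages])]

class FakeDetModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dummy = nn.Parameter(torch.zeros(1))  # gives .parameters() a device
    def forward(self, x, return_preds=True, **kwargs):
        return {'preds': [np.zeros((0, 5)) for _ in range(x.shape[0])]}

predictor = DetectionPredictor(FakePreProcessor(), FakeDetModel())
preds = predictor([np.zeros((256, 256, 3), dtype=np.uint8)])
print(len(preds))  # one prediction array per input page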
37d818c6c8e76aef3cd39ee6847361955dd32599 | 8,584 | py | Python | src/python/turicreate/test/test_util.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | 1 | 2020-02-21T02:24:45.000Z | 2020-02-21T02:24:45.000Z | src/python/turicreate/test/test_util.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | 2 | 2019-03-28T00:17:14.000Z | 2019-03-28T00:17:47.000Z | src/python/turicreate/test/test_util.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import os
import unittest
import tempfile
import shutil
import uuid
import sys as _sys
from .. import util as glutil
from .. import SFrame, SArray, SGraph
from ..util import get_turicreate_object_type
from ..config import get_runtime_config, set_runtime_config
from . import util
class UtilTests(unittest.TestCase):
def test_archive_utils(self):
# Arrange
sf = SFrame([1, 2, 3, 4, 5])
dir = tempfile.mkdtemp(prefix="archive-tests")
try:
sf.save(dir)
# Act & Assert
self.assertTrue(glutil.is_directory_archive(dir))
self.assertEqual(glutil.get_archive_type(dir), "sframe")
self.assertFalse(glutil.is_directory_archive("/tmp"))
self.assertRaises(TypeError, lambda: glutil.get_archive_type("/tmp"))
finally:
shutil.rmtree(dir)
def test_crossproduct(self):
s = util.SFrameComparer()
d = {"opt1": [1, 2, 3], "opt2": ["a", "b"]}
actual = glutil.crossproduct(d)
actual = actual.sort("opt1")
expected = SFrame(
{"opt1": [1, 1, 2, 2, 3, 3], "opt2": ["a", "b", "a", "b", "a", "b"]}
)
# Check columns individually since there is no
# guaranteed ordering among columns.
for k in d.keys():
s._assert_sarray_equal(actual[k], expected[k])
def _validate_gl_object_type(self, obj, expected):
with util.TempDirectory() as temp_dir:
obj.save(temp_dir)
t = get_turicreate_object_type(temp_dir)
self.assertEqual(t, expected)
def test_get_turicreate_object_type(self):
sf = SFrame({"a": [1, 2]})
self._validate_gl_object_type(sf, "sframe")
sa = SArray([1, 2])
self._validate_gl_object_type(sa, "sarray")
d = SFrame(
{
"__src_id": [175343, 163607, 44041, 101370, 64892],
"__dst_id": [1011, 7928, 7718, 12966, 11080],
}
)
g = SGraph()
self._validate_gl_object_type(g, "sgraph")
def test_sframe_equals(self):
# Empty SFrames should be equal
sf_a = SFrame()
sf_b = SFrame()
glutil._assert_sframe_equal(sf_a, sf_b)
the_data = [i for i in range(0, 10)]
sf = SFrame()
sf["ints"] = SArray(data=the_data, dtype=int)
sf["floats"] = SArray(data=the_data, dtype=float)
sf["floats"] = sf["floats"] * 0.5
sf["strings"] = SArray(data=the_data, dtype=str)
sf["strings"] = sf["strings"].apply(lambda x: x + x + x)
# Make sure these aren't pointing to the same SFrame
sf_a = sf.filter_by([43], "ints", exclude=True)
sf_b = sf.filter_by([43], "ints", exclude=True)
glutil._assert_sframe_equal(sf_a, sf_b)
# Difference in number of columns
sf_a["extra"] = SArray(data=the_data)
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
del sf_a["extra"]
glutil._assert_sframe_equal(sf_a, sf_b)
# Difference in number of rows
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b[0:5])
# Difference in types
sf_a["diff_type"] = sf_a["ints"].astype(str)
sf_b["diff_type"] = sf_b["ints"]
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
del sf_a["diff_type"]
del sf_b["diff_type"]
glutil._assert_sframe_equal(sf_a, sf_b)
# Difference in column name
sf_a.rename({"strings": "string"}, inplace=True)
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
glutil._assert_sframe_equal(sf_a, sf_b, check_column_names=False)
sf_a.rename({"string": "strings"}, inplace=True)
glutil._assert_sframe_equal(sf_a, sf_b)
sf_a.rename({"ints": "floats1"}, inplace=True)
sf_a.rename({"floats": "ints"}, inplace=True)
sf_a.rename({"floats1": "floats"}, inplace=True)
glutil._assert_sframe_equal(sf_a, sf_b, check_column_names=False)
sf_a = sf.filter_by([43], "ints", exclude=True)
# Difference in column order
sf_a.swap_columns("strings", "ints", inplace=True)
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
glutil._assert_sframe_equal(sf_a, sf_b, check_column_order=False)
sf_a.swap_columns("strings", "ints", inplace=True)
glutil._assert_sframe_equal(sf_a, sf_b)
# Difference in row order
sf_a = sf_a.append(sf[0:5])
sf_b = sf[0:5].append(sf_b)
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
glutil._assert_sframe_equal(sf_a, sf_b, check_row_order=False)
# Difference in column order AND row order
sf_a.swap_columns("floats", "strings", inplace=True)
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
glutil._assert_sframe_equal(
sf_a, sf_b, check_column_order=False, check_row_order=False
)
# Column order, row order, names
sf_a.rename({"floats": "foo", "strings": "bar", "ints": "baz"}, inplace=True)
with self.assertRaises(AssertionError):
glutil._assert_sframe_equal(sf_a, sf_b)
# Illegal stuff
with self.assertRaises(ValueError):
glutil._assert_sframe_equal(
sf_a, sf_b, check_column_names=False, check_column_order=False
)
with self.assertRaises(ValueError):
glutil._assert_sframe_equal(
sf_a,
sf_b,
check_column_names=False,
check_column_order=False,
check_row_order=False,
)
with self.assertRaises(TypeError):
glutil._assert_sframe_equal(sf_b["floats"], sf_a["foo"])
def test_get_temp_file_location(self):
from ..util import _get_temp_file_location
from ..util import _convert_slashes
location = _get_temp_file_location()
self.assertTrue(os.path.isdir(location))
tmp = tempfile.mkdtemp(prefix="test_gl_util")
default_tmp = get_runtime_config()["TURI_CACHE_FILE_LOCATIONS"]
try:
set_runtime_config("TURI_CACHE_FILE_LOCATIONS", tmp)
location = _convert_slashes(_get_temp_file_location())
self.assertTrue(location.startswith(_convert_slashes(tmp)))
finally:
shutil.rmtree(tmp)
set_runtime_config("TURI_CACHE_FILE_LOCATIONS", default_tmp)
def test_make_temp_directory(self):
from ..util import _make_temp_directory, _get_temp_file_location
tmp_root = _get_temp_file_location()
location = _make_temp_directory(prefix=None)
try:
self.assertTrue(os.path.isdir(location))
self.assertTrue(location.startswith(tmp_root))
finally:
shutil.rmtree(location)
prefix = "abc_"
location = _make_temp_directory(prefix=prefix)
try:
self.assertTrue(os.path.isdir(location))
self.assertTrue(location.startswith(tmp_root))
self.assertTrue(os.path.basename(location).startswith(prefix))
finally:
shutil.rmtree(location)
def test_make_temp_filename(self):
from ..util import _make_temp_filename, _get_temp_file_location
tmp_root = _get_temp_file_location()
location = _make_temp_filename(prefix=None)
self.assertFalse(os.path.isfile(location))
self.assertFalse(os.path.exists(location))
self.assertTrue(location.startswith(tmp_root))
self.assertTrue(isinstance(location, str))
prefix = "abc_"
location = _make_temp_filename(prefix=prefix)
self.assertFalse(os.path.isfile(location))
self.assertFalse(os.path.exists(location))
self.assertTrue(location.startswith(tmp_root))
self.assertTrue(isinstance(location, str))
self.assertTrue(os.path.basename(location).startswith(prefix))
| 35.471074 | 85 | 0.632106 |
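The comparison helper exercised above is also useful on its own; a short sketch (assuming turicreate is installed) that compares two SFrames while ignoring row order:

import turicreate as tc
from turicreate.util import _assert_sframe_equal

a = tc.SFrame({'x': [1, 2, 3]})
b = a.sort('x', ascending=False)
_assert_sframe_equal(a, b, check_row_order=False)  # passes; with row order checked it raises AssertionError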
060418e1c83a7a5463c768915fb7e44420ada077 | 2,328 | py | Python | src/601_700/0639_decode-ways-ii/decode-ways-ii.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | 1 | 2019-12-18T06:08:47.000Z | 2019-12-18T06:08:47.000Z | src/601_700/0639_decode-ways-ii/decode-ways-ii.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | 1 | 2019-05-18T09:35:22.000Z | 2019-05-18T09:35:22.000Z | src/601_700/0639_decode-ways-ii/decode-ways-ii.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | null | null | null | class Solution(object):
    def numDecodings(self, s):
        n = len(s)
        cache = {}
        M = 10 ** 9 + 7

        def dfs(i):
            if i >= n:
                return 1
            if i in cache:
                return cache[i]
            res = 0
            if s[i] == '*':
                res = 9 * dfs(i + 1) % M
                if i + 1 < n and s[i + 1] == '*':
                    res = (res + 15 * dfs(i + 2)) % M
                elif i + 1 < n and s[i + 1] <= '6':
                    res = (res + 2 * dfs(i + 2)) % M
                elif i + 1 < n and s[i + 1] > '6':
                    res = (res + dfs(i + 2)) % M
            else:
                res = dfs(i + 1) % M if s[i] != '0' else 0
                if i + 1 < n and s[i + 1] == '*':
                    if s[i] == '1':
                        res = (res + 9 * dfs(i + 2)) % M
                    elif s[i] == '2':
                        res = (res + 6 * dfs(i + 2)) % M
                elif i + 1 < n and 10 <= int(s[i] + s[i + 1]) <= 26:
                    res = (res + dfs(i + 2)) % M
            cache[i] = res
            return cache[i]

        return dfs(0)

    # Dynamic programming (bottom-up)
    def numDecodings(self, s):
        if not s or s[0] == '0' or len(s) == 1:
            return 0 if s[0] == '0' else (9 if s[0] == '*' else 1)
        n = len(s)
        M = 10 ** 9 + 7
        dp = [0] * (n + 1)
        dp[0] = 1
        dp[1] = 9 if s[0] == '*' else (1 if s[0] != '0' else 0)
        for i in range(1, n):
            if s[i] == '*':
                dp[i + 1] = 9 * dp[i] % M
                if s[i - 1] == '*':
                    dp[i + 1] = (dp[i + 1] + 15 * dp[i - 1]) % M
                elif s[i - 1] == '1':
                    dp[i + 1] = (dp[i + 1] + 9 * dp[i - 1]) % M
                elif s[i - 1] == '2':
                    dp[i + 1] = (dp[i + 1] + 6 * dp[i - 1]) % M
            else:
                dp[i + 1] = dp[i] if s[i] != '0' else 0
                if s[i - 1] == '*':
                    if s[i] <= '6':
                        dp[i + 1] = (dp[i + 1] + 2 * dp[i - 1]) % M
                    elif s[i] > '6':
                        dp[i + 1] = (dp[i + 1] + dp[i - 1]) % M
                elif 10 <= int(s[i - 1] + s[i]) <= 26:
                    dp[i + 1] = (dp[i + 1] + dp[i - 1]) % M
        return dp[-1]
| 34.235294 | 68 | 0.270619 |
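A quick check of the class above against the examples from the problem statement, where '*' may stand for any digit from 1 to 9 (run next to the class definition):

s = Solution()
print(s.numDecodings("*"))   # 9
print(s.numDecodings("1*"))  # 18
print(s.numDecodings("2*"))  # 15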
3966694bc9cdbc7f2cb633478c58c3c89c53a5fd | 1,042 | py | Python | day24.py | binarygondola/adventofcode-2017 | fbcb545c19df6c5b3169714e74686d9a7b28a598 | [
"MIT"
] | null | null | null | day24.py | binarygondola/adventofcode-2017 | fbcb545c19df6c5b3169714e74686d9a7b28a598 | [
"MIT"
] | null | null | null | day24.py | binarygondola/adventofcode-2017 | fbcb545c19df6c5b3169714e74686d9a7b28a598 | [
"MIT"
] | null | null | null | def make_bridge(parts, current, end, m):
    possible = [x for x in parts if end == x[0] or end == x[1]]
    for p in possible:
        parts.remove(p)
        current.append(p)
        if end == p[0]:
            tmp = p[1]
        else:
            tmp = p[0]
        b = sum(sum(x) for x in current)
        max_length = max(b, m[0])
        parts_len_max = m[1]
        longest_max_length = m[2]
        if len(current) > parts_len_max:
            parts_len_max = len(current)
            longest_max_length = b
        elif len(current) == parts_len_max:
            longest_max_length = max(b, m[2])
        triple = [max_length, parts_len_max, longest_max_length]
        m = make_bridge(parts, current, tmp, triple)
        current.remove(p)
        parts.append(p)
    return m


file = open('day24.txt').read().split('\n')
parts = []
for i in file:
    a = list(map(int, i.split('/')))
    a.sort()
    parts.append(a)
parts.sort()

mm = make_bridge(parts, [], 0, [0, 0, 0])
print("part1:", mm[0])
print("part2:", mm[2])
| 22.170213 | 64 | 0.537428 |
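The search above can also be exercised on the worked example from the puzzle text instead of day24.txt (run next to the make_bridge definition); for that component set the strongest bridge has strength 31 and the longest bridge has strength 19:

example = [[0, 2], [2, 2], [2, 3], [3, 4], [3, 5], [0, 1], [10, 1], [9, 10]]
example_parts = sorted(sorted(p) for p in example)
best = make_bridge(example_parts, [], 0, [0, 0, 0])
print("part1:", best[0])  # 31
print("part2:", best[2])  # 19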
fb42363fe0404e63e343602742d7855cdce91e38 | 5,400 | py | Python | hotpot_km/tests/test_pooled_manager.py | maartenbreddels/hotpot_km | 59640727ebef76064c9a4681a1f425987a1cccb4 | [
"BSD-3-Clause"
] | null | null | null | hotpot_km/tests/test_pooled_manager.py | maartenbreddels/hotpot_km | 59640727ebef76064c9a4681a1f425987a1cccb4 | [
"BSD-3-Clause"
] | null | null | null | hotpot_km/tests/test_pooled_manager.py | maartenbreddels/hotpot_km | 59640727ebef76064c9a4681a1f425987a1cccb4 | [
"BSD-3-Clause"
] | null | null | null |
from contextlib import contextmanager
from subprocess import PIPE
from unittest import TestCase
from jupyter_client.kernelspec import NATIVE_KERNEL_NAME
import pytest
from traitlets.config.loader import Config
from .. import (
PooledKernelManager,
MaximumKernelsException,
)
from .utils import shutdown_all_direct, TestKernelManager
# Test that it works as normal with default config
class TestPooledKernelManagerUnused(TestKernelManager):
__test__ = True
@contextmanager
def _get_tcp_km(self):
c = Config()
km = PooledKernelManager(config=c)
try:
yield km
finally:
km.shutdown_all()
# Test that it works with an unstrict pool
class TestPooledKernelManagerApplied(TestKernelManager):
__test__ = True
@contextmanager
def _get_tcp_km(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 2
c.PooledKernelManager.pool_kwargs = dict(stdout=PIPE, stderr=PIPE)
km = PooledKernelManager(config=c)
try:
yield km
finally:
km.shutdown_all()
def test_exceed_pool_size(self):
with self._get_tcp_km() as km:
self.assertEqual(len(km._pool), 2)
kids = []
for i in range(4):
kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
self.assertEqual(len(km._pool), 2)
shutdown_all_direct(km)
for kid in kids:
self.assertNotIn(kid, km)
# Cycle again to assure the pool survives that
kids = []
for i in range(4):
kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
self.assertEqual(len(km._pool), 2)
km.shutdown_all()
for kid in kids:
self.assertNotIn(kid, km)
def test_decrease_pool_size(self):
with self._get_tcp_km() as km:
km.kernel_pool_size = 1
self.assertEqual(len(km._pool), 1)
def test_increase_pool_size(self):
with self._get_tcp_km() as km:
km.kernel_pool_size = 3
self.assertEqual(len(km._pool), 3)
# Test that it works with an strict pool
class TestPooledKernelManagerStrict(TestCase):
@contextmanager
def _get_tcp_km(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 2
c.PooledKernelManager.pool_kwargs = dict(stdout=PIPE, stderr=PIPE)
km = PooledKernelManager(config=c)
try:
yield km
finally:
km.shutdown_all()
def test_strict_name_correct(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 1
c.PooledKernelManager.pool_kernel_name = NATIVE_KERNEL_NAME
c.PooledKernelManager.strict_pool_names = True
km = PooledKernelManager(config=c)
try:
kid = km.start_kernel(kernel_name=NATIVE_KERNEL_NAME, stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
finally:
km.shutdown_all()
self.assertNotIn(kid, km)
def test_strict_name_incorrect(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 1
c.PooledKernelManager.pool_kernel_name = NATIVE_KERNEL_NAME
c.PooledKernelManager.strict_pool_names = True
km = PooledKernelManager(config=c)
try:
with self.assertRaisesRegex(ValueError, 'Cannot start kernel with name'):
kid = km.start_kernel(kernel_name='foo', stdout=PIPE, stderr=PIPE)
self.assertEqual(len(km), 1)
finally:
km.shutdown_all()
def test_strict_kwargs_correct(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 1
c.PooledKernelManager.pool_kwargs = dict(stdout=PIPE, stderr=PIPE)
c.PooledKernelManager.strict_pool_kwargs = True
km = PooledKernelManager(config=c)
try:
kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
finally:
km.shutdown_all()
self.assertNotIn(kid, km)
def test_strict_kwargs_incorrect(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 1
c.PooledKernelManager.pool_kwargs = dict(stdout=PIPE, stderr=PIPE)
c.PooledKernelManager.strict_pool_kwargs = True
km = PooledKernelManager(config=c)
try:
with self.assertRaisesRegex(ValueError, 'Cannot start kernel with kwargs'):
kid = km.start_kernel()
self.assertEqual(len(km), 1)
finally:
km.shutdown_all()
def test_both_strict_correct(self):
c = Config()
c.PooledKernelManager.kernel_pool_size = 1
c.PooledKernelManager.pool_kernel_name = NATIVE_KERNEL_NAME
c.PooledKernelManager.strict_pool_names = True
c.PooledKernelManager.pool_kwargs = dict(stdout=PIPE, stderr=PIPE)
c.PooledKernelManager.strict_pool_kwargs = True
km = PooledKernelManager(config=c)
try:
kid = km.start_kernel(kernel_name=NATIVE_KERNEL_NAME, stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
finally:
km.shutdown_all()
self.assertNotIn(kid, km)
| 31.952663 | 91 | 0.629074 |
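Outside the test harness the pooled manager is driven the same way the fixtures above drive it; a minimal sketch (assuming hotpot_km and a Python kernelspec are installed):

from subprocess import PIPE
from traitlets.config.loader import Config
from hotpot_km import PooledKernelManager

c = Config()
c.PooledKernelManager.kernel_pool_size = 2
c.PooledKernelManager.pool_kwargs = dict(stdout=PIPE, stderr=PIPE)

km = PooledKernelManager(config=c)
try:
    kid = km.start_kernel(stdout=PIPE, stderr=PIPE)  # served from the pre-warmed pool
finally:
    km.shutdown_all()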
e6f48b68068814bbbb753e2aa6e8f723ebb74094 | 7,170 | py | Python | apyfal/_iterators.py | Accelize/apyfal | 22dfe791e0956d3d3353daeba0c7a21dfe2f9b77 | [
"Apache-2.0"
] | 5 | 2018-09-23T23:15:06.000Z | 2019-07-04T00:19:44.000Z | apyfal/_iterators.py | Accelize/apyfal | 22dfe791e0956d3d3353daeba0c7a21dfe2f9b77 | [
"Apache-2.0"
] | null | null | null | apyfal/_iterators.py | Accelize/apyfal | 22dfe791e0956d3d3353daeba0c7a21dfe2f9b77 | [
"Apache-2.0"
] | 4 | 2018-07-17T08:39:41.000Z | 2020-01-10T23:15:38.000Z | # coding=utf-8
"""Accelerator iterators"""
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import chain
import re
from apyfal.host import Host
import apyfal.configuration as _cfg
import apyfal.exceptions as _exc
class _LazyClass:
"""
Class that gets attributes from the cached dict or
from the real accelerator
"""
def __setattr__(self, name, value):
# Set privates variables locally
if name.startswith('_'):
# Python 2 don't support object.__setattr__(self, name, value)
self.__dict__[name] = value
return
# Tries to set other names on real accelerator
setattr(self._get_accelerator_object(True), name, value)
def __getattr__(self, item):
# If accelerator instantiated, redirects getattr to it
if self._get_accelerator_object() is not None:
return getattr(self._get_accelerator_object(), item)
# If not, tries to get information from properties
try:
return self._properties[item]
# If not in properties, instantiates accelerator and get
# attribute from it
except KeyError:
return getattr(self._get_accelerator_object(True), item)
def __str__(self):
return str(self._get_accelerator_object() or self._properties['_repr'])
__repr__ = __str__
class _LazyMember(_LazyClass):
"""Lazy proxy class that represent Accelerator member
Args:
properties (dict): Member properties.
get_accelerator_object (function): Get accelerator function.
"""
def __init__(self, properties, get_accelerator_object):
self._get_accelerator_object = get_accelerator_object
self._properties = properties
class _LazyAccelerator(_LazyClass):
"""Accelerator proxy that stores information and lazily instantiates
the accelerator if needed.
Allows iterating over accelerators and getting some basic
information without losing time instantiating them.
But, if needed, instantiates the accelerator to provide its public interfaces.
Args:
host_properties (dict): Host properties directory.
config (apyfal.configuration.Configuration): Configuration.
"""
def __init__(self, host_properties, config):
self._accelerator_object = None
# Get accelerator keyword arguments
self._accelerator_kwargs = dict(
accelerator=host_properties['accelerator'], config=config,
stop_mode='keep', host_type=host_properties['host_type'])
if 'instance_id' in host_properties:
self._accelerator_kwargs[
'instance_id'] = host_properties['instance_id']
# Generates client properties
client_properties = dict(
name=host_properties['accelerator'],
_repr="<apyfal.client.Client accelerator='%s'>" %
host_properties['accelerator'])
if 'url' in host_properties:
# Remote clients
client_properties['url'] = host_properties['url']
client_properties['_repr'] = (
client_properties['_repr'].rstrip('>') +
" url='%s'>" % host_properties['url'])
# Generates accelerator members
self._properties = dict(
host=_LazyMember(host_properties, self._get_accelerator_object),
client=_LazyMember(client_properties, self._get_accelerator_object),
_repr="<apyfal.Accelerator client=(%s) host=(%s)>" % (
client_properties['_repr'], host_properties['_repr']))
def _get_accelerator_object(self, force_real_one=False):
"""
Lazy instantiates accelerator.
Args:
force_real_one (bool): Forces to instantiate real accelerator if
not already instantiated.
Returns:
apyfal.Accelerator
"""
if self._accelerator_object is None and force_real_one:
# Can't import it at top level
from apyfal import Accelerator
# Instantiates accelerator
self._accelerator_object = Accelerator(**self._accelerator_kwargs)
return self._accelerator_object
def _is_valid(host_dict, filters):
"""Validates host.
Args:
host_dict (dict): Host
filters (dict): Dict of re.match filters.
Returns:
bool: True if host is valid
"""
for key, match in filters.items():
if not match(host_dict[key]):
return False
return True
def _get_host_iter(host_type, config, host_name_prefix):
"""
Get hosts generator for the specified host_type
Args:
host_type (str): host type
config (apyfal.configuration.Configuration): Configuration.
host_name_prefix (bool or str): see iter_accelerators
host_name_prefix
Returns:
generator: Hosts generator
"""
try:
# Gets generator
generator = Host(host_type=host_type, config=config).iter_hosts(
host_name_prefix)
# Initializes generator and returns it
return chain((next(generator),), generator)
except (_exc.HostException, StopIteration):
return iter(())
def iter_accelerators(config=None, host_name_prefix=True, **filters):
"""
Iterates over all accelerators available on remote hosts.
Args:
config (apyfal.configuration.Configuration, path-like object or file-like object):
If not set, will search it in current working directory,
in current user "home" folder. If none found, will use default
configuration values.
Path-like object can be path, URL or cloud object URL.
host_name_prefix (bool or str): If True,
use "host_name_prefix" from configuration; if False
don't filter by prefix; if str, uses this str as prefix
filters: Arguments names are host properties to filter,
values are regular expressions.
Returns:
generator: Accelerators generator
"""
# Get configuration
config = _cfg.create_configuration(config)
# Initializes filters
for attr, pattern in filters.items():
filters[attr] = re.compile(pattern).match
host_type_match = filters.get('host_type')
# List available host_types
host_types = set()
host_types.add(config['host']['host_type'])
for section in config:
if section.startswith('host.'):
host_type = section.split('.', 1)[1]
if host_type_match is None or host_type_match(host_type):
host_types.add(host_type)
# Gets information for each host_type
futures = []
with ThreadPoolExecutor(max_workers=len(host_types)) as executor:
for host_type in host_types:
futures.append(executor.submit(
_get_host_iter, host_type, config, host_name_prefix))
# Yields lazy accelerators that match filters
for future in as_completed(futures):
for host in future.result():
if _is_valid(host, filters):
yield _LazyAccelerator(host_properties=host, config=config)
| 33.194444 | 90 | 0.65537 |
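A usage sketch for the generator above (assuming an apyfal configuration with at least one reachable host section); keyword filters are regular expressions matched against host properties such as host_type:

from apyfal._iterators import iter_accelerators

for accelerator in iter_accelerators(host_type='^AWS$'):
    # Reads cached host properties; the real Accelerator is only
    # instantiated when an attribute outside the cache is requested.
    print(accelerator.host.accelerator, accelerator.host.host_type)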
b145ad21c6109e164655f6075698d6fa4f10288c | 393 | py | Python | profiles/wsgi.py | rithik220/profiles-rest-api | d960ff98d75bec32f89fcfa11d6daa53f1b1db2d | [
"MIT"
] | null | null | null | profiles/wsgi.py | rithik220/profiles-rest-api | d960ff98d75bec32f89fcfa11d6daa53f1b1db2d | [
"MIT"
] | null | null | null | profiles/wsgi.py | rithik220/profiles-rest-api | d960ff98d75bec32f89fcfa11d6daa53f1b1db2d | [
"MIT"
] | null | null | null | """
WSGI config for profiles project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'profiles.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
98d628cc76db2b0feeb5f0b1db865c74fc5174ac | 518 | py | Python | isubscribe/migrations/0003_auto_20161007_2110.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 71 | 2016-12-25T12:06:07.000Z | 2021-02-21T21:14:48.000Z | isubscribe/migrations/0003_auto_20161007_2110.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 7 | 2016-12-23T23:18:45.000Z | 2021-06-10T18:58:14.000Z | isubscribe/migrations/0003_auto_20161007_2110.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 30 | 2017-01-01T16:18:19.000Z | 2021-04-21T15:06:47.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-07 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('isubscribe', '0002_auto_20161007_2108'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subscribe',
            name='status',
            field=models.IntegerField(choices=[(0, 'unknown'), (1, 'warning'), (2, 'critical')], max_length=3),
        ),
    ]
| 24.666667 | 111 | 0.6139 |
08f992626f6e23ddaa9284dafd3c3fe3f2a3c49c | 46 | py | Python | Exercicios/ex016/teste1.py | Carlos-Allison/Curso_de_HTML5_e_CCS3 | 20f2a639d792cd9e49ff9b54eeb69de44ba418e9 | [
"MIT"
] | null | null | null | Exercicios/ex016/teste1.py | Carlos-Allison/Curso_de_HTML5_e_CCS3 | 20f2a639d792cd9e49ff9b54eeb69de44ba418e9 | [
"MIT"
] | null | null | null | Exercicios/ex016/teste1.py | Carlos-Allison/Curso_de_HTML5_e_CCS3 | 20f2a639d792cd9e49ff9b54eeb69de44ba418e9 | [
"MIT"
] | null | null | null | def carro():
print('oi, mundo do caralho') | 23 | 33 | 0.630435 |
60177bcc979e6a6e5b4dfa37be153087d3d89cfb | 9,917 | py | Python | test/python/pulse/test_discrete_pulses.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | null | null | null | test/python/pulse/test_discrete_pulses.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | null | null | null | test/python/pulse/test_discrete_pulses.py | siddharthdangwal/qiskit-terra | af34eb06f28de18ef276e1e9029c62a4e35dd6a9 | [
"Apache-2.0"
] | 1 | 2020-07-13T17:56:46.000Z | 2020-07-13T17:56:46.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests discrete sampled pulse functions."""
import numpy as np
from qiskit.test import QiskitTestCase
from qiskit.pulse import Waveform, PulseError
import qiskit.pulse.library as library
import qiskit.pulse.library.continuous as continuous
class TestDiscretePulses(QiskitTestCase):
"""Test discreted sampled pulses."""
def test_constant(self):
"""Test discrete sampled constant pulse."""
amp = 0.5j
duration = 10
times = np.arange(0, duration) + 0.5 # to match default midpoint sampling strategy
constant_ref = continuous.constant(times, amp=amp)
constant_pulse = library.constant(duration, amp=amp)
self.assertIsInstance(constant_pulse, Waveform)
np.testing.assert_array_almost_equal(constant_pulse.samples, constant_ref)
def test_zero(self):
"""Test discrete sampled constant pulse."""
duration = 10
times = np.arange(0, duration) + 0.5
zero_ref = continuous.zero(times)
zero_pulse = library.zero(duration)
self.assertIsInstance(zero_pulse, Waveform)
np.testing.assert_array_almost_equal(zero_pulse.samples, zero_ref)
def test_square(self):
"""Test discrete sampled square wave."""
amp = 0.5
freq = 0.2
duration = 10
times = np.arange(0, duration) + 0.5
square_ref = continuous.square(times, amp=amp, freq=freq)
square_pulse = library.square(duration, amp=amp, freq=freq)
self.assertIsInstance(square_pulse, Waveform)
np.testing.assert_array_almost_equal(square_pulse.samples, square_ref)
# test single cycle
cycle_freq = 1./duration
square_cycle_ref = continuous.square(times, amp=amp, freq=cycle_freq)
square_cycle_pulse = library.square(duration, amp=amp)
np.testing.assert_array_almost_equal(square_cycle_pulse.samples, square_cycle_ref)
def test_sawtooth(self):
"""Test discrete sampled sawtooth wave."""
amp = 0.5
freq = 0.2
duration = 10
times = np.arange(0, duration) + 0.5
sawtooth_ref = continuous.sawtooth(times, amp=amp, freq=freq)
sawtooth_pulse = library.sawtooth(duration, amp=amp, freq=freq)
self.assertIsInstance(sawtooth_pulse, Waveform)
np.testing.assert_array_equal(sawtooth_pulse.samples, sawtooth_ref)
# test single cycle
cycle_freq = 1./duration
sawtooth_cycle_ref = continuous.sawtooth(times, amp=amp, freq=cycle_freq)
sawtooth_cycle_pulse = library.sawtooth(duration, amp=amp)
np.testing.assert_array_almost_equal(sawtooth_cycle_pulse.samples, sawtooth_cycle_ref)
def test_triangle(self):
"""Test discrete sampled triangle wave."""
amp = 0.5
freq = 0.2
duration = 10
times = np.arange(0, duration) + 0.5
triangle_ref = continuous.triangle(times, amp=amp, freq=freq)
triangle_pulse = library.triangle(duration, amp=amp, freq=freq)
self.assertIsInstance(triangle_pulse, Waveform)
np.testing.assert_array_almost_equal(triangle_pulse.samples, triangle_ref)
# test single cycle
cycle_freq = 1./duration
triangle_cycle_ref = continuous.triangle(times, amp=amp, freq=cycle_freq)
triangle_cycle_pulse = library.triangle(duration, amp=amp)
np.testing.assert_array_equal(triangle_cycle_pulse.samples, triangle_cycle_ref)
def test_cos(self):
"""Test discrete sampled cosine wave."""
amp = 0.5
period = 5
freq = 1/period
duration = 10
times = np.arange(0, duration) + 0.5
cos_ref = continuous.cos(times, amp=amp, freq=freq)
cos_pulse = library.cos(duration, amp=amp, freq=freq)
self.assertIsInstance(cos_pulse, Waveform)
np.testing.assert_array_almost_equal(cos_pulse.samples, cos_ref)
# test single cycle
cycle_freq = 1/duration
cos_cycle_ref = continuous.cos(times, amp=amp, freq=cycle_freq)
cos_cycle_pulse = library.cos(duration, amp=amp)
np.testing.assert_array_almost_equal(cos_cycle_pulse.samples, cos_cycle_ref)
def test_sin(self):
"""Test discrete sampled sine wave."""
amp = 0.5
period = 5
freq = 1/period
duration = 10
times = np.arange(0, duration) + 0.5
sin_ref = continuous.sin(times, amp=amp, freq=freq)
sin_pulse = library.sin(duration, amp=amp, freq=freq)
self.assertIsInstance(sin_pulse, Waveform)
np.testing.assert_array_equal(sin_pulse.samples, sin_ref)
# test single cycle
cycle_freq = 1/duration
sin_cycle_ref = continuous.sin(times, amp=amp, freq=cycle_freq)
sin_cycle_pulse = library.sin(duration, amp=amp)
np.testing.assert_array_almost_equal(sin_cycle_pulse.samples, sin_cycle_ref)
def test_gaussian(self):
"""Test gaussian pulse."""
amp = 0.5
sigma = 2
duration = 10
center = duration/2
times = np.arange(0, duration) + 0.5
gaussian_ref = continuous.gaussian(times, amp, center, sigma,
zeroed_width=2*center, rescale_amp=True)
gaussian_pulse = library.gaussian(duration, amp, sigma)
self.assertIsInstance(gaussian_pulse, Waveform)
np.testing.assert_array_almost_equal(gaussian_pulse.samples, gaussian_ref)
def test_gaussian_deriv(self):
"""Test discrete sampled gaussian derivative pulse."""
amp = 0.5
sigma = 2
duration = 10
center = duration/2
times = np.arange(0, duration) + 0.5
gaussian_deriv_ref = continuous.gaussian_deriv(times, amp, center, sigma)
gaussian_deriv_pulse = library.gaussian_deriv(duration, amp, sigma)
self.assertIsInstance(gaussian_deriv_pulse, Waveform)
np.testing.assert_array_almost_equal(gaussian_deriv_pulse.samples, gaussian_deriv_ref)
def test_sech(self):
"""Test sech pulse."""
amp = 0.5
sigma = 2
duration = 10
center = duration/2
times = np.arange(0, duration) + 0.5
sech_ref = continuous.sech(times, amp, center, sigma,
zeroed_width=2*center, rescale_amp=True)
sech_pulse = library.sech(duration, amp, sigma)
self.assertIsInstance(sech_pulse, Waveform)
np.testing.assert_array_almost_equal(sech_pulse.samples, sech_ref)
def test_sech_deriv(self):
"""Test discrete sampled sech derivative pulse."""
amp = 0.5
sigma = 2
duration = 10
center = duration/2
times = np.arange(0, duration) + 0.5
sech_deriv_ref = continuous.sech_deriv(times, amp, center, sigma)
sech_deriv_pulse = library.sech_deriv(duration, amp, sigma)
self.assertIsInstance(sech_deriv_pulse, Waveform)
np.testing.assert_array_almost_equal(sech_deriv_pulse.samples, sech_deriv_ref)
def test_gaussian_square(self):
"""Test discrete sampled gaussian square pulse."""
amp = 0.5
sigma = 0.1
risefall = 2
duration = 10
center = duration/2
width = duration-2*risefall
center = duration/2
times = np.arange(0, duration) + 0.5
gaussian_square_ref = continuous.gaussian_square(times, amp, center, width, sigma)
gaussian_square_pulse = library.gaussian_square(duration, amp, sigma, risefall)
self.assertIsInstance(gaussian_square_pulse, Waveform)
np.testing.assert_array_almost_equal(gaussian_square_pulse.samples, gaussian_square_ref)
def test_gaussian_square_args(self):
"""Gaussian square allows the user to specify risefall or width. Test this."""
amp = 0.5
sigma = 0.1
duration = 10
# risefall and width consistent: no error
library.gaussian_square(duration, amp, sigma, 2, width=6)
# supply width instead: no error
library.gaussian_square(duration, amp, sigma, width=6)
with self.assertRaises(PulseError):
library.gaussian_square(duration, amp, sigma, width=2, risefall=2)
with self.assertRaises(PulseError):
library.gaussian_square(duration, amp, sigma)
def test_drag(self):
"""Test discrete sampled drag pulse."""
amp = 0.5
sigma = 0.1
beta = 0
duration = 10
center = 10/2
times = np.arange(0, duration) + 0.5
# reference drag pulse
drag_ref = continuous.drag(times, amp, center, sigma, beta=beta,
zeroed_width=2*(center+1), rescale_amp=True)
drag_pulse = library.drag(duration, amp, sigma, beta=beta)
self.assertIsInstance(drag_pulse, Waveform)
np.testing.assert_array_almost_equal(drag_pulse.samples, drag_ref)
def test_period_deprecation_warning(self):
"""Tests for DeprecationWarning"""
amp = 0.5
period = 5.
duration = 10
self.assertWarns(DeprecationWarning,
lambda: library.triangle(duration, amp=amp, period=period))
self.assertWarns(DeprecationWarning,
lambda: library.sawtooth(duration, amp=amp, period=period))
self.assertWarns(DeprecationWarning,
lambda: library.square(duration, amp=amp, period=period))
| 41.493724 | 96 | 0.658163 |
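The discrete helpers tested above return Waveform objects whose samples follow the midpoint-sampled continuous shapes; a short sketch (assuming a Qiskit Terra version that still ships these qiskit.pulse.library functions):

from qiskit.pulse import library

gauss = library.gaussian(16, 0.5, 4)  # duration, amp, sigma
print(type(gauss).__name__, len(gauss.samples))  # Waveform 16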
817a7b305e39993614036e0a1674a611abd0b122 | 3,185 | py | Python | OathSaveFileParser.py | Ecophagy/OathSaveFileParser | c61d33417f7e7233b7289ceabd7604e48b7fdeec | [
"MIT"
] | 1 | 2021-04-05T07:59:59.000Z | 2021-04-05T07:59:59.000Z | OathSaveFileParser.py | Ecophagy/OathSaveFileParser | c61d33417f7e7233b7289ceabd7604e48b7fdeec | [
"MIT"
] | null | null | null | OathSaveFileParser.py | Ecophagy/OathSaveFileParser | c61d33417f7e7233b7289ceabd7604e48b7fdeec | [
"MIT"
] | null | null | null | import json
from os import path
from pathlib import Path
from suits import Suit
from visions import visions
import tkinter as tk
from tkinter import filedialog
tts_save_location = path.join(str(Path.home()), "Documents", "My Games", "Tabletop Simulator", "Saves")
game_state_key = "LuaScriptState"
dispossessed_cards_key = "curDispossessedDeckCards"
dispossessed_cards_count_key = "curDispossessedDeckCardCount"
world_deck_cards_key = "curWorldDeckCards"
world_deck_cards_count_key = "curWorldDeckCardCount"
map_cards_key = "curMapNormalCards"
def read_json_file(file_name):
with open(file_name, 'r') as f:
data = f.read()
return json.loads(data)
def order_by_suit(card_list):
suit_lists = [[], [], [], [], [], []]
full_card_list = read_json_file("cardsuits.json")
for card in card_list:
if card not in visions: # ignore visions - they have no suit
suit_id = full_card_list[card]
suit_lists[suit_id].append(card)
return suit_lists
def print_suit_ordered_card_list(suit_ordered_card_list):
i = 0
for suit in suit_ordered_card_list:
print(Suit(i).name)
if not suit:
print("\tNone")
for card in suit:
print(f"\t{card}")
i += 1
def parse_oath_save_json(json_data):
save_game_state = json.loads(json_data[game_state_key])
full_card_list = read_json_file("cardsuits.json")
dispossessed = save_game_state[dispossessed_cards_key]
world_deck = save_game_state[world_deck_cards_key]
cards_on_map = save_game_state[map_cards_key]
# Remove sites, relics, and edifices - we only care about denizens
denizen_cards_on_map = []
for site in cards_on_map:
for card, flipped in site:
if card in full_card_list:
denizen_cards_on_map.append(card)
# The archive is everything else
archive = []
for card in full_card_list:
if card not in dispossessed \
and card not in world_deck \
and card not in denizen_cards_on_map:
archive.append(card)
# Print out our card lists ordered by suit
print(f"Cards on map ({len(denizen_cards_on_map)}):")
print_suit_ordered_card_list(order_by_suit(denizen_cards_on_map))
print()
print(f"The Dispossessed ({save_game_state[dispossessed_cards_count_key]}):")
print_suit_ordered_card_list(order_by_suit(dispossessed))
print()
print(f"World Deck ({save_game_state[world_deck_cards_count_key]} including 5 Visions):")
print_suit_ordered_card_list(order_by_suit(world_deck))
print()
print(f"The Archive: {len(archive)}")
print_suit_ordered_card_list(order_by_suit(archive))
if __name__ == '__main__':
default_path = tts_save_location if path.isdir(tts_save_location) else "."
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(initialdir=default_path,
title="Oath Save File",
filetypes=[("json files","*.json")])
if file_path:
save_file_json = read_json_file(file_path)
parse_oath_save_json(save_file_json)
| 31.534653 | 103 | 0.686342 |
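order_by_suit() above expects cardsuits.json to map every denizen card name to an integer suit id (0-5, the indices of the Suit enum); a minimal sketch of that shape with made-up card names:

import json

cardsuits = {
    "Example Card A": 0,  # suit ids index the six Suit values
    "Example Card B": 3,
}
with open("cardsuits.json", "w") as f:
    json.dump(cardsuits, f, indent=2)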
2c88d693c8677652e9d81616596b3c6d534e7491 | 1,522 | py | Python | src/flask_tat/http2kafka.py | cdumay/flask-tat | 94a1cbee2e4be424eefc9009004df819e90c2b32 | [
"Apache-2.0"
] | null | null | null | src/flask_tat/http2kafka.py | cdumay/flask-tat | 94a1cbee2e4be424eefc9009004df819e90c2b32 | [
"Apache-2.0"
] | null | null | null | src/flask_tat/http2kafka.py | cdumay/flask-tat | 94a1cbee2e4be424eefc9009004df819e90c2b32 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <[email protected]>
"""
from cdumay_rest_client.client import RESTClient
from flask_tat.base import BaseTATClient
class HTTP2KafkaClient(BaseTATClient):
    def message_add(self, topic, **kwargs):
        return self.client.do_request(
            method="POST", path="/message/{}".format(topic.lstrip('/')),
            data=kwargs, parse_output=False
        )

    def message_reply(self, topic, tag_ref, text):
        return self.client.do_request(
            method="POST", path="/message/{}".format(topic.lstrip('/')),
            data=dict(text=text, tagReference=tag_ref, action="reply"),
            parse_output=False
        )

    def message_relabel(self, topic, tag_ref, labels):
        return self.client.do_request(
            method="PUT", path="/message/{}".format(topic.lstrip('/')),
            data=dict(labels=labels, tagReference=tag_ref, action="relabel"),
            parse_output=False
        )

    @property
    def client(self):
        if self._client is None:
            self._client = RESTClient(
                server=self.app.config['TAT_URL'],
                headers={
                    "X-Tat_username": self.app.config["TAT_USERNAME"],
                    "X-Tat_password": self.app.config["TAT_PASSWORD"],
                    "Content-type": "application/json",
                },
                ssl_verify=self.app.config["TAT_SSL_VERIFY"],
            )
        return self._client
| 32.382979 | 77 | 0.580815 |
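The client property above reads exactly four Flask config keys; a minimal sketch of that configuration side (values are placeholders, and attaching the extension to the app is handled by BaseTATClient, which is not part of this record):

from flask import Flask

app = Flask(__name__)
app.config.update(
    TAT_URL="https://tat.example.com",
    TAT_USERNAME="tat.system.user",
    TAT_PASSWORD="change-me",
    TAT_SSL_VERIFY=True,
)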
d06787023b511b1cd1c7ac93c9a31c5366cb9162 | 5,308 | py | Python | modeling/backbones/deit.py | BrandonHanx/reid-strong-baseline | 9df1dc3d6217af2d3cb40d0627f77b36a66e5f89 | [
"MIT"
] | null | null | null | modeling/backbones/deit.py | BrandonHanx/reid-strong-baseline | 9df1dc3d6217af2d3cb40d0627f77b36a66e5f89 | [
"MIT"
] | null | null | null | modeling/backbones/deit.py | BrandonHanx/reid-strong-baseline | 9df1dc3d6217af2d3cb40d0627f77b36a66e5f89 | [
"MIT"
] | null | null | null | import copy
import math
from functools import partial
import numpy as np
import timm.models.vision_transformer as ViTcls
import torch
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
class ViT(ViTcls.VisionTransformer):
def __init__(self, mode, **kwargs):
super().__init__(**kwargs)
self.mode = mode
def forward(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
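        # In "jpm" mode, stop before the last transformer block and return the
        # un-normalized token sequence; ViTWithJPM runs the final block itself
        # for both its global and its shuffled local branches.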
if self.mode == "jpm":
for blk in self.blocks[:-1]:
x = blk(x)
return x
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if self.mode == "first":
return x[:, 0]
if self.mode == "average":
return x[:, 1:].mean(dim=1)
return NotImplementedError
class ViTWithJPM(torch.nn.Module):
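    """ViT backbone wrapped with a jigsaw patch module (JPM) head: forward()
    returns one global feature from the ordinary last block plus one local
    feature per shuffled patch group."""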
def __init__(self, vit, shift_offset=5, shuffle_group=4):
super().__init__()
self.vit = vit
self.jpm = copy.deepcopy(
self.vit.blocks[-1]
) # initialize the weight same as last layer
self.jpm_norm = copy.deepcopy(self.vit.norm)
self.shift_offset = shift_offset
self.shuffle_group = shuffle_group
def forward(self, x):
x = self.vit(x)
global_feat = self.vit.blocks[-1](x)
global_feat = self.vit.norm(global_feat)[:, 0]
cls_token = x[:, 0].unsqueeze(dim=1)
feat_len = x.shape[1] - 1
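        # Local (jigsaw) branch: cyclically shift the patch tokens by
        # `shift_offset`, shuffle them, then split them into `shuffle_group`
        # groups; each group is prefixed with the class token and passed
        # through the duplicated last block below.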
local_feat = torch.cat(
[x[:, self.shift_offset + 1 :], x[:, 1 : self.shift_offset + 1]], dim=1
) # shift
random_idx = list(np.random.permutation(feat_len))
local_feat = local_feat[:, random_idx] # shuffle
jpm_feats = [global_feat]
group_idxs = np.linspace(0, feat_len, self.shuffle_group + 1, dtype=int)
for i in range(len(group_idxs) - 1):
feat = torch.cat(
[cls_token, local_feat[:, group_idxs[i] : group_idxs[i + 1]]], dim=1
)
feat = self.jpm(feat)
feat = self.jpm_norm(feat)
jpm_feats.append(feat[:, 0])
return jpm_feats
def resize_pos_embed(posemb, posemb_new, gs_new):
# Rescale the grid of position embeddings when loading from state_dict.
print("Resized position embedding: {} to {}".format(posemb.shape, posemb_new.shape))
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
gs_old = int(math.sqrt(len(posemb_grid)))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(
posemb_grid, size=gs_new, mode="bilinear", align_corners=False
)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def checkpoint_filter_fn(state_dict, model, gs_new):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if "model" in state_dict:
# For deit models
state_dict = state_dict["model"]
for k, v in state_dict.items():
if "patch_embed.proj.weight" in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == "pos_embed" and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(v, model.pos_embed, gs_new)
out_dict[k] = v
return out_dict
def create_vit(variant, mode, img_size, pretrained, patch_size, **kwargs):
model = ViT(mode=mode, img_size=img_size, **kwargs)
model.default_cfg = ViTcls.default_cfgs[variant]
gs_new = (int(img_size[0] / patch_size), int(img_size[1] / patch_size))
if pretrained:
load_pretrained(
model, filter_fn=partial(checkpoint_filter_fn, model=model, gs_new=gs_new)
)
return model
model_archs = {}
model_archs["vit_deit_small_patch16_224"] = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6
)
model_archs["vit_deit_base_patch16_224"] = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12
)
model_archs["vit_base_patch16_224_in21k"] = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12
)
def deit(arch="vit_deit_small_patch16_224"):
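    """Build a pretrained DeiT/ViT backbone at 256x128 input resolution;
    arch names containing 'jpm' additionally get a ViTWithJPM head."""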
if arch == "deit_jpm_small_patch16_224":
arch = "vit_deit_small_patch16_224"
model_arch = model_archs[arch]
vit = create_vit(
variant=arch, mode="jpm", img_size=(256, 128), pretrained=True, **model_arch
)
return ViTWithJPM(vit)
elif arch == "vit_jpm_base_patch16_224_in21k":
arch = "vit_base_patch16_224_in21k"
model_arch = model_archs[arch]
vit = create_vit(
variant=arch, mode="jpm", img_size=(256, 128), pretrained=True, **model_arch
)
return ViTWithJPM(vit)
model_arch = model_archs[arch]
return create_vit(
variant=arch, mode="first", img_size=(256, 128), pretrained=True, **model_arch
)
| 34.025641 | 96 | 0.623964 |
62c8449617cd10b2b9ad513a2b62d50465bea566 | 24,374 | py | Python | compiler/dna/parser/parser.py | AnonymousDeveloper65535/libpandadna | 3110a8d576d22093e4c735081c5f639d28397a17 | [
"BSD-3-Clause"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | compiler/dna/parser/parser.py | AnonymousDeveloper65535/libpandadna | 3110a8d576d22093e4c735081c5f639d28397a17 | [
"BSD-3-Clause"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | compiler/dna/parser/parser.py | AnonymousDeveloper65535/libpandadna | 3110a8d576d22093e4c735081c5f639d28397a17 | [
"BSD-3-Clause"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | import os
from dna.components.DNAAnimBuilding import DNAAnimBuilding
from dna.components.DNAAnimProp import DNAAnimProp
from dna.components.DNABattleCell import DNABattleCell
from dna.components.DNACornice import DNACornice
from dna.components.DNADoor import DNADoor
from dna.components.DNAFlatBuilding import DNAFlatBuilding
from dna.components.DNAFlatDoor import DNAFlatDoor
from dna.components.DNAGroup import DNAGroup
from dna.components.DNAInteractiveProp import DNAInteractiveProp
from dna.components.DNALandmarkBuilding import DNALandmarkBuilding
from dna.components.DNANode import DNANode
from dna.components.DNAProp import DNAProp
from dna.components.DNASign import DNASign
from dna.components.DNASignBaseline import DNASignBaseline
from dna.components.DNASignGraphic import DNASignGraphic
from dna.components.DNASignText import DNASignText
from dna.components.DNAStreet import DNAStreet
from dna.components.DNASuitPoint import DNASuitPoint
from dna.components.DNAVisGroup import DNAVisGroup
from dna.components.DNAWall import DNAWall
from dna.components.DNAWindows import DNAWindows
# NOTE: DNAError is raised by p_error() below but is never imported in this
# file; a minimal definition is assumed here so the module is self-contained.
class DNAError(Exception):
    pass
def p_dna(p):
pass
p_dna.__doc__ = '''\
dna : dna object
| object'''
def p_object(p):
p[0] = p[1]
p_object.__doc__ = '''\
object : suitpoint
| group
| model
| font
| store_texture'''
def p_number(p):
p[0] = p[1]
p_number.__doc__ = '''\
number : FLOAT
| INTEGER'''
def p_lpoint3f(p):
lpoint3f = (p[1], p[2], p[3])
p[0] = lpoint3f
p_lpoint3f.__doc__ = '''\
lpoint3f : number number number'''
def p_suitpoint(p):
argCount = len(p)
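    # Two grammar arities (see the docstring): 9 parser symbols when no
    # landmark building index is given, 11 when the optional index is present.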
if argCount == 9:
index = p[3]
pointTypeStr = p[5]
pos = p[7]
landmarkBuildingIndex = -1
else:
index = p[3]
pointTypeStr = p[5]
pos = p[7]
landmarkBuildingIndex = p[9]
point = DNASuitPoint(index, pointTypeStr, pos,
landmarkBuildingIndex=landmarkBuildingIndex)
p.parser.dnaStore.storeSuitPoint(point)
p_suitpoint.__doc__ = '''\
suitpoint : STORE_SUIT_POINT "[" number "," suitpointtype "," lpoint3f "]"
| STORE_SUIT_POINT "[" number "," suitpointtype "," lpoint3f "," number "]"'''
def p_suitpointtype(p):
pointTypeStr = p[1]
p[0] = DNASuitPoint.pointTypeMap[pointTypeStr]
p_suitpointtype.__doc__ = '''\
suitpointtype : STREET_POINT
| FRONT_DOOR_POINT
| SIDE_DOOR_POINT
| COGHQ_IN_POINT
| COGHQ_OUT_POINT'''
def p_string(p):
p[0] = p[1]
p_string.__doc__ = '''\
string : QUOTED_STRING
| UNQUOTED_STRING'''
def p_dnagroupdef(p):
name = p[2]
p[0] = DNAGroup(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_dnagroupdef.__doc__ = '''\
dnagroupdef : GROUP string'''
def p_dnanodedef(p):
name = p[2]
p[0] = DNANode(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_dnanodedef.__doc__ = '''\
dnanodedef : NODE string'''
def p_visgroupdef(p):
name = p[2]
p[0] = DNAVisGroup(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_visgroupdef.__doc__ = '''\
visgroupdef : VISGROUP string'''
def p_dnagroup(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_dnagroup.__doc__ = '''\
dnagroup : dnagroupdef "[" subgroup_list "]"'''
def p_visgroup(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_visgroup.__doc__ = '''\
visgroup : visgroupdef "[" subvisgroup_list "]"'''
def p_string_opt_list(p):
argCount = len(p)
if argCount == 2:
p[0] = []
elif (argCount == 3) and (p[2] is not None):
p[0] = p[1]
p[0].append(p[2])
p_string_opt_list.__doc__ = '''\
string_opt_list : string_opt_list string
| empty'''
def p_vis(p):
parentVis, visList = p[3], p[4]
p.parser.parentGroup.addVisible(parentVis)
for vis in visList:
p.parser.parentGroup.addVisible(vis)
p_vis.__doc__ = '''\
vis : VIS "[" string string_opt_list "]"'''
def p_empty(p):
pass
p_empty.__doc__ = '''\
empty : '''
def p_group(p):
p[0] = p[1]
p_group.__doc__ = '''\
group : dnagroup
| visgroup
| dnanode
| windows
| cornice
| door'''
def p_dnanode(p):
p[0] = p[1]
p_dnanode.__doc__ = '''\
dnanode : prop
| sign
| signbaseline
| signtext
| flatbuilding
| wall
| landmarkbuilding
| street
| signgraphic
| dnanode_grp'''
def p_dnanode_grp(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_dnanode_grp.__doc__ = '''\
dnanode_grp : dnanodedef "[" subdnanode_list "]"'''
def p_sign(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_sign.__doc__ = '''\
sign : signdef "[" subprop_list "]"'''
def p_signgraphic(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_signgraphic.__doc__ = '''\
signgraphic : signgraphicdef "[" subsigngraphic_list "]"'''
def p_prop(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_prop.__doc__ = '''\
prop : propdef "[" subprop_list "]"
| animpropdef "[" subanimprop_list "]"
| interactivepropdef "[" subinteractiveprop_list "]"'''
def p_signbaseline(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_signbaseline.__doc__ = '''\
signbaseline : baselinedef "[" subbaseline_list "]"'''
def p_signtest(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_signtest.__doc__ = '''\
signtext : signtextdef "[" subtext_list "]"'''
def p_flatbuilding(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_flatbuilding.__doc__ = '''\
flatbuilding : flatbuildingdef "[" subflatbuilding_list "]"'''
def p_wall(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_wall.__doc__ = '''\
wall : walldef "[" subwall_list "]"'''
def p_windows(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_windows.__doc__ = '''\
windows : windowsdef "[" subwindows_list "]"'''
def p_cornice(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_cornice.__doc__ = '''\
cornice : cornicedef "[" subcornice_list "]"'''
def p_landmarkbuilding(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_landmarkbuilding.__doc__ = '''\
landmarkbuilding : landmarkbuildingdef "[" sublandmarkbuilding_list "]"
| animbuildingdef "[" subanimbuilding_list "]"'''
def p_street(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_street.__doc__ = '''\
street : streetdef "[" substreet_list "]"'''
def p_door(p):
p[0] = p[1]
p.parser.parentGroup = p[0].parent
p_door.__doc__ = '''\
door : doordef "[" subdoor_list "]"
| flatdoordef "[" subdoor_list "]"'''
def p_propdef(p):
name = p[2]
p[0] = DNAProp(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_propdef.__doc__ = '''\
propdef : PROP string'''
def p_animpropdef(p):
name = p[2]
p[0] = DNAAnimProp(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_animpropdef.__doc__ = '''\
animpropdef : ANIM_PROP string'''
def p_interactivepropdef(p):
name = p[2]
p[0] = DNAInteractiveProp(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_interactivepropdef.__doc__ = '''\
interactivepropdef : INTERACTIVE_PROP string'''
def p_flatbuildingdef(p):
name = p[2]
p[0] = DNAFlatBuilding(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_flatbuildingdef.__doc__ = '''\
flatbuildingdef : FLAT_BUILDING string'''
def p_walldef(p):
p[0] = DNAWall('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_walldef.__doc__ = '''\
walldef : WALL'''
def p_windowsdef(p):
p[0] = DNAWindows('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_windowsdef.__doc__ = '''\
windowsdef : WINDOWS'''
def p_cornicedef(p):
p[0] = DNACornice('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_cornicedef.__doc__ = '''\
cornicedef : CORNICE'''
def p_landmarkbuildingdef(p):
name = p[2]
p[0] = DNALandmarkBuilding(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
blockNumber = int(p.parser.dnaStore.getBlock(name))
p.parser.dnaStore.storeBlockNumber(blockNumber)
zoneId = 0
try:
zoneId = int(p[0].getVisGroup().name.split(':')[0])
except:
pass
finally:
p.parser.dnaStore.storeBlockZone(blockNumber, zoneId)
p_landmarkbuildingdef.__doc__ = '''\
landmarkbuildingdef : LANDMARK_BUILDING string'''
def p_animbuildingdef(p):
name = p[2]
p[0] = DNAAnimBuilding(name)
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
blockNumber = int(p.parser.dnaStore.getBlock(name))
p.parser.dnaStore.storeBlockNumber(blockNumber)
zoneId = int(p[0].getVisGroup().name.split(':')[0])
p.parser.dnaStore.storeBlockZone(blockNumber, zoneId)
p_animbuildingdef.__doc__ = '''\
animbuildingdef : ANIM_BUILDING string'''
def p_doordef(p):
p[0] = DNADoor('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_doordef.__doc__ = '''\
doordef : DOOR'''
def p_flatdoordef(p):
p[0] = DNAFlatDoor('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup.parent.setHasDoor(True)
p.parser.parentGroup = p[0]
p_flatdoordef.__doc__ = '''\
flatdoordef : FLAT_DOOR'''
def p_streetdef(p):
p[0] = DNAStreet(p[2])
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_streetdef.__doc__ = '''\
streetdef : STREET string'''
def p_signdef(p):
p[0] = DNASign()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signdef.__doc__ = '''\
signdef : SIGN'''
def p_signgraphicdef(p):
p[0] = DNASignGraphic('')
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signgraphicdef.__doc__ = '''\
signgraphicdef : GRAPHIC'''
def p_baselinedef(p):
p[0] = DNASignBaseline()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_baselinedef.__doc__ = '''\
baselinedef : BASELINE'''
def p_signtextdef(p):
p[0] = DNASignText()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signtextdef.__doc__ = '''\
signtextdef : TEXT'''
def p_suitedge(p):
startPointIndex, endPointIndex = p[3], p[4]
zoneId = int(p.parser.parentGroup.name)
edge = p.parser.dnaStore.storeSuitEdge(
startPointIndex, endPointIndex, zoneId)
p.parser.parentGroup.addSuitEdge(edge)
p_suitedge.__doc__ = '''\
suitedge : SUIT_EDGE "[" number number "]"'''
def p_battlecell(p):
width, height, pos = p[3], p[4], p[5]
p[0] = DNABattleCell(width, height, pos)
p.parser.parentGroup.addBattleCell(p[0])
p_battlecell.__doc__ = '''\
battlecell : BATTLE_CELL "[" number number lpoint3f "]"'''
def p_subgroup_list(p):
p[0] = p[1]
argCount = len(p)
if argCount == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subgroup_list.__doc__ = '''\
subgroup_list : subgroup_list group
| empty'''
def p_subvisgroup_list(p):
p[0] = p[1]
argCount = len(p)
if argCount == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subvisgroup_list.__doc__ = '''\
subvisgroup_list : subvisgroup_list group
| subvisgroup_list suitedge
| subvisgroup_list battlecell
| subvisgroup_list vis
| empty'''
def p_pos(p):
p.parser.parentGroup.setPos(p[3])
p_pos.__doc__ = '''\
pos : POS "[" lpoint3f "]"'''
def p_hpr(p):
p.parser.parentGroup.setHpr(p[3])
p_hpr.__doc__ = '''\
hpr : HPR "[" lpoint3f "]"
| NHPR "[" lpoint3f "]"'''
def p_scale(p):
p.parser.parentGroup.setScale(p[3])
p_scale.__doc__ = '''\
scale : SCALE "[" lpoint3f "]"'''
def p_flags(p):
p.parser.parentGroup.setFlags(p[3])
p_flags.__doc__ = '''\
flags : FLAGS "[" string "]"'''
def p_dnanode_sub(p):
p[0] = p[1]
p_dnanode_sub.__doc__ = '''\
dnanode_sub : group
| pos
| hpr
| scale'''
def p_dnaprop_sub(p):
p[0] = p[1]
p_dnaprop_sub.__doc__ = '''\
dnaprop_sub : code
| color'''
def p_dnaanimprop_sub(p):
p[0] = p[1]
p_dnaanimprop_sub.__doc__ = '''\
dnaanimprop_sub : anim'''
def p_dnainteractiveprop_sub(p):
p[0] = p[1]
p_dnainteractiveprop_sub.__doc__ = '''\
dnainteractiveprop_sub : cell_id'''
def p_anim(p):
p.parser.parentGroup.setAnim(p[3])
p_anim.__doc__ = '''\
anim : ANIM "[" string "]"'''
def p_cell_id(p):
p.parser.parentGroup.setCellId(p[3])
p_cell_id.__doc__ = '''\
cell_id : CELL_ID "[" number "]"'''
def p_baseline_sub(p):
p[0] = p[1]
p_baseline_sub.__doc__ = '''\
baseline_sub : code
| color
| width
| height
| indent
| kern
| stomp
| stumble
| wiggle
| flags'''
def p_text_sub(p):
p[0] = p[1]
p_text_sub.__doc__ = '''\
text_sub : letters'''
def p_signgraphic_sub(p):
p[0] = p[1]
p_signgraphic_sub.__doc__ = '''\
signgraphic_sub : width
| height
| code
| color'''
def p_flatbuilding_sub(p):
p[0] = p[1]
p_flatbuilding_sub.__doc__ = '''\
flatbuilding_sub : width'''
def p_wall_sub(p):
p[0] = p[1]
p_wall_sub.__doc__ = '''\
wall_sub : height
| code
| color'''
def p_windows_sub(p):
p[0] = p[1]
p_windows_sub.__doc__ = '''\
windows_sub : code
| color
| windowcount'''
def p_cornice_sub(p):
p[0] = p[1]
p_cornice_sub.__doc__ = '''\
cornice_sub : code
| color'''
def p_landmarkbuilding_sub(p):
p[0] = p[1]
p_landmarkbuilding_sub.__doc__ = '''\
landmarkbuilding_sub : code
| title
| article
| building_type
| wall_color'''
def p_animbuilding_sub(p):
p[0] = p[1]
p_animbuilding_sub.__doc__ = '''\
animbuilding_sub : anim'''
def p_door_sub(p):
p[0] = p[1]
p_door_sub.__doc__ = '''\
door_sub : code
| color'''
def p_street_sub(p):
p[0] = p[1]
p_street_sub.__doc__ = '''\
street_sub : code
| texture
| color'''
def p_texture(p):
p.parser.parentGroup.setTexture(p[3])
p_texture.__doc__ = '''\
texture : TEXTURE "[" string "]"'''
def p_title(p):
title = p[3]
parentName = p.parser.parentGroup.name
blockNumber = int(p.parser.dnaStore.getBlock(parentName))
p.parser.dnaStore.storeBlockTitle(blockNumber, title)
p_title.__doc__ = '''\
title : TITLE "[" string "]"'''
def p_article(p):
article = p[3]
parentName = p.parser.parentGroup.name
blockNumber = int(p.parser.dnaStore.getBlock(parentName))
p.parser.dnaStore.storeBlockArticle(blockNumber, article)
p_article.__doc__ = '''\
article : ARTICLE "[" string "]"'''
def p_building_type(p):
buildingType = p[3]
parentName = p.parser.parentGroup.name
blockNumber = int(p.parser.dnaStore.getBlock(parentName))
p.parser.dnaStore.storeBlockBuildingType(blockNumber, buildingType)
p_building_type.__doc__ = '''\
building_type : BUILDING_TYPE "[" string "]"'''
def p_wall_color(p):
wallColor = (p[3], p[4], p[5], p[6])
p.parser.parentGroup.setWallColor(wallColor)
p_wall_color.__doc__ = '''\
wall_color : COLOR "[" number number number number "]"'''
def p_count(p):
p.parser.parentGroup.setWindowCount(p[3])
p_count.__doc__ = '''\
windowcount : COUNT "[" number "]"'''
def p_letters(p):
p.parser.parentGroup.setLetters(p[3])
p_letters.__doc__ = '''\
letters : LETTERS "[" string "]"'''
def p_width(p):
p.parser.parentGroup.setWidth(p[3])
p_width.__doc__ = '''\
width : WIDTH "[" number "]"'''
def p_height(p):
p.parser.parentGroup.setHeight(p[3])
p_height.__doc__ = '''\
height : HEIGHT "[" number "]"'''
def p_stomp(p):
p.parser.parentGroup.setStomp(p[3])
p_stomp.__doc__ = '''\
stomp : STOMP "[" number "]"'''
def p_indent(p):
p.parser.parentGroup.setIndent(p[3])
p_indent.__doc__ = '''\
indent : INDENT "[" number "]"'''
def p_kern(p):
p.parser.parentGroup.setKern(p[3])
p_kern.__doc__ = '''\
kern : KERN "[" number "]"'''
def p_stumble(p):
p.parser.parentGroup.setStumble(p[3])
p_stumble.__doc__ = '''\
stumble : STUMBLE "[" number "]"'''
def p_wiggle(p):
p.parser.parentGroup.setWiggle(p[3])
p_wiggle.__doc__ = '''\
wiggle : WIGGLE "[" number "]"'''
def p_code(p):
p.parser.parentGroup.setCode(p[3])
p_code.__doc__ = '''\
code : CODE "[" string "]"'''
def p_color(p):
p.parser.parentGroup.setColor((p[3], p[4], p[5], p[6]))
p_color.__doc__ = '''\
color : COLOR "[" number number number number "]"'''
def p_subprop_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subprop_list.__doc__ = '''\
subprop_list : subprop_list dnanode_sub
| subprop_list dnaprop_sub
| empty'''
def p_subanimprop_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subanimprop_list.__doc__ = '''\
subanimprop_list : subanimprop_list dnanode_sub
| subanimprop_list dnaprop_sub
| subanimprop_list dnaanimprop_sub
| empty'''
def p_subinteractiveprop_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subinteractiveprop_list.__doc__ = '''\
subinteractiveprop_list : subinteractiveprop_list dnanode_sub
| subinteractiveprop_list dnaprop_sub
| subinteractiveprop_list dnaanimprop_sub
| subinteractiveprop_list dnainteractiveprop_sub
| empty'''
def p_subbaseline_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subbaseline_list.__doc__ = '''\
subbaseline_list : subbaseline_list dnanode_sub
| subbaseline_list baseline_sub
| empty'''
def p_subtext_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subtext_list.__doc__ = '''\
subtext_list : subtext_list dnanode_sub
| subtext_list text_sub
| empty'''
def p_subdnanode_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subdnanode_list.__doc__ = '''\
subdnanode_list : subdnanode_list dnanode_sub
| empty'''
def p_subsigngraphic_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subsigngraphic_list.__doc__ = '''\
subsigngraphic_list : subsigngraphic_list dnanode_sub
| subsigngraphic_list signgraphic_sub
| empty'''
def p_subflatbuilding_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subflatbuilding_list.__doc__ = '''\
subflatbuilding_list : subflatbuilding_list dnanode_sub
| subflatbuilding_list flatbuilding_sub
| empty'''
def p_subwall_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subwall_list.__doc__ = '''\
subwall_list : subwall_list dnanode_sub
| subwall_list wall_sub
| empty'''
def p_subwindows_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subwindows_list.__doc__ = '''\
subwindows_list : subwindows_list dnanode_sub
| subwindows_list windows_sub
| empty'''
def p_subcornice_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subcornice_list.__doc__ = '''\
subcornice_list : subcornice_list dnanode_sub
| subcornice_list cornice_sub
| empty'''
def p_sublandmarkbuilding_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_sublandmarkbuilding_list.__doc__ = '''\
sublandmarkbuilding_list : sublandmarkbuilding_list dnanode_sub
| sublandmarkbuilding_list landmarkbuilding_sub
| empty'''
def p_subanimbuilding_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subanimbuilding_list.__doc__ = '''\
subanimbuilding_list : subanimbuilding_list dnanode_sub
| subanimbuilding_list landmarkbuilding_sub
| subanimbuilding_list animbuilding_sub
| empty'''
def p_subdoor_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_subdoor_list.__doc__ = '''\
subdoor_list : subdoor_list dnanode_sub
| subdoor_list door_sub
| empty'''
def p_substreet_list(p):
p[0] = p[1]
if len(p) == 3:
group = p[2]
p[0].append(group)
else:
p[0] = []
p_substreet_list.__doc__ = '''\
substreet_list : substreet_list dnanode_sub
| substreet_list street_sub
| empty'''
def p_modeldef(p):
modelType, filename = p[1], p[2]
filename, extension = os.path.splitext(filename)
if not extension:
extension = '.bam'
filename += extension
p.parser.modelType = modelType
p.parser.modelName = filename
p_modeldef.__doc__ = '''\
modeldef : MODEL string
| HOODMODEL string
| PLACEMODEL string'''
def p_model(p):
pass
p_model.__doc__ = '''\
model : modeldef "[" modelnode_list "]"'''
def p_modelnode_list(p):
pass
p_modelnode_list.__doc__ = '''\
modelnode_list : modelnode_list node
| empty'''
def p_node(p):
argCount = len(p)
if argCount == 6:
root, code, search = p[3], p[4], p[4]
else:
root, code, search = p[3], p[4], p[5]
p.parser.dnaStore.storeCatalogCode(root, code)
modelName = p.parser.modelName
if p.parser.modelType == 'hood_model':
p.parser.dnaStore.storeHoodNode(code, modelName, search)
elif p.parser.modelType == 'place_model':
p.parser.dnaStore.storePlaceNode(code, modelName, search)
else:
p.parser.dnaStore.storeNode(code, modelName, search)
p_node.__doc__ = '''\
node : STORE_NODE "[" string string "]"
| STORE_NODE "[" string string string "]"'''
def p_store_texture(p):
argCount = len(p)
if argCount == 6:
code, filename = p[3], p[4]
else:
root, code, filename = p[3], p[4], p[5]
p.parser.dnaStore.storeCatalogCode(root, code)
p.parser.dnaStore.storeTexture(code, filename)
p_store_texture.__doc__ = '''\
store_texture : STORE_TEXTURE "[" string string "]"
| STORE_TEXTURE "[" string string string "]"'''
def p_font(p):
root, code, filename = p[3], p[4], p[5]
filename, extension = os.path.splitext(filename)
if not extension:
extension = '.bam'
filename += extension
p.parser.dnaStore.storeCatalogCode(root, code)
p.parser.dnaStore.storeFont(filename, code)
p_font.__doc__ = '''\
font : STORE_FONT "[" string string string "]"'''
def p_error(p):
if p is None:
raise DNAError('Syntax error unexpected EOF')
sub = (str(p.lexer.lineno), str(p))
raise DNAError('Syntax error at line %s token=%s' % sub)
| 23.618217 | 88 | 0.613071 |
3c0e30fd8826a6d64e72c087d608db1e0990c05f | 23,667 | py | Python | cardpay/api/payments_api.py | Sinkler/python-sdk-v2 | a1ad7cc9900f8adf967ca4dec0bb05d8eddc2999 | [
"MIT"
] | null | null | null | cardpay/api/payments_api.py | Sinkler/python-sdk-v2 | a1ad7cc9900f8adf967ca4dec0bb05d8eddc2999 | [
"MIT"
] | null | null | null | cardpay/api/payments_api.py | Sinkler/python-sdk-v2 | a1ad7cc9900f8adf967ca4dec0bb05d8eddc2999 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
CardPay REST API
Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchant uses API to create payments, refunds, payouts or recurrings, check or update transaction status and get information about created transactions. In API authentication process based on [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see changelog section. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cardpay.api_client import ApiClient
class PaymentsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_payment(self, payment_request, **kwargs): # noqa: E501
"""Create payment # noqa: E501
Endpoint for creation payments. Request example presented for Gateway mode. # noqa: E501
:param PaymentRequest payment_request: paymentRequest (required)
:return: PaymentGatewayCreationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
(data) = self.create_payment_with_http_info(payment_request, **kwargs) # noqa: E501
return data
def create_payment_with_http_info(self, payment_request, **kwargs): # noqa: E501
"""Create payment # noqa: E501
Endpoint for creation payments. Request example presented for Gateway mode. # noqa: E501
:param PaymentRequest payment_request: paymentRequest (required)
:return: PaymentGatewayCreationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['payment_request'] # noqa: E501
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_payment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'payment_request' is set
if ('payment_request' not in params or
params['payment_request'] is None):
raise ValueError("Missing the required parameter `payment_request` when calling `create_payment`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'payment_request' in params:
body_params = params['payment_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
return self.api_client.call_api(
'/api/payments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaymentGatewayCreationResponse", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_authentication_data1(self, payment_id, **kwargs): # noqa: E501
"""Get payment 3DS result information # noqa: E501
:param str payment_id: Payment ID (required)
:return: AuthenticationDataResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
(data) = self.get_authentication_data1_with_http_info(
payment_id, **kwargs
) # noqa: E501
return data
def get_authentication_data1_with_http_info(
self, payment_id, **kwargs
): # noqa: E501
"""Get payment 3DS result information # noqa: E501
:param str payment_id: Payment ID (required)
:return: AuthenticationDataResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["payment_id"] # noqa: E501
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_authentication_data1" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'payment_id' is set
if "payment_id" not in params or params["payment_id"] is None:
raise ValueError(
"Missing the required parameter `payment_id` when calling `get_authentication_data1`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "payment_id" in params:
path_params["paymentId"] = params["payment_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
return self.api_client.call_api(
"/api/payments/{paymentId}/threedsecure",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="AuthenticationDataResponse", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_payment(self, payment_id, **kwargs): # noqa: E501
"""Get payment information # noqa: E501
:param str payment_id: Payment ID (required)
:return: PaymentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
(data) = self.get_payment_with_http_info(payment_id, **kwargs) # noqa: E501
return data
def get_payment_with_http_info(self, payment_id, **kwargs): # noqa: E501
"""Get payment information # noqa: E501
:param str payment_id: Payment ID (required)
:return: PaymentResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['payment_id'] # noqa: E501
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params or
params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `get_payment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'payment_id' in params:
path_params['paymentId'] = params['payment_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return self.api_client.call_api(
'/api/payments/{paymentId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaymentResponse', # noqa: E501
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_payment_methods(self, **kwargs): # noqa: E501
"""Get payment methods # noqa: E501
Endpoint for get payment methods by current terminal code # noqa: E501
:return: PaymentMethodsList
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
(data) = self.get_payment_methods_with_http_info(**kwargs) # noqa: E501
return data
def get_payment_methods_with_http_info(self, **kwargs): # noqa: E501
"""Get payment methods # noqa: E501
Endpoint for get payment methods by current terminal code # noqa: E501
:return: PaymentMethodsList
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payment_methods" % key
)
params[key] = val
del params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
return self.api_client.call_api(
"/api/payment_methods",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PaymentMethodsList", # noqa: E501
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_payments(self, request_id, **kwargs): # noqa: E501
"""Get payments information # noqa: E501
:param str request_id: Request ID (required)
:param str currency: [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) currency code of transactions currency
:param datetime end_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period ends (not inclusive), UTC time, must be less than 7 days after 'start_time', default is current time (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:param int max_count: Limit number of returned transactions (must be less than 10000, default is 1000)
:param str merchant_order_id: Merchant order number from the merchant system
:param str payment_method: Used payment method type name from payment methods list
:param str sort_order: Sort based on order of results. `asc` for ascending order or `desc` for descending order (default value)
:param datetime start_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period starts (inclusive), UTC time, default is 24 hours before 'end_time' (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:return: PaymentsList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
(data) = self.get_payments_with_http_info(request_id, **kwargs) # noqa: E501
return data
def get_payments_with_http_info(self, request_id, **kwargs): # noqa: E501
"""Get payments information # noqa: E501
:param str request_id: Request ID (required)
:param str currency: [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) currency code of transactions currency
:param datetime end_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period ends (not inclusive), UTC time, must be less than 7 days after 'start_time', default is current time (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:param int max_count: Limit number of returned transactions (must be less than 10000, default is 1000)
:param str merchant_order_id: Merchant order number from the merchant system
:param str payment_method: Used payment method type name from payment methods list
:param str sort_order: Sort based on order of results. `asc` for ascending order or `desc` for descending order (default value)
:param datetime start_time: Date and time up to milliseconds (in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format) when requested period starts (inclusive), UTC time, default is 24 hours before 'end_time' (format: yyyy-MM-dd'T'HH:mm:ss'Z')
:return: PaymentsList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['request_id', 'currency', 'end_time', 'max_count', 'merchant_order_id', 'payment_method', 'sort_order', 'start_time'] # noqa: E501
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payments" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'request_id' is set
if ('request_id' not in params or
params['request_id'] is None):
raise ValueError("Missing the required parameter `request_id` when calling `get_payments`") # noqa: E501
if "request_id" in params and len(params["request_id"]) > 50:
raise ValueError(
"Invalid value for parameter `request_id` when calling `get_payments`, length must be less than or equal to `50`"
) # noqa: E501
if "request_id" in params and len(params["request_id"]) < 1:
raise ValueError(
"Invalid value for parameter `request_id` when calling `get_payments`, length must be greater than or equal to `1`"
) # noqa: E501
if "max_count" in params and params["max_count"] > 10000: # noqa: E501
raise ValueError(
"Invalid value for parameter `max_count` when calling `get_payments`, must be a value less than or equal to `10000`"
) # noqa: E501
if "merchant_order_id" in params and len(params["merchant_order_id"]) > 50:
raise ValueError(
"Invalid value for parameter `merchant_order_id` when calling `get_payments`, length must be less than or equal to `50`"
) # noqa: E501
if "merchant_order_id" in params and len(params["merchant_order_id"]) < 0:
raise ValueError(
"Invalid value for parameter `merchant_order_id` when calling `get_payments`, length must be greater than or equal to `0`"
) # noqa: E501
if "payment_method" in params and len(params["payment_method"]) > 50:
raise ValueError(
"Invalid value for parameter `payment_method` when calling `get_payments`, length must be less than or equal to `50`"
) # noqa: E501
if "payment_method" in params and len(params["payment_method"]) < 0:
raise ValueError(
"Invalid value for parameter `payment_method` when calling `get_payments`, length must be greater than or equal to `0`"
) # noqa: E501
if "sort_order" in params and not re.search(
r"asc|desc", params["sort_order"]
): # noqa: E501
raise ValueError(
"Invalid value for parameter `sort_order` when calling `get_payments`, must conform to the pattern `/asc|desc/`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
if 'end_time' in params:
query_params.append(('end_time', params['end_time'])) # noqa: E501
if 'max_count' in params:
query_params.append(('max_count', params['max_count'])) # noqa: E501
if 'merchant_order_id' in params:
query_params.append(('merchant_order_id', params['merchant_order_id'])) # noqa: E501
if 'payment_method' in params:
query_params.append(('payment_method', params['payment_method'])) # noqa: E501
if 'request_id' in params:
query_params.append(('request_id', params['request_id'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sort_order', params['sort_order'])) # noqa: E501
if 'start_time' in params:
query_params.append(('start_time', params['start_time'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return self.api_client.call_api(
'/api/payments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaymentsList', # noqa: E501
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_payment(self, payment_id, payment_patch_request, **kwargs): # noqa: E501
"""Update payment # noqa: E501
:param str payment_id: Payment ID (required)
:param PaymentPatchRequest payment_patch_request: paymentPatchRequest (required)
:return: PaymentUpdateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
(data) = self.update_payment_with_http_info(payment_id, payment_patch_request, **kwargs) # noqa: E501
return data
def update_payment_with_http_info(self, payment_id, payment_patch_request, **kwargs): # noqa: E501
"""Update payment # noqa: E501
:param str payment_id: Payment ID (required)
:param PaymentPatchRequest payment_patch_request: paymentPatchRequest (required)
:return: PaymentUpdateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['payment_id', 'payment_patch_request'] # noqa: E501
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_payment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params or
params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `update_payment`") # noqa: E501
# verify the required parameter 'payment_patch_request' is set
if ('payment_patch_request' not in params or
params['payment_patch_request'] is None):
raise ValueError("Missing the required parameter `payment_patch_request` when calling `update_payment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'payment_id' in params:
path_params['paymentId'] = params['payment_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'payment_patch_request' in params:
body_params = params['payment_patch_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
return self.api_client.call_api(
'/api/payments/{paymentId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaymentUpdateResponse', # noqa: E501
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 42.490126 | 546 | 0.620357 |
f530970a2ccba43192e634aa9f1b6a2bea51339a | 839 | py | Python | ykdl/extractors/bilibili/vc.py | panda-mute/ykdl | 56cea24f1513f21aedbe80b75c25f7c3b1e07704 | [
"MIT"
] | null | null | null | ykdl/extractors/bilibili/vc.py | panda-mute/ykdl | 56cea24f1513f21aedbe80b75c25f7c3b1e07704 | [
"MIT"
] | null | null | null | ykdl/extractors/bilibili/vc.py | panda-mute/ykdl | 56cea24f1513f21aedbe80b75c25f7c3b1e07704 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .._common import *
class BiliVC(VideoExtractor):
name = '哔哩哔哩 小视频 (Bili VC)'
def prepare(self):
info = VideoInfo(self.name)
        self.vid = match1(self.url, r'video/(\d+)')
video_data = get_response(
'https://api.vc.bilibili.com/clip/v1/video/detail',
params={'video_id': self.vid}).json()
info.title = video_data['data']['item']['description']
info.artist = video_data['data']['user']['name']
info.stream_types.append('current')
info.streams['current'] = {
'container': 'mp4',
'video_profile': 'current',
'src' : [video_data['data']['item']['video_playurl']],
'size': int(video_data['data']['item']['video_size'])
}
return info
site = BiliVC()
| 25.424242 | 67 | 0.539928 |
0d97cc9704487494d5ab19baa806a7d3e64d8b5e | 123 | py | Python | simfile/_private/dedent.py | ianklatzco/simfile | 8f4ec2fb9437b4071f6f92cca3d8de1b4071a2bc | [
"MIT"
] | 22 | 2017-04-24T05:37:13.000Z | 2022-03-08T00:41:37.000Z | simfile/_private/dedent.py | ianklatzco/simfile | 8f4ec2fb9437b4071f6f92cca3d8de1b4071a2bc | [
"MIT"
] | 10 | 2021-05-31T01:21:56.000Z | 2022-03-17T04:26:54.000Z | simfile/_private/dedent.py | ianklatzco/simfile | 8f4ec2fb9437b4071f6f92cca3d8de1b4071a2bc | [
"MIT"
] | 3 | 2019-06-05T15:23:53.000Z | 2021-09-11T02:39:36.000Z | from textwrap import dedent
def dedent_and_trim(string: str) -> str:
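    """Strip leading newlines and trailing spaces, then dedent the result."""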
return dedent(string.lstrip('\r\n').rstrip(' ')) | 24.6 | 52 | 0.699187 |
500a52a206b7dbc231a02183c2e61b698775f744 | 4,496 | py | Python | env/lib/python3.6/site-packages/django_coverage/utils/module_tools/module_walker.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 1 | 2019-04-21T18:57:57.000Z | 2019-04-21T18:57:57.000Z | env/lib/python3.6/site-packages/django_coverage/utils/module_tools/module_walker.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | null | null | null | env/lib/python3.6/site-packages/django_coverage/utils/module_tools/module_walker.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | null | null | null | """
Copyright 2009 55 Minutes (http://www.55minutes.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os, re, sys
from glob import glob
from data_storage import *
from module_loader import find_or_load_module
try:
set
except:
from sets import Set as set
__all__ = ('get_all_modules',)
def _build_pkg_path(pkg_name, pkg, path):
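    # Map a filesystem path under one of pkg's roots to a dotted module path
    # anchored at pkg_name (returns on the first matching root).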
for rp in [x for x in pkg.__path__ if path.startswith(x)]:
p = path.replace(rp, '').replace(os.path.sep, '.')
return pkg_name + p
def _build_module_path(pkg_name, pkg, path):
return _build_pkg_path(pkg_name, pkg, os.path.splitext(path)[0])
def _prune_whitelist(whitelist, blacklist):
excluded = Excluded().excluded
for wp in whitelist[:]:
for bp in blacklist:
if re.search(bp, wp):
whitelist.remove(wp)
excluded.append(wp)
break
return whitelist
def _parse_module_list(m_list):
packages = Packages().packages
modules = Modules().modules
excluded = Excluded().excluded
errors = Errors().errors
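    # Import each dotted name one component at a time: names whose every
    # component is a package end up in `packages`, leaf modules in `modules`,
    # and anything that fails to import is recorded in `errors`.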
for m in m_list:
components = m.split('.')
m_name = ''
search_path = []
processed=False
for i, c in enumerate(components):
m_name = '.'.join([x for x in m_name.split('.') if x] + [c])
try:
module = find_or_load_module(m_name, search_path or None)
except ImportError:
processed=True
errors.append(m)
break
try:
search_path.extend(module.__path__)
except AttributeError:
processed = True
if i+1==len(components):
modules[m_name] = module
else:
errors.append(m)
break
if not processed:
packages[m_name] = module
def prune_dirs(root, dirs, exclude_dirs):
regexes = [re.compile(exclude_dir) for exclude_dir in exclude_dirs]
for path, dir_ in [(os.path.join(root, dir_), dir_) for dir_ in dirs]:
for regex in regexes:
if regex.search(path):
dirs.remove(dir_)
break
def _get_all_packages(pkg_name, pkg, blacklist, exclude_dirs):
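    # Walk every directory under the package, prune excluded/blacklisted
    # subtrees, and import whatever remains; import failures are recorded in
    # `errors` and their subtrees are dropped from the walk.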
packages = Packages().packages
errors = Errors().errors
for path in pkg.__path__:
for root, dirs, files in os.walk(path):
prune_dirs(root, dirs, exclude_dirs or [])
m_name = _build_pkg_path(pkg_name, pkg, root)
try:
if _prune_whitelist([m_name], blacklist):
m = find_or_load_module(m_name, [os.path.split(root)[0]])
packages[m_name] = m
else:
for d in dirs[:]:
dirs.remove(d)
except ImportError:
errors.append(m_name)
for d in dirs[:]:
dirs.remove(d)
def _get_all_modules(pkg_name, pkg, blacklist):
modules = Modules().modules
errors = Errors().errors
for p in pkg.__path__:
for f in glob('%s/*.py' %p):
m_name = _build_module_path(pkg_name, pkg, f)
try:
if _prune_whitelist([m_name], blacklist):
m = find_or_load_module(m_name, [p])
modules[m_name] = m
except ImportError:
errors.append(m_name)
def get_all_modules(whitelist, blacklist=None, exclude_dirs=None):
packages = Packages().packages
modules = Modules().modules
excluded = Excluded().excluded
errors = Errors().errors
whitelist = _prune_whitelist(whitelist, blacklist or [])
_parse_module_list(whitelist)
for pkg_name, pkg in packages.copy().iteritems():
_get_all_packages(pkg_name, pkg, blacklist, exclude_dirs)
for pkg_name, pkg in packages.copy().iteritems():
_get_all_modules(pkg_name, pkg, blacklist)
return packages, modules, list(set(excluded)), list(set(errors))
| 33.058824 | 77 | 0.604315 |
fc3df84d8c1609864fd145a2874cb5a587e3b768 | 2,334 | py | Python | tests/models/test_vision.py | lavoiems/lightning-bolts | 208e92ba3dcdbc029afd37e09ec9461fbcf3f293 | [
"Apache-2.0"
] | 822 | 2020-04-21T03:30:43.000Z | 2021-03-07T06:41:31.000Z | tests/models/test_vision.py | lavoiems/lightning-bolts | 208e92ba3dcdbc029afd37e09ec9461fbcf3f293 | [
"Apache-2.0"
] | 538 | 2020-04-18T01:07:58.000Z | 2021-03-09T13:48:50.000Z | tests/models/test_vision.py | lavoiems/lightning-bolts | 208e92ba3dcdbc029afd37e09ec9461fbcf3f293 | [
"Apache-2.0"
] | 162 | 2020-04-17T15:44:54.000Z | 2021-03-09T14:04:02.000Z | import pytest
import torch
from packaging import version
from pytorch_lightning import LightningDataModule, Trainer
from pytorch_lightning import __version__ as pl_version
from pytorch_lightning import seed_everything
from torch.utils.data import DataLoader
from pl_bolts.datamodules import FashionMNISTDataModule, MNISTDataModule
from pl_bolts.datasets import DummyDataset
from pl_bolts.models.vision import GPT2, ImageGPT, SemSegment, UNet
class DummyDataModule(LightningDataModule):
def train_dataloader(self):
train_ds = DummyDataset((3, 35, 120), (35, 120), num_samples=100)
return DataLoader(train_ds, batch_size=1)
@pytest.mark.skipif(
version.parse(pl_version) > version.parse("1.1.0"), reason="igpt code not updated for latest lightning"
)
def test_igpt(tmpdir, datadir):
seed_everything(0)
dm = MNISTDataModule(data_dir=datadir, normalize=False)
model = ImageGPT()
trainer = Trainer(
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model, datamodule=dm)
trainer.test(datamodule=dm)
assert trainer.callback_metrics["test_loss"] < 1.7
dm = FashionMNISTDataModule(data_dir=datadir, num_workers=1)
model = ImageGPT(classify=True)
trainer = Trainer(
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
logger=False,
checkpoint_callback=False,
)
trainer.fit(model, datamodule=dm)
@torch.no_grad()
def test_gpt2():
seed_everything(0)
seq_len = 17
batch_size = 32
vocab_size = 16
x = torch.randint(0, vocab_size, (seq_len, batch_size))
model = GPT2(
embed_dim=16,
heads=2,
layers=2,
num_positions=seq_len,
vocab_size=vocab_size,
num_classes=10,
)
model(x)
@torch.no_grad()
def test_unet():
x = torch.rand(10, 3, 28, 28)
model = UNet(num_classes=2)
y = model(x)
assert y.shape == torch.Size([10, 2, 28, 28])
def test_semantic_segmentation(tmpdir):
dm = DummyDataModule()
model = SemSegment(num_classes=19)
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model, datamodule=dm)
loss = trainer.progress_bar_dict["loss"]
assert float(loss) > 0
| 26.522727 | 107 | 0.697087 |
d5589c962083b52d957741cadbcac76a2de5af2b | 4,614 | py | Python | theano/sparse/tests/test_sp2.py | intel/Theano-dev | 6ca6fd4646f9e958058c7bce52cd51923c05c2f4 | [
"BSD-3-Clause"
] | 64 | 2016-10-02T20:41:56.000Z | 2020-03-11T14:59:40.000Z | theano/sparse/tests/test_sp2.py | intel/Theano-dev | 6ca6fd4646f9e958058c7bce52cd51923c05c2f4 | [
"BSD-3-Clause"
] | 4 | 2017-06-12T05:12:38.000Z | 2018-03-15T03:16:30.000Z | theano/sparse/tests/test_sp2.py | intel/Theano-dev | 6ca6fd4646f9e958058c7bce52cd51923c05c2f4 | [
"BSD-3-Clause"
] | 30 | 2016-10-27T21:59:00.000Z | 2021-02-20T09:55:14.000Z | from __future__ import absolute_import, print_function, division
import unittest
from nose.plugins.skip import SkipTest
import numpy
try:
import scipy.sparse as sp
except ImportError:
pass # The variable enable_sparse will be used to disable the test file.
import theano
from theano import config
from theano import tensor
from theano import sparse
if not theano.sparse.enable_sparse:
raise SkipTest('Optional package sparse disabled')
from theano.sparse.sandbox.sp2 import (
Poisson, poisson, Binomial, Multinomial, multinomial)
from theano.tests import unittest_tools as utt
from theano.sparse.tests.test_basic import as_sparse_format
class PoissonTester(utt.InferShapeTester):
x = {}
a = {}
for format in sparse.sparse_formats:
variable = getattr(theano.sparse, format + '_matrix')
rand = numpy.array(numpy.random.randint(1, 4, size=(3, 4)) - 1,
dtype=theano.config.floatX)
x[format] = variable()
a[format] = as_sparse_format(rand, format)
def setUp(self):
super(PoissonTester, self).setUp()
self.op_class = Poisson
def test_op(self):
for format in sparse.sparse_formats:
f = theano.function(
[self.x[format]],
poisson(self.x[format]))
tested = f(self.a[format])
assert tested.format == format
assert tested.dtype == self.a[format].dtype
assert numpy.allclose(numpy.floor(tested.data), tested.data)
assert tested.shape == self.a[format].shape
def test_infer_shape(self):
for format in sparse.sparse_formats:
self._compile_and_check([self.x[format]],
[poisson(self.x[format])],
[self.a[format]],
self.op_class)
class BinomialTester(utt.InferShapeTester):
n = tensor.scalar()
p = tensor.scalar()
shape = tensor.lvector()
_n = 5
_p = .25
_shape = numpy.asarray([3, 5], dtype='int64')
inputs = [n, p, shape]
_inputs = [_n, _p, _shape]
def setUp(self):
super(BinomialTester, self).setUp()
self.op_class = Binomial
def test_op(self):
for sp_format in sparse.sparse_formats:
for o_type in sparse.float_dtypes:
f = theano.function(
self.inputs,
Binomial(sp_format, o_type)(*self.inputs))
tested = f(*self._inputs)
assert tested.shape == tuple(self._shape)
assert tested.format == sp_format
assert tested.dtype == o_type
assert numpy.allclose(numpy.floor(tested.todense()),
tested.todense())
def test_infer_shape(self):
for sp_format in sparse.sparse_formats:
for o_type in sparse.float_dtypes:
self._compile_and_check(
self.inputs,
[Binomial(sp_format, o_type)(*self.inputs)],
self._inputs,
self.op_class)
class MultinomialTester(utt.InferShapeTester):
p = sparse.csr_matrix()
_p = sp.csr_matrix(numpy.asarray([[0.0, 0.5, 0.0, 0.5],
[0.1, 0.2, 0.3, 0.4],
[0.0, 1.0, 0.0, 0.0],
[0.3, 0.3, 0.0, 0.4]],
dtype=config.floatX))
def setUp(self):
super(MultinomialTester, self).setUp()
self.op_class = Multinomial
def test_op(self):
n = tensor.lscalar()
f = theano.function([self.p, n], multinomial(n, self.p))
_n = 5
tested = f(self._p, _n)
assert tested.shape == self._p.shape
assert numpy.allclose(numpy.floor(tested.todense()), tested.todense())
assert tested[2, 1] == _n
n = tensor.lvector()
f = theano.function([self.p, n], multinomial(n, self.p))
_n = numpy.asarray([1, 2, 3, 4], dtype='int64')
tested = f(self._p, _n)
assert tested.shape == self._p.shape
assert numpy.allclose(numpy.floor(tested.todense()), tested.todense())
assert tested[2, 1] == _n[2]
def test_infer_shape(self):
self._compile_and_check([self.p],
[multinomial(5, self.p)],
[self._p],
self.op_class,
warn=False)
if __name__ == '__main__':
unittest.main()
| 31.82069 | 78 | 0.55505 |
9d2c49c2e414ca26cb7ee9138aebc5ce489201b9 | 146 | py | Python | haweb/apps/issues/models.py | edilio/tobeawebproperty | 317205bf27ab76a430ea56a474e1739ee71f164e | ["MIT"] | null | null | null | haweb/apps/issues/models.py | edilio/tobeawebproperty | 317205bf27ab76a430ea56a474e1739ee71f164e | ["MIT"] | 4 | 2015-01-02T21:39:58.000Z | 2015-06-23T02:18:57.000Z | haweb/apps/issues/models.py | edilio/tobeawebproperty | 317205bf27ab76a430ea56a474e1739ee71f164e | ["MIT"] | null | null | null |
from django.db import models
ISSUES_TYPE_OPTIONS = (
(1, 'Maintenance Issues'),
(2, 'Tenant Health Issues'),
(3, 'Safety Issues'),
)
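# Illustrative sketch (editor's addition, not part of the original app): ISSUES_TYPE_OPTIONS
# looks intended as the ``choices`` argument of an integer field on an issue model, along
# the lines of the hypothetical model below.
#
#   class Issue(models.Model):
#       issue_type = models.IntegerField(choices=ISSUES_TYPE_OPTIONS, default=1)
#       description = models.TextField(blank=True)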
| 18.25 | 32 | 0.636986 |
d61339c23639e7098fc2c352fd233c048038e0a8 | 5,401 | py | Python | tests/test_root_object_type.py | Informasjonsforvaltning/modelldcatnotordf | 995129ff9f6fb95f9a9d875b27f3aa14bac9b7f1 | ["Apache-2.0"] | 1 | 2020-11-29T18:36:21.000Z | 2020-11-29T18:36:21.000Z | tests/test_root_object_type.py | Informasjonsforvaltning/modelldcatnotordf | 995129ff9f6fb95f9a9d875b27f3aa14bac9b7f1 | ["Apache-2.0"] | 142 | 2020-10-07T08:52:55.000Z | 2021-11-18T15:09:31.000Z | tests/test_root_object_type.py | Informasjonsforvaltning/modelldcatnotordf | 995129ff9f6fb95f9a9d875b27f3aa14bac9b7f1 | ["Apache-2.0"] | null | null | null |
"""Test cases for the root object type module."""
from concepttordf import Concept
import pytest
from pytest_mock import MockFixture
from rdflib import Graph
from skolemizer.testutils import skolemization
from modelldcatnotordf.modelldcatno import RootObjectType
from tests.testutils import assert_isomorphic
"""
A test class for testing the class RootObjectType.
"""
def test_instantiate_rootobjecttype() -> None:
"""It does not raise an exception."""
try:
_ = RootObjectType()
except Exception:
pytest.fail("Unexpected Exception ..")
def test_to_graph_should_return_identifier_set_at_constructor() -> None:
"""It returns a title graph isomorphic to spec."""
"""It returns an identifier graph isomorphic to spec."""
rootobjecttype = RootObjectType("http://example.com/rootobjecttypes/1")
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
<http://example.com/rootobjecttypes/1> a modelldcatno:RootObjectType;
.
"""
g1 = Graph().parse(data=rootobjecttype.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_title_and_identifier() -> None:
"""It returns a title graph isomorphic to spec."""
"""It returns an identifier graph isomorphic to spec."""
rootobjecttype = RootObjectType()
rootobjecttype.identifier = "http://example.com/rootobjecttypes/1"
rootobjecttype.title = {"nb": "Tittel 1", "en": "Title 1"}
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
<http://example.com/rootobjecttypes/1> a modelldcatno:RootObjectType;
dct:title "Title 1"@en, "Tittel 1"@nb ;
.
"""
g1 = Graph().parse(data=rootobjecttype.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_title_and_skolemization(mocker: MockFixture) -> None:
"""It returns a title graph isomorphic to spec."""
rootobjecttype = RootObjectType()
rootobjecttype.title = {"nb": "Tittel 1", "en": "Title 1"}
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
<http://example.com/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
a modelldcatno:RootObjectType ;
dct:title "Title 1"@en, "Tittel 1"@nb ;
.
"""
mocker.patch(
"skolemizer.Skolemizer.add_skolemization", return_value=skolemization,
)
g1 = Graph().parse(data=rootobjecttype.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_dct_identifier_as_graph() -> None:
"""It returns a dct_identifier graph isomorphic to spec."""
rootobjecttype = RootObjectType()
rootobjecttype.identifier = "http://example.com/rootobjecttypes/1"
rootobjecttype.dct_identifier = "123456789"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
<http://example.com/rootobjecttypes/1> a modelldcatno:RootObjectType ;
dct:identifier "123456789";
.
"""
g1 = Graph().parse(data=rootobjecttype.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
def test_to_graph_should_return_subject() -> None:
"""It returns a subject graph isomorphic to spec."""
rootobjecttype = RootObjectType()
rootobjecttype.identifier = "http://example.com/rootobjecttypes/1"
subject = Concept()
subject.identifier = "https://example.com/subjects/1"
rootobjecttype.subject = subject
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
<http://example.com/rootobjecttypes/1> a modelldcatno:RootObjectType ;
dct:subject <https://example.com/subjects/1> ;
.
<https://example.com/subjects/1> a skos:Concept .
"""
g1 = Graph().parse(data=rootobjecttype.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2)
| 36.248322 | 85 | 0.65673 |
227dba9107d875763f662515083c420a6e39b4d2 | 10,812 | py | Python | fake news challenge (FNC-1)/util.py | kishormishra3/DeepLearn | bc0dfad7b4694aa5d872b5bdddd6e3a17d139d7d | ["MIT"] | 1,756 | 2017-05-24T12:46:44.000Z | 2022-03-30T15:23:26.000Z | fake news challenge (FNC-1)/util.py | kshitizbhansali/DeepLearn | e4b72d921695062d5cc84f4968c3fb57e258428f | ["Apache-2.0"] | 20 | 2017-05-23T15:23:39.000Z | 2019-04-12T18:07:04.000Z | fake news challenge (FNC-1)/util.py | kshitizbhansali/DeepLearn | e4b72d921695062d5cc84f4968c3fb57e258428f | ["Apache-2.0"] | 355 | 2017-05-29T12:37:19.000Z | 2022-01-25T15:23:50.000Z |
# -*- coding: utf-8 -*-
from csv import DictReader
from csv import DictWriter
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Initialise global variables
label_ref = {'agree': 0, 'disagree': 1, 'discuss': 2, 'unrelated': 3}
label_ref_rev = {0: 'agree', 1: 'disagree', 2: 'discuss', 3: 'unrelated'}
stop_words = [
"a", "about", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along",
"already", "also", "although", "always", "am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are", "around", "as", "at", "back", "be",
"became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both", "bottom", "but", "by", "call", "can", "co",
"con", "could", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight",
"either", "eleven", "else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fifty", "fill", "find", "fire", "first", "five", "for",
"former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had",
"has", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself",
"him", "himself", "his", "how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed", "interest",
"into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made",
"many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much",
"must", "my", "myself", "name", "namely", "neither", "nevertheless", "next", "nine", "nobody", "now", "nowhere",
"of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours",
"ourselves", "out", "over", "own", "part", "per", "perhaps", "please", "put", "rather", "re", "same", "see",
"serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some",
"somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take",
"ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin", "third", "this", "those", "though",
"three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve",
"twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what",
"whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon",
"wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will",
"with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves"
]
# Define data class
class FNCData:
"""
Define class for Fake News Challenge data
"""
def __init__(self, file_instances, file_bodies):
# Load data
self.instances = self.read(file_instances)
bodies = self.read(file_bodies)
self.heads = {}
self.bodies = {}
# Process instances
for instance in self.instances:
if instance['Headline'] not in self.heads:
head_id = len(self.heads)
self.heads[instance['Headline']] = head_id
instance['Body ID'] = int(instance['Body ID'])
# Process bodies
for body in bodies:
self.bodies[int(body['Body ID'])] = body['articleBody']
def read(self, filename):
"""
Read Fake News Challenge data from CSV file
Args:
filename: str, filename + extension
Returns:
rows: list, of dict per instance
"""
# Initialise
rows = []
# Process file
with open(filename, "r") as table:
r = DictReader(table)
for line in r:
rows.append(line)
return rows
# Define relevant functions
def pipeline_train(train, test, lim_unigram):
"""
Process train set, create relevant vectorizers
Args:
train: FNCData object, train set
test: FNCData object, test set
lim_unigram: int, number of most frequent words to consider
Returns:
train_set: list, of numpy arrays
train_stances: list, of ints
bow_vectorizer: sklearn CountVectorizer
tfreq_vectorizer: sklearn TfidfTransformer(use_idf=False)
tfidf_vectorizer: sklearn TfidfVectorizer()
"""
# Initialise
heads = []
heads_track = {}
bodies = []
bodies_track = {}
body_ids = []
id_ref = {}
train_set = []
train_stances = []
cos_track = {}
test_heads = []
test_heads_track = {}
test_bodies = []
test_bodies_track = {}
test_body_ids = []
head_tfidf_track = {}
body_tfidf_track = {}
# Identify unique heads and bodies
for instance in train.instances:
head = instance['Headline']
body_id = instance['Body ID']
if head not in heads_track:
heads.append(head)
heads_track[head] = 1
if body_id not in bodies_track:
bodies.append(train.bodies[body_id])
bodies_track[body_id] = 1
body_ids.append(body_id)
for instance in test.instances:
head = instance['Headline']
body_id = instance['Body ID']
if head not in test_heads_track:
test_heads.append(head)
test_heads_track[head] = 1
if body_id not in test_bodies_track:
test_bodies.append(test.bodies[body_id])
test_bodies_track[body_id] = 1
test_body_ids.append(body_id)
# Create reference dictionary
for i, elem in enumerate(heads + body_ids):
id_ref[elem] = i
# Create vectorizers and BOW and TF arrays for train set
bow_vectorizer = CountVectorizer(max_features=lim_unigram, stop_words=stop_words)
bow = bow_vectorizer.fit_transform(heads + bodies) # Train set only
tfreq_vectorizer = TfidfTransformer(use_idf=False).fit(bow)
tfreq = tfreq_vectorizer.transform(bow).toarray() # Train set only
tfidf_vectorizer = TfidfVectorizer(max_features=lim_unigram, stop_words=stop_words).\
fit(heads + bodies + test_heads + test_bodies) # Train and test sets
# Process train set
for instance in train.instances:
head = instance['Headline']
body_id = instance['Body ID']
head_tf = tfreq[id_ref[head]].reshape(1, -1)
body_tf = tfreq[id_ref[body_id]].reshape(1, -1)
if head not in head_tfidf_track:
head_tfidf = tfidf_vectorizer.transform([head]).toarray()
head_tfidf_track[head] = head_tfidf
else:
head_tfidf = head_tfidf_track[head]
if body_id not in body_tfidf_track:
body_tfidf = tfidf_vectorizer.transform([train.bodies[body_id]]).toarray()
body_tfidf_track[body_id] = body_tfidf
else:
body_tfidf = body_tfidf_track[body_id]
if (head, body_id) not in cos_track:
tfidf_cos = cosine_similarity(head_tfidf, body_tfidf)[0].reshape(1, 1)
cos_track[(head, body_id)] = tfidf_cos
else:
tfidf_cos = cos_track[(head, body_id)]
feat_vec = np.squeeze(np.c_[head_tf, body_tf, tfidf_cos])
train_set.append(feat_vec)
train_stances.append(label_ref[instance['Stance']])
return train_set, train_stances, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer
def pipeline_test(test, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer):
"""
Process test set
Args:
test: FNCData object, test set
bow_vectorizer: sklearn CountVectorizer
tfreq_vectorizer: sklearn TfidfTransformer(use_idf=False)
tfidf_vectorizer: sklearn TfidfVectorizer()
Returns:
test_set: list, of numpy arrays
"""
# Initialise
test_set = []
heads_track = {}
bodies_track = {}
cos_track = {}
# Process test set
for instance in test.instances:
head = instance['Headline']
body_id = instance['Body ID']
if head not in heads_track:
head_bow = bow_vectorizer.transform([head]).toarray()
head_tf = tfreq_vectorizer.transform(head_bow).toarray()[0].reshape(1, -1)
head_tfidf = tfidf_vectorizer.transform([head]).toarray().reshape(1, -1)
heads_track[head] = (head_tf, head_tfidf)
else:
head_tf = heads_track[head][0]
head_tfidf = heads_track[head][1]
if body_id not in bodies_track:
body_bow = bow_vectorizer.transform([test.bodies[body_id]]).toarray()
body_tf = tfreq_vectorizer.transform(body_bow).toarray()[0].reshape(1, -1)
body_tfidf = tfidf_vectorizer.transform([test.bodies[body_id]]).toarray().reshape(1, -1)
bodies_track[body_id] = (body_tf, body_tfidf)
else:
body_tf = bodies_track[body_id][0]
body_tfidf = bodies_track[body_id][1]
if (head, body_id) not in cos_track:
tfidf_cos = cosine_similarity(head_tfidf, body_tfidf)[0].reshape(1, 1)
cos_track[(head, body_id)] = tfidf_cos
else:
tfidf_cos = cos_track[(head, body_id)]
feat_vec = np.squeeze(np.c_[head_tf, body_tf, tfidf_cos])
test_set.append(feat_vec)
return test_set
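# Illustrative sketch (editor's addition, not part of the original FNC code): how the
# pieces above fit together. The CSV file names and the lim_unigram value are assumptions
# for demonstration only; the helper is defined here but never called.
def _example_feature_pipeline():
    # Load the raw stance/body CSV files and build the feature matrices.
    raw_train = FNCData("train_stances.csv", "train_bodies.csv")
    raw_test = FNCData("test_stances_unlabeled.csv", "test_bodies.csv")
    # pipeline_train fits the vectorizers on the train set; pipeline_test reuses them.
    train_set, train_stances, bow_vec, tfreq_vec, tfidf_vec = \
        pipeline_train(raw_train, raw_test, lim_unigram=5000)
    test_set = pipeline_test(raw_test, bow_vec, tfreq_vec, tfidf_vec)
    return train_set, train_stances, test_set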
def save_predictions(pred, file):
"""
Save predictions to CSV file
Args:
pred: numpy array, of numeric predictions
file: str, filename + extension
"""
with open(file, 'w') as csvfile:
fieldnames = ['Stance']
writer = DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for instance in pred:
            writer.writerow({'Stance': label_ref_rev[instance]})
| 42.070039 | 121 | 0.584073 |
36e9d603260d35f07378e699ad3aaf617af8ddb3 | 46,998 | py | Python | daal4py/sklearn/linear_model/_logistic_path_0_21.py | agorshk/daal4py | 58a9b2301c47cd2d5144a403a59c210e10b75f8f | ["Apache-2.0"] | null | null | null | daal4py/sklearn/linear_model/_logistic_path_0_21.py | agorshk/daal4py | 58a9b2301c47cd2d5144a403a59c210e10b75f8f | ["Apache-2.0"] | null | null | null | daal4py/sklearn/linear_model/_logistic_path_0_21.py | agorshk/daal4py | 58a9b2301c47cd2d5144a403a59c210e10b75f8f | ["Apache-2.0"] | null | null | null |
#
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import numpy as np
import scipy.sparse as sparse
import scipy.optimize as optimize
import numbers
import warnings
from .logistic_loss import (_daal4py_loss_and_grad,
_daal4py_logistic_loss_extra_args,
_daal4py_cross_entropy_loss_extra_args,
_daal4py_loss_, _daal4py_grad_,
_daal4py_grad_hess_)
from sklearn import __version__ as sklearn_version
from distutils.version import LooseVersion
from sklearn.utils import (check_array,
check_consistent_length,
compute_class_weight,
check_random_state)
from sklearn.linear_model.sag import sag_solver
from sklearn.utils.optimize import newton_cg
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model.logistic import (
_check_solver,
_check_multi_class,
_fit_liblinear,
_logistic_loss_and_grad,
_logistic_loss,
_logistic_grad_hess,
_multinomial_loss,
_multinomial_loss_grad,
_multinomial_grad_hess,
LogisticRegression as LogisticRegression_original)
from sklearn.preprocessing import (LabelEncoder, LabelBinarizer)
from sklearn.linear_model.base import (LinearClassifierMixin, SparseCoefMixin, BaseEstimator)
use_daal = True
# Code adapted from sklearn.linear_model.logistic prior to 0.21
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
default_weights = False
else:
default_weights = (class_weight is None)
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if daal_ready:
w0 = np.zeros(n_features + 1, dtype=X.dtype)
y_bin[~mask] = 0.
else:
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
else:
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if solver not in ['sag', 'saga']:
if daal_ready:
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
if daal_ready:
w0 = np.zeros((classes.size, n_features + 1),
order='C', dtype=X.dtype)
else:
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
if daal_ready:
w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
else:
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if daal_ready:
w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
else:
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
C_daal_multiplier = 1
# commented out because this is Py3 feature
#def _map_to_binary_logistic_regression():
# nonlocal C_daal_multiplier
# nonlocal w0
# C_daal_multiplier = 2
# w0 *= 2
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
if daal_ready and classes.size == 2:
w0_saved = w0
w0 = w0[-1:, :]
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_and_grad
else:
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
if daal_ready:
func = _daal4py_loss_and_grad
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
if daal_ready:
daal_extra_args_func = _daal4py_logistic_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
if daal_ready:
extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
fit_intercept, value=True, gradient=True, hessian=False)
else:
extra_args = (X, target, 1. / C, sample_weight)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=extra_args,
iprint=iprint, pgtol=tol, maxiter=max_iter)
if daal_ready and C_daal_multiplier == 2:
w0 *= 0.5
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
n_iter_i = min(info['nit'], max_iter)
elif solver == 'newton-cg':
if daal_ready:
def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
daal_penaltyL2 = 0.5 / C / C_daal_multiplier
_obj_, X_, y_, n_samples = daal_extra_args_func(
classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
value=value, gradient=gradient, hessian=hessian)
_func_ = lambda x, *args: f(x, _obj_, *args)
return _func_, (X_, y_, n_samples, daal_penaltyL2)
loss_func, extra_args = make_ncg_funcs(func, value=True)
grad_func, _ = make_ncg_funcs(grad, gradient=True)
grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
w0, n_iter_i = newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
maxiter=max_iter, tol=tol)
else:
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.
beta = 1. / C
else:
alpha = 1. / C
beta = 0.
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
if daal_ready:
if classes.size == 2:
multi_w0 = w0[np.newaxis, :]
else:
multi_w0 = np.reshape(w0, (classes.size, -1))
else:
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(np.require(multi_w0, requirements='O'))
else:
coefs.append(np.require(w0, requirements='O'))
n_iter[i] = n_iter_i
if daal_ready:
if fit_intercept:
for i, ci in enumerate(coefs):
coefs[i] = np.roll(ci, -1, -1)
else:
for i, ci in enumerate(coefs):
coefs[i] = np.delete(ci, 0, axis=-1)
return coefs, np.array(Cs), n_iter
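# Illustrative sketch (editor's addition): a direct call to logistic_regression_path. The
# synthetic data and the C grid are assumptions for demonstration only; the helper is
# defined here but never called.
def _example_logistic_path():
    # Binary toy problem: the label depends on the first feature only.
    X = np.random.rand(200, 5)
    y = (X[:, 0] > 0.5).astype(np.float64)
    # One coefficient vector per C; with fit_intercept=True each has n_features + 1 entries.
    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=[0.1, 1.0, 10.0], fit_intercept=True,
        solver='lbfgs', multi_class='ovr')
    return coefs, Cs, n_iter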
# Code adapted from sklearn.linear_model.logistic version 0.21
def __logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
default_weights = False
else:
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
default_weights = (class_weight is None)
daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if daal_ready:
w0 = np.zeros(n_features + 1, dtype=X.dtype)
y_bin[~mask] = 0.
else:
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
else:
daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
if solver not in ['sag', 'saga']:
if daal_ready:
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
if daal_ready:
w0 = np.zeros((classes.size, n_features + 1),
order='C', dtype=X.dtype)
else:
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
if daal_ready:
w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
else:
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if daal_ready:
w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
else:
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
C_daal_multiplier = 1
# commented out because this is Py3 feature
#def _map_to_binary_logistic_regression():
# nonlocal C_daal_multiplier
# nonlocal w0
# C_daal_multiplier = 2
# w0 *= 2
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
if daal_ready and classes.size == 2:
w0_saved = w0
w0 = w0[-1:, :]
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_and_grad
else:
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
if daal_ready:
if classes.size == 2:
# _map_to_binary_logistic_regression()
C_daal_multiplier = 2
w0 *= 2
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
if daal_ready:
func = _daal4py_loss_and_grad
daal_extra_args_func = _daal4py_logistic_loss_extra_args
else:
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
if daal_ready:
daal_extra_args_func = _daal4py_logistic_loss_extra_args
func = _daal4py_loss_
grad = _daal4py_grad_
hess = _daal4py_grad_hess_
else:
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
if daal_ready:
extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
fit_intercept, value=True, gradient=True, hessian=False)
else:
extra_args = (X, target, 1. / C, sample_weight)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=extra_args,
iprint=iprint, pgtol=tol, maxiter=max_iter)
if daal_ready and C_daal_multiplier == 2:
w0 *= 0.5
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
n_iter_i = min(info['nit'], max_iter)
elif solver == 'newton-cg':
if daal_ready:
def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
daal_penaltyL2 = 0.5 / C / C_daal_multiplier
_obj_, X_, y_, n_samples = daal_extra_args_func(
classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
value=value, gradient=gradient, hessian=hessian)
_func_ = lambda x, *args: f(x, _obj_, *args)
return _func_, (X_, y_, n_samples, daal_penaltyL2)
loss_func, extra_args = make_ncg_funcs(func, value=True)
grad_func, _ = make_ncg_funcs(grad, gradient=True)
grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
w0, n_iter_i = newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
maxiter=max_iter, tol=tol)
else:
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
# alpha is for L2-norm, beta is for L1-norm
if penalty == 'l1':
alpha = 0.
beta = 1. / C
elif penalty == 'l2':
alpha = 1. / C
beta = 0.
else: # Elastic-Net penalty
alpha = (1. / C) * (1 - l1_ratio)
beta = (1. / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
if daal_ready:
if classes.size == 2:
multi_w0 = w0[np.newaxis, :]
else:
multi_w0 = np.reshape(w0, (classes.size, -1))
else:
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(np.require(multi_w0, requirements='O'))
else:
coefs.append(np.require(w0, requirements='O'))
n_iter[i] = n_iter_i
if daal_ready:
if fit_intercept:
for i, ci in enumerate(coefs):
coefs[i] = np.roll(ci, -1, -1)
else:
for i, ci in enumerate(coefs):
coefs[i] = np.delete(ci, 0, axis=-1)
return np.array(coefs), np.array(Cs), n_iter
if (LooseVersion(sklearn_version) >= LooseVersion("0.22")):
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
return __logistic_regression_path(X, y, pos_class=pos_class,
Cs=Cs, fit_intercept=fit_intercept,
max_iter=max_iter, tol=tol, verbose=verbose,
solver=solver, coef=coef,
class_weight=class_weight,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling,
multi_class=multi_class,
random_state=random_state,
check_input=check_input,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio)
class LogisticRegression(LogisticRegression_original, BaseEstimator,
LinearClassifierMixin, SparseCoefMixin):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='lbfgs', max_iter=100,
multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
elif (LooseVersion(sklearn_version) >= LooseVersion("0.21")):
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
return __logistic_regression_path(X, y, pos_class=pos_class,
Cs=Cs, fit_intercept=fit_intercept,
max_iter=max_iter, tol=tol, verbose=verbose,
solver=solver, coef=coef,
class_weight=class_weight,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling,
multi_class=multi_class,
random_state=random_state,
check_input=check_input,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio)
class LogisticRegression(LogisticRegression_original, BaseEstimator,
LinearClassifierMixin, SparseCoefMixin):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
else:
class LogisticRegression(LogisticRegression_original, BaseEstimator,
LinearClassifierMixin, SparseCoefMixin):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
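# Illustrative usage sketch (editor's addition): the class defined above is intended as a
# drop-in for sklearn's LogisticRegression; whether the daal4py-accelerated path is taken
# depends on how daal4py patches scikit-learn elsewhere in the package (in this module it
# requires a dense X and the 'lbfgs' or 'newton-cg' solver with default sample weights).
#
#   from sklearn.datasets import load_iris   # assumption: example data only
#   X, y = load_iris(return_X_y=True)
#   clf = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=200).fit(X, y)
#   print(clf.score(X, y))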
| 44.379603 | 112 | 0.575365 |
7a4b43adb96afcf047df0bdc3dea6b32a6926822 | 12,377 | py | Python | sourcecode/src/vx/bone/Util.py | ivarvb/BONE | 92efabe4873495e5e7d35a953135f414b4e2dcb0 | ["MIT"] | null | null | null | sourcecode/src/vx/bone/Util.py | ivarvb/BONE | 92efabe4873495e5e7d35a953135f414b4e2dcb0 | ["MIT"] | null | null | null | sourcecode/src/vx/bone/Util.py | ivarvb/BONE | 92efabe4873495e5e7d35a953135f414b4e2dcb0 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Ivar
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import ujson
import copy
from datetime import datetime
class Util:
@staticmethod
def write(file, obj):
with open(file, "w") as filef:
filef.write(ujson.dumps(obj))
@staticmethod
def read(file):
data = {}
with open(file,"r") as filef:
data = (ujson.load(filef))
return data
@staticmethod
def now():
return datetime.now().strftime("%Y%m%d%H%M%S")
@staticmethod
def makedir(ndir):
if not os.path.exists(ndir):
os.makedirs(ndir)
@staticmethod
def makeheatmap(id, inputDir, outputDir, fileclass):
classdata = Util.read(inputDir+"/"+fileclass)
index = []
#columns = ["KNN","SVCGRID","SVCLINEAR","SVC","DTC","RFC","MLPC","ADBC","GNBC"]
columns = []
data = []
for row in classdata:
r = []
columns = []
#print("row[evals]", row["evals"])
for k, v in row["evals"].items():
r.append(v["metrics"]["f1"])
columns.append(k)
data.append(r)
name = row["parameters"]["train"]["label"] +" "+ row["name"]
if row["norm"]=="None":
index.append(name+" (0)")
elif row["norm"]=="std":
index.append(name+" (1)")
elif row["norm"]=="minmax":
index.append(name+" (2)")
df = pd.DataFrame(data, index=index, columns=columns)
plt.subplots(figsize=(8,15))
color_map = plt.cm.get_cmap('YlOrBr_r')
#color_map = color_map.reversed()
ax = sns.heatmap(df, cmap=color_map, square=True, annot=True, annot_kws={"size":6})
for item in ax.get_yticklabels():
item.set_rotation(0)
for item in ax.get_xticklabels():
item.set_rotation(90)
fileout = f'classification_{id}.pdf'
plt.savefig(outputDir+'/'+fileout, dpi=100, bbox_inches='tight')
plt.close("all")
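    # Illustrative sketch (editor's addition): the JSON structure that makeheatmap and
    # makebar expect in `fileclass`; names and values below are assumptions inferred from
    # the code above.
    #
    #   [{"name": "LBP", "norm": "std",
    #     "parameters": {"train": {"label": "femur"}},
    #     "evals": {"RFC": {"metrics": {"f1": 0.91}},
    #               "SVC": {"metrics": {"f1": 0.88}}}}]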
@staticmethod
def makebar(id, inputDir, outputDir, fileclass):
classdata = Util.read(inputDir+"/"+fileclass)
index = []
#columns = ["KNN","SVCGRID","SVCLINEAR","SVC","DTC","RFC","MLPC","ADBC","GNBC"]
data = []
for row in classdata:
for k, v in row["evals"].items():
ac = v["metrics"]["f1"]
#ac = row["evals"][c]["acc"]
data.append(ac)
name = row["parameters"]["train"]["label"] +" "+ row["name"]
if row["norm"]=="None":
name += " (0)"
elif row["norm"]=="std":
name += " (1)"
elif row["norm"]=="minmax":
name += " (2)"
name = k+" "+name
index.append(name)
data, index = zip(*sorted(zip(data, index), reverse=True))
plt.subplots(figsize=(5,60))
df = pd.DataFrame({"lab":index,"val":data})
ax = sns.barplot(x = 'val', y = 'lab', data = df, color='#0091eb')
for x_ticks in ax.get_xticklabels():
x_ticks.set_rotation(90)
i = 0
for p in ax.patches:
ax.annotate(format(data[i], '.2f'),
(p.get_x() + p.get_width(), p.get_y()+1),
ha = 'center', va = 'center',
xytext = (0, 5),
textcoords = 'offset points')
i+=1
fileout = f'bars_{id}.pdf'
plt.savefig(outputDir+'/'+fileout, dpi=100, bbox_inches='tight')
plt.close("all")
@staticmethod
def XXsplitImage(image, tileSize):
height, width = image.shape
# print(image.shape)
tiles = []
positions = []
maxMultHeight = height - (height % tileSize)
maxMultWidth = width - (width % tileSize)
# print(maxMultHeight, maxMultWidth)
for i in range(0, maxMultHeight, tileSize):
for j in range(0, maxMultWidth, tileSize):
# yield image[i:i+tileSize, j:j+tileSize]
positions.append(np.asarray((i, i + tileSize, j, j + tileSize)))
tiles.append(image[i:i + tileSize, j:j + tileSize])
# print(image[i:i+tileSize, j:j+tileSize])
lastTile = image[maxMultHeight:height, maxMultWidth:width]
if lastTile.shape[0] > 0 and lastTile.shape[1] > 0:
tiles.append(lastTile)
positions.append(np.asarray((maxMultHeight, height, maxMultWidth, width)))
#print(tiles)
return tiles, positions
    @staticmethod
    def splitImage(image, tileSize):
height, width = image.shape
# print(image.shape)
tiles = []
positions = []
maxMultHeight = height - (height % tileSize)
maxMultWidth = width - (width % tileSize)
# print(maxMultHeight, maxMultWidth)
for i in range(0, height, tileSize):
for j in range(0, width, tileSize):
# yield image[i:i+tileSize, j:j+tileSize]
aux_i = i + tileSize
ls_i = aux_i if aux_i<(height-1) else height-1
aux_j = j + tileSize
ls_j = aux_j if aux_j<(width-1) else width-1
positions.append(np.asarray((i, ls_i, j, ls_j)))
tiles.append(image[i:ls_i, j:ls_j])
# print(image[i:i+tileSize, j:j+tileSize])
#lastTile = image[maxMultHeight:height, maxMultWidth:width]
#if lastTile.shape[0] > 0 and lastTile.shape[1] > 0:
# tiles.append(lastTile)
# positions.append(np.asarray((maxMultHeight, height, maxMultWidth, width)))
return tiles, positions
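    # A minimal sketch of the tiling above, assuming a 6x6 array and tile size 4;
    # the boundaries below were worked out by hand from the loop logic.
    #
    #   >>> import numpy as np
    #   >>> tiles, positions = Util.splitImage(np.zeros((6, 6)), 4)
    #   >>> [p.tolist() for p in positions]
    #   [[0, 4, 0, 4], [0, 4, 4, 5], [4, 5, 0, 4], [4, 5, 4, 5]]
    #   >>> [t.shape for t in tiles]
    #   [(4, 4), (4, 1), (1, 4), (1, 1)]
    #
    # Because the end indices are clamped to height-1 / width-1, the last row and
    # column of the image never appear in any tile.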
@staticmethod
def getFileName(arg):
#print(arg["targetSet"],arg["boundaryDataSet"],arg["name"], arg["label"])
#return arg["targetSet"]+"_"+arg["boundaryDataSet"]+"_"+arg["name"]+"_"+arg["label"]+".csv"
return arg["targetSet"]+"_"+arg["name"]+"_"+Util.getLabel(arg["parameters"])+".csv"
#return arg["targetSet"]+"_"+arg["name"]+"_"+arg["boundaryDataSet_id"]+str(arg["parameters"]["tile_size"])+"_"+arg["label"]+".csv"
@staticmethod
def getLabel(arg):
d = []
for k, v in arg.items():
if type(v) == list:
s = [str(a) for a in v]
s = ",".join(s)
d.append(str(k)+":["+str(s)+"]")
else:
d.append(str(k)+"_"+str(v))
#d = "{"+" ".join(d)+"}"
d = "_".join(d)
return d
@staticmethod
def curvePlot(dat):
Util.makedir(dat["outputdir"])
#dat["inputdir"]
#dat["outputdir"]
#dat["files"]
#dat["metric"]
dato = {}
dato_aux = []
#filres = []
for name, fil in dat["files"].items():
#filres.append(Util.read(fil))
dato[name] = {}
            # dato_aux accumulates one metrics dict per evaluator (appended below)
obj = Util.read(dat["inputdir"]+"/"+fil)
#print("obj",obj)
for row in obj:
#print("EE",row["evals"])
for k, v in row["evals"].items():
#print("row", v["metrics"][dat["metric"]])
dato[name][row["xval"]] = v["metrics"][dat["metric"]]
metrics = {}
metrics["name"] = name
metrics["xval"] = row["xval"]
for kk, vv in v["metrics"].items():
metrics[kk] = vv
dato_aux.append(metrics)
print(dato_aux)
df_aux = pd.DataFrame(dato_aux)
df_aux.to_csv(dat["outputdir"]+"/"+dat["filename"]+"_info.csv")
index = []
curves = {}
for k, v in dato.items():
curves[k] = []
index = v.keys()
for kv in index:
curves[k].append(v[kv])
#print(obj["evals"])
#print(curves)
#print(index)
df = pd.DataFrame(curves, index=index)
df.to_csv(dat["outputdir"]+"/"+dat["filename"]+".csv")
lines = df.plot.line(figsize=[5,3])
plt.xlabel(dat["xlabel"])
plt.ylabel(dat["ylabel"])
if len(dat["ylim"])==2:
plt.ylim(dat["ylim"])
plt.xticks([i for i in index],[str(i) for i in index])
plt.legend(loc='lower right')
plt.grid(True, linestyle='--')
fig = lines.get_figure()
fig.savefig(dat["outputdir"]+"/"+dat["filename"]+".pdf", dpi=300, bbox_inches='tight')
@staticmethod
def curvePlotFromCSV(dat):
dfin = pd.read_csv(dat["inputdir"]+"/"+dat["file"])
print("dfin", dfin)
index = dfin["ID"].tolist()
dfin = dfin.drop(["ID"], axis=1)
lines = dfin.plot.line(figsize=[5,3])
plt.xlabel(dat["xlabel"])
plt.ylabel(dat["ylabel"])
if len(dat["ylim"])==2:
plt.ylim(dat["ylim"])
plt.xticks([i for i in range(len(index))],[str(i) for i in index])
if "legendloc" in dat:
plt.legend(loc=dat["legendloc"])
else:
plt.legend(loc='lower right')
plt.grid(True, linestyle='--')
if "islogy" in dat and dat["islogy"]==True:
plt.yscale('log')
fig = lines.get_figure()
fig.savefig(dat["outputdir"]+"/"+dat["filename"]+".pdf", dpi=300, bbox_inches='tight')
@staticmethod
def curvePlotFromDIR(dat):
Util.makedir(dat["outputdir"])
dato = {}
dato_aux = []
for name, fil in dat["files"].items():
dato[name] = {}
for di in range(dat["from"], dat["to"]+1, dat["increment"]):
obj = Util.read(dat["inputdir"]+"/"+str(di)+"/"+fil)
for row in obj:
for k, v in row["evals"].items():
xxx = di
dato[name][xxx] = v["metrics"][dat["metric"]]
metrics = {}
metrics["name"] = name
metrics["xval"] = xxx
for kk, vv in v["metrics"].items():
metrics[kk] = vv
dato_aux.append(metrics)
print(dato_aux)
df_aux = pd.DataFrame(dato_aux)
df_aux.to_csv(dat["outputdir"]+"/"+dat["filename"]+"_info.csv")
index = []
curves = {}
for k, v in dato.items():
curves[k] = []
index = v.keys()
for kv in index:
curves[k].append(v[kv])
print("curves", curves)
df = pd.DataFrame(curves, index=index)
df.to_csv(dat["outputdir"]+"/"+dat["filename"]+".csv")
lines = df.plot.line(figsize=[5,3])
if "weight" in dat and "height" in dat:
lines = df.plot.line(figsize=[dat["weight"], dat["height"]])
plt.xlabel(dat["xlabel"])
plt.ylabel(dat["ylabel"])
if len(dat["ylim"])==2:
plt.ylim(dat["ylim"])
plt.xticks([i for i in index],[str(i) for i in index])
if "xinterval" in dat:
plt.xticks(np.arange(min(index), max(index)+1, dat["xinterval"]))
if "xrotation" in dat:
plt.xticks(rotation=dat["xrotation"])
if "xticksfontsize" in dat:
plt.xticks(fontsize=dat["xticksfontsize"])
plt.legend(loc='lower right')
plt.grid(True, linestyle='--')
fig = lines.get_figure()
fig.savefig(dat["outputdir"]+"/"+dat["filename"]+".pdf", dpi=300, bbox_inches='tight')
@staticmethod
def makeConfigureFormUtil(dat):
#dat = dat[0]
#print(dat)
dao = []
a = dat["fromUtil"]["limits"][0]
b = dat["fromUtil"]["limits"][1]
c = dat["fromUtil"]["limits"][2]
for idx in range(a, b+c, c):
dat_copy = copy.deepcopy(dat)
dat_copy["outputdir"] = dat_copy["outputdir"]+"/"+str(idx)
dat_copy["featureselection"]["n_features"] = idx
dao.append(dat_copy)
return dao | 32.830239 | 138 | 0.492446 |
f0a67a1431c3a0c3327f1b9af36c6f8e1ac7d22e | 1,612 | py | Python | chariot/transformer/tokenizer/ja_tokenizer.py | Y-Kuro-u/chariot | 032f3eecdd55b30c65351e1e636c939c4b20919e | [
"Apache-2.0"
] | 134 | 2018-06-11T01:40:14.000Z | 2021-11-15T12:34:38.000Z | chariot/transformer/tokenizer/ja_tokenizer.py | Y-Kuro-u/chariot | 032f3eecdd55b30c65351e1e636c939c4b20919e | [
"Apache-2.0"
] | 10 | 2018-06-17T10:45:50.000Z | 2021-04-05T05:51:11.000Z | chariot/transformer/tokenizer/ja_tokenizer.py | Y-Kuro-u/chariot | 032f3eecdd55b30c65351e1e636c939c4b20919e | [
"Apache-2.0"
] | 8 | 2019-02-23T06:43:21.000Z | 2021-02-18T06:05:11.000Z | from collections import namedtuple
from chariot.transformer.tokenizer.token import Token
class MeCabTokenizer():
JanomeToken = namedtuple("JanomeToken", ("surface", "part_of_speech",
"infl_type", "infl_form",
"base_form", "reading",
"phonetic"))
def __init__(self):
import MeCab
self.tagger = MeCab.Tagger("-Ochasen")
def tokenize(self, text):
self.tagger.parse("")
node = self.tagger.parseToNode(text)
tokens = []
while node:
# Ignore BOS/EOS
if node.surface:
surface = node.surface
features = node.feature.split(",")
if len(features) < 9:
pad_size = 9 - len(features)
features += ["*"] * pad_size
token = MeCabTokenizer.JanomeToken(
surface, ",".join(features[:4]),
features[4], features[5],
features[6], features[7],
features[8])
token = Token(token, token_type="ja")
tokens.append(token)
node = node.next
return tokens
class JanomeTokenizer():
def __init__(self):
from janome.tokenizer import Tokenizer
self.tokenizer = Tokenizer()
def tokenize(self, text):
tokens = self.tokenizer.tokenize(text)
tokens = [Token(t, token_type="ja") for t in tokens]
return tokens
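# A minimal usage sketch, assuming the janome package is installed (the MeCab
# variant needs a local MeCab installation instead). MeCabTokenizer exposes its
# results through a namedtuple that mirrors the janome token fields, so both
# classes yield interchangeable chariot Token objects tagged token_type="ja".
#
#   >>> tokenizer = JanomeTokenizer()
#   >>> tokens = tokenizer.tokenize("今日はいい天気です")
#   >>> len(tokens) > 0
#   True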
| 32.897959 | 73 | 0.491935 |
2cf8130e9ecd14f6389b8db60bed4707ccdcd037 | 2,050 | py | Python | mlfromscratch/examples/lasso_regression.py | Krishna00111/Machine-Learning-from-Scratch | 5d6f5b1a2096acbb57a060385e471123b77b9a68 | [
"MIT"
] | null | null | null | mlfromscratch/examples/lasso_regression.py | Krishna00111/Machine-Learning-from-Scratch | 5d6f5b1a2096acbb57a060385e471123b77b9a68 | [
"MIT"
] | null | null | null | mlfromscratch/examples/lasso_regression.py | Krishna00111/Machine-Learning-from-Scratch | 5d6f5b1a2096acbb57a060385e471123b77b9a68 | [
"MIT"
] | null | null | null | from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Import helper functions
from mlfromscratch.supervised_learning import LassoRegression
from mlfromscratch.utils import k_fold_cross_validation_sets, normalize, mean_squared_error
from mlfromscratch.utils import train_test_split, polynomial_features, Plot
def main():
# Load temperature data
data = pd.read_csv('mlfromscratch/data/TempLinkoping2016.txt', sep="\t")
time = np.atleast_2d(data["time"].values).T
temp = data["temp"].values
X = time # fraction of the year [0, 1]
y = temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    poly_degree = 15
    model = LassoRegression(degree=poly_degree,
reg_factor=0.05,
learning_rate=0.001,
n_iterations=4000)
model.fit(X_train, y_train)
# Training error plot
n = len(model.training_errors)
training, = plt.plot(range(n), model.training_errors, label="Training Error")
plt.legend(handles=[training])
plt.title("Error Plot")
plt.ylabel('Mean Squared Error')
plt.xlabel('Iterations')
plt.show()
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print ("Mean squared error: %s (given by reg. factor: %s)" % (mse, 0.05))
y_pred_line = model.predict(X)
# Color map
cmap = plt.get_cmap('viridis')
# Plot the results
m1 = plt.scatter(366 * X_train, y_train, color=cmap(0.9), s=10)
m2 = plt.scatter(366 * X_test, y_test, color=cmap(0.5), s=10)
plt.plot(366 * X, y_pred_line, color='black', linewidth=2, label="Prediction")
plt.suptitle("Lasso Regression")
plt.title("MSE: %.2f" % mse, fontsize=10)
plt.xlabel('Day')
plt.ylabel('Temperature in Celcius')
plt.legend((m1, m2), ("Training data", "Test data"), loc='lower right')
plt.show()
if __name__ == "__main__":
main()
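# A minimal sketch of the error metric reported above, assuming
# mean_squared_error follows the usual definition mean((y_true - y_pred) ** 2):
#
#   >>> import numpy as np
#   >>> y_true = np.array([1.0, 2.0, 3.0])
#   >>> y_pred = np.array([1.0, 2.5, 2.0])
#   >>> round(float(np.mean((y_true - y_pred) ** 2)), 4)
#   0.4167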
| 32.539683 | 92 | 0.640976 |
0d342ac7c6277d255d46456db4279d130eb796fa | 657 | py | Python | src/shortner/validators.py | diabolicfreak/url_shortner | 4ea30962afaed530e873e6613641883a60f380c7 | [
"MIT"
] | null | null | null | src/shortner/validators.py | diabolicfreak/url_shortner | 4ea30962afaed530e873e6613641883a60f380c7 | [
"MIT"
] | null | null | null | src/shortner/validators.py | diabolicfreak/url_shortner | 4ea30962afaed530e873e6613641883a60f380c7 | [
"MIT"
] | null | null | null | from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
def validate_url(value):
url_validator = URLValidator()
value_1_valid = False
value_2_valid = False
try:
url_validator(value)
except:
value_1_valid = True
value_2 = "http://"+value
try:
url_validator(value_2)
except:
value_2_valid = True
if value_1_valid and value_2_valid:
raise ValidationError("invalid url for this field")
return value
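# A minimal sketch of the intended behaviour, assuming Django's URLValidator:
# a bare host such as "example.com" fails the first check but passes once
# "http://" is prefixed, so it is accepted; a value that fails both checks
# raises ValidationError. Note the *_valid flags above actually record a
# failed check, despite their names.
#
#   >>> validate_url("http://example.com")
#   'http://example.com'
#   >>> validate_url("example.com")
#   'example.com'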
def validate_dot_com(value):
    if "com" not in value:
raise ValidationError("Not valid because no .com")
return value
| 21.9 | 59 | 0.681887 |
61bb009ff8f21c78ebe60a67b2d013135f2b8a27 | 3,453 | py | Python | tests/components/spaceapi/test_init.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | tests/components/spaceapi/test_init.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | tests/components/spaceapi/test_init.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 3 | 2019-04-28T16:35:45.000Z | 2020-05-28T15:21:59.000Z | """The tests for the Home Assistant SpaceAPI component."""
# pylint: disable=protected-access
from unittest.mock import patch
import pytest
from tests.common import mock_coro
from homeassistant.components.spaceapi import (
DOMAIN, SPACEAPI_VERSION, URL_API_SPACEAPI)
from homeassistant.setup import async_setup_component
CONFIG = {
DOMAIN: {
'space': 'Home',
'logo': 'https://home-assistant.io/logo.png',
'url': 'https://home-assistant.io',
'location': {'address': 'In your Home'},
'contact': {'email': '[email protected]'},
'issue_report_channels': ['email'],
'state': {
'entity_id': 'test.test_door',
'icon_open': 'https://home-assistant.io/open.png',
'icon_closed': 'https://home-assistant.io/close.png',
},
'sensors': {
'temperature': ['test.temp1', 'test.temp2'],
'humidity': ['test.hum1'],
}
}
}
SENSOR_OUTPUT = {
'temperature': [
{
'location': 'Home',
'name': 'temp1',
            'unit': '°C',
'value': '25'
},
{
'location': 'Home',
'name': 'temp2',
            'unit': '°C',
'value': '23'
},
],
'humidity': [
{
'location': 'Home',
'name': 'hum1',
'unit': '%',
'value': '88'
},
]
}
@pytest.fixture
def mock_client(hass, hass_client):
"""Start the Home Assistant HTTP component."""
with patch('homeassistant.components.spaceapi',
return_value=mock_coro(True)):
hass.loop.run_until_complete(
async_setup_component(hass, 'spaceapi', CONFIG))
hass.states.async_set('test.temp1', 25,
                          attributes={'unit_of_measurement': '°C'})
hass.states.async_set('test.temp2', 23,
                          attributes={'unit_of_measurement': '°C'})
hass.states.async_set('test.hum1', 88,
attributes={'unit_of_measurement': '%'})
return hass.loop.run_until_complete(hass_client())
async def test_spaceapi_get(hass, mock_client):
"""Test response after start-up Home Assistant."""
resp = await mock_client.get(URL_API_SPACEAPI)
assert resp.status == 200
data = await resp.json()
assert data['api'] == SPACEAPI_VERSION
assert data['space'] == 'Home'
assert data['contact']['email'] == '[email protected]'
assert data['location']['address'] == 'In your Home'
assert data['location']['latitude'] == 32.87336
assert data['location']['longitude'] == -117.22743
assert data['state']['open'] == 'null'
assert data['state']['icon']['open'] == \
'https://home-assistant.io/open.png'
assert data['state']['icon']['close'] == \
'https://home-assistant.io/close.png'
async def test_spaceapi_state_get(hass, mock_client):
"""Test response if the state entity was set."""
hass.states.async_set('test.test_door', True)
resp = await mock_client.get(URL_API_SPACEAPI)
assert resp.status == 200
data = await resp.json()
assert data['state']['open'] == bool(1)
async def test_spaceapi_sensors_get(hass, mock_client):
"""Test the response for the sensors."""
resp = await mock_client.get(URL_API_SPACEAPI)
assert resp.status == 200
data = await resp.json()
assert data['sensors'] == SENSOR_OUTPUT
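# Roughly, each entity listed under CONFIG['sensors'] is reported back by the
# endpoint as a {'location', 'name', 'unit', 'value'} entry, which is what the
# SENSOR_OUTPUT fixture above encodes (e.g. test.temp1 with state 25 and unit
# °C becomes {'location': 'Home', 'name': 'temp1', 'unit': '°C', 'value': '25'}).
# Assuming a Home Assistant development checkout, these tests would typically be
# run with something like:
#
#   pytest tests/components/spaceapi/test_init.py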
| 30.289474 | 67 | 0.580075 |
3122d364c922e9d4ca267ce7689339686d4df60a | 58,287 | py | Python | src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py | akashsinghal/azure-cli | 8ab2f7604a834de790bdea849b3e83f2466428b9 | [
"MIT"
] | 2 | 2020-08-08T11:00:25.000Z | 2020-08-08T11:00:30.000Z | src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py | akashsinghal/azure-cli | 8ab2f7604a834de790bdea849b3e83f2466428b9 | [
"MIT"
] | 1 | 2021-06-02T02:49:48.000Z | 2021-06-02T02:49:48.000Z | src/azure-cli/azure/cli/command_modules/storage/_validators_azure_stack.py | akashsinghal/azure-cli | 8ab2f7604a834de790bdea849b3e83f2466428b9 | [
"MIT"
] | 1 | 2020-07-31T17:22:13.000Z | 2020-07-31T17:22:13.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
import argparse
from azure.cli.core.commands.validators import validate_key_value_pairs
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.storage._client_factory import (get_storage_data_service_client,
blob_data_service_factory,
file_data_service_factory,
storage_client_factory)
from azure.cli.command_modules.storage.util import glob_files_locally, guess_content_type
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
from azure.cli.command_modules.storage.url_quote_util import encode_for_url
from azure.cli.command_modules.storage.oauth_token_util import TokenUpdater
from knack.log import get_logger
from knack.util import CLIError
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
# Utilities
# pylint: disable=inconsistent-return-statements,too-many-lines
def _query_account_key(cli_ctx, account_name):
"""Query the storage account key. This is used when the customer doesn't offer account key but name."""
rg, scf = _query_account_rg(cli_ctx, account_name)
t_storage_account_keys = get_sdk(
cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
scf.config.enable_http_logger = False
logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
if t_storage_account_keys:
return scf.storage_accounts.list_keys(rg, account_name).key1
# of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
return scf.storage_accounts.list_keys(rg, account_name).keys[0].value # pylint: disable=no-member
def _query_account_rg(cli_ctx, account_name):
"""Query the storage account's resource group, which the mgmt sdk requires."""
scf = storage_client_factory(cli_ctx)
acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)
if acc:
from msrestazure.tools import parse_resource_id
return parse_resource_id(acc.id)['resource_group'], scf
raise ValueError("Storage account '{}' not found.".format(account_name))
def _create_token_credential(cli_ctx):
from knack.cli import EVENT_CLI_POST_EXECUTE
TokenCredential = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#TokenCredential')
token_credential = TokenCredential()
updater = TokenUpdater(token_credential, cli_ctx)
def _cancel_timer_event_handler(_, **__):
updater.cancel()
cli_ctx.register_event(EVENT_CLI_POST_EXECUTE, _cancel_timer_event_handler)
return token_credential
# region PARAMETER VALIDATORS
def parse_storage_account(cmd, namespace):
"""Parse storage account which can be either account name or account id"""
from msrestazure.tools import parse_resource_id, is_valid_resource_id
if namespace.account_name and is_valid_resource_id(namespace.account_name):
namespace.resource_group_name = parse_resource_id(namespace.account_name)['resource_group']
namespace.account_name = parse_resource_id(namespace.account_name)['name']
elif namespace.account_name and not is_valid_resource_id(namespace.account_name) and \
not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def process_resource_group(cmd, namespace):
"""Processes the resource group parameter from the account name"""
if namespace.account_name and not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def validate_table_payload_format(cmd, namespace):
t_table_payload = get_table_data_type(cmd.cli_ctx, 'table', 'TablePayloadFormat')
if namespace.accept:
formats = {
'none': t_table_payload.JSON_NO_METADATA,
'minimal': t_table_payload.JSON_MINIMAL_METADATA,
'full': t_table_payload.JSON_FULL_METADATA
}
namespace.accept = formats[namespace.accept.lower()]
def validate_bypass(namespace):
if namespace.bypass:
namespace.bypass = ', '.join(namespace.bypass) if isinstance(namespace.bypass, list) else namespace.bypass
def get_config_value(cmd, section, key, default):
return cmd.cli_ctx.config.get(section, key, default)
def is_storagev2(import_prefix):
return import_prefix.startswith('azure.multiapi.storagev2.')
def validate_client_parameters(cmd, namespace):
""" Retrieves storage connection parameters from environment variables and parses out connection string into
account name and key """
n = namespace
if hasattr(n, 'auth_mode'):
auth_mode = n.auth_mode or get_config_value(cmd, 'storage', 'auth_mode', None)
del n.auth_mode
if not n.account_name:
n.account_name = get_config_value(cmd, 'storage', 'account', None)
if auth_mode == 'login':
prefix = cmd.command_kwargs['resource_type'].value[0]
# is_storagv2() is used to distinguish if the command is in track2 SDK
# If yes, we will use get_login_credentials() as token credential
if is_storagev2(prefix):
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cmd.cli_ctx)
n.token_credential, _, _ = profile.get_login_credentials(
resource="https://storage.azure.com", subscription_id=n._subscription)
# Otherwise, we will assume it is in track1 and keep previous token updater
else:
n.token_credential = _create_token_credential(cmd.cli_ctx)
if hasattr(n, 'token_credential') and n.token_credential:
# give warning if there are account key args being ignored
account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
n.connection_string and "--connection-string"]
account_key_args = [arg for arg in account_key_args if arg]
if account_key_args:
logger.warning('In "login" auth mode, the following arguments are ignored: %s',
' ,'.join(account_key_args))
return
if not n.connection_string:
n.connection_string = get_config_value(cmd, 'storage', 'connection_string', None)
# if connection string supplied or in environment variables, extract account key and name
if n.connection_string:
conn_dict = validate_key_value_pairs(n.connection_string)
n.account_name = conn_dict.get('AccountName')
n.account_key = conn_dict.get('AccountKey')
n.sas_token = conn_dict.get('SharedAccessSignature')
# otherwise, simply try to retrieve the remaining variables from environment variables
if not n.account_name:
n.account_name = get_config_value(cmd, 'storage', 'account', None)
if not n.account_key:
n.account_key = get_config_value(cmd, 'storage', 'key', None)
if not n.sas_token:
n.sas_token = get_config_value(cmd, 'storage', 'sas_token', None)
# strip the '?' from sas token. the portal and command line are returns sas token in different
# forms
if n.sas_token:
n.sas_token = n.sas_token.lstrip('?')
# if account name is specified but no key, attempt to query
if n.account_name and not n.account_key and not n.sas_token:
logger.warning('There is no credential provided in your command and environment, we will query account key '
'for your storage account. \nPlease provide --connection-string, --account-key or --sas-token '
'as credential, or use `--auth-mode login` if you have required RBAC roles in your command. '
'For more information about RBAC roles in storage, you can see '
'https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli. \n'
'Setting corresponding environment variable can avoid inputting credential in your command. '
'Please use --help to get more information.')
n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
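# A minimal sketch of the resolution order implemented above: explicit arguments
# win, then the connection string (whose AccountName / AccountKey /
# SharedAccessSignature fields are split out), then config/environment values,
# and only as a last resort the account key is queried through ARM, with a
# warning. Assuming validate_key_value_pairs splits on ';' and then on the
# first '=' (the usual azure-cli behaviour):
#
#   >>> cs = 'DefaultEndpointsProtocol=https;AccountName=myacct;AccountKey=abc123=='
#   >>> parts = validate_key_value_pairs(cs)
#   >>> parts['AccountName'], parts['AccountKey']
#   ('myacct', 'abc123==')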
def validate_encryption_key(cmd, namespace):
encryption_key_source = cmd.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
if namespace.key_source == encryption_key_source.microsoft_key_vault and \
not namespace.key_uri:
raise CLIError("usage error: Please specify --key-uri when using {} as key source."
.format(encryption_key_source.microsoft_key_vault))
if namespace.key_source != encryption_key_source.microsoft_key_vault and namespace.key_uri:
raise CLIError("usage error: Specify `--key-source={}` and --key-uri to configure key vault properties."
.format(encryption_key_source.microsoft_key_vault))
def process_blob_source_uri(cmd, namespace):
"""
Validate the parameters referenced to a blob source and create the source URI from them.
"""
from .util import create_short_lived_blob_sas
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri' \
'\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
'\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('copy_source', None)
if uri:
if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
raise ValueError(usage_string.format('Unused parameters are given in addition to the '
'source URI'))
# simplest scenario--no further processing necessary
return
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
# determine if the copy will happen in the same storage account
if not source_account_name and source_account_key:
raise ValueError(usage_string.format('Source account key is given but account name is not'))
if not source_account_name and not source_account_key:
        # neither source account name nor key is given; assume the user intends to copy a blob in
# the same account
source_account_name = ns.get('account_name', None)
source_account_key = ns.get('account_key', None)
elif source_account_name and not source_account_key:
if source_account_name == ns.get('account_name', None):
# the source account name is same as the destination account name
source_account_key = ns.get('account_key', None)
else:
# the source account is different from destination account but the key is missing
# try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise ValueError('Source storage account {} not found.'.format(source_account_name))
# else: both source account name and key are given by user
if not source_account_name:
raise ValueError(usage_string.format('Storage account name not found'))
if not sas:
sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if sas:
query_params.append(sas)
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
cmd.cli_ctx.cloud.suffixes.storage_endpoint,
container,
blob,
'?' if query_params else '',
'&'.join(query_params))
namespace.copy_source = uri
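# Shape of the copy source assembled above, assuming the public Azure cloud
# (storage endpoint suffix core.windows.net):
#
#   https://<account>.blob.core.windows.net/<container>/<blob>?<sas>&snapshot=<snapshot>
#
# The 'snapshot=' part is appended only when --source-snapshot was supplied; the
# SAS is either the one passed in or a short-lived one generated from the
# resolved source account key.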
def validate_source_uri(cmd, namespace): # pylint: disable=too-many-statements
from .util import create_short_lived_blob_sas, create_short_lived_file_sas
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --source-uri [--source-sas]' \
'\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
'\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
'\n\tOR --source-share --source-path' \
'\n\tOR --source-share --source-path [--source-account-name & sas]' \
'\n\tOR --source-share --source-path [--source-account-name & key]'
ns = vars(namespace)
# source as blob
container = ns.pop('source_container', None)
blob = ns.pop('source_blob', None)
snapshot = ns.pop('source_snapshot', None)
# source as file
share = ns.pop('source_share', None)
path = ns.pop('source_path', None)
file_snapshot = ns.pop('file_snapshot', None)
# source credential clues
source_account_name = ns.pop('source_account_name', None)
source_account_key = ns.pop('source_account_key', None)
source_sas = ns.pop('source_sas', None)
# source in the form of an uri
uri = ns.get('copy_source', None)
if uri:
if any([container, blob, snapshot, share, path, file_snapshot, source_account_name,
source_account_key]):
raise ValueError(usage_string.format('Unused parameters are given in addition to the '
'source URI'))
if source_sas:
source_sas = source_sas.lstrip('?')
uri = '{}{}{}'.format(uri, '?', source_sas)
namespace.copy_source = uri
return
# ensure either a file or blob source is specified
valid_blob_source = container and blob and not share and not path and not file_snapshot
valid_file_source = share and path and not container and not blob and not snapshot
if not valid_blob_source and not valid_file_source:
raise ValueError(usage_string.format('Neither a valid blob or file source is specified'))
if valid_blob_source and valid_file_source:
raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
'specified'))
validate_client_parameters(cmd, namespace) # must run first to resolve storage account
if not source_account_name:
if source_account_key:
raise ValueError(usage_string.format('Source account key is given but account name is not'))
# assume that user intends to copy blob in the same account
source_account_name = ns.get('account_name', None)
# determine if the copy will happen in the same storage account
same_account = False
if not source_account_key and not source_sas:
if source_account_name == ns.get('account_name', None):
same_account = True
source_account_key = ns.get('account_key', None)
source_sas = ns.get('sas_token', None)
else:
# the source account is different from destination account but the key is missing try to query one.
try:
source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
except ValueError:
raise ValueError('Source storage account {} not found.'.format(source_account_name))
# Both source account name and either key or sas (or both) are now available
if not source_sas:
# generate a sas token even in the same account when the source and destination are not the same kind.
if valid_file_source and (ns.get('container_name', None) or not same_account):
import os
dir_name, file_name = os.path.split(path) if path else (None, '')
source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
dir_name, file_name)
elif valid_blob_source and (ns.get('share_name', None) or not same_account):
source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
query_params = []
if source_sas:
query_params.append(source_sas.lstrip('?'))
if snapshot:
query_params.append('snapshot={}'.format(snapshot))
if file_snapshot:
query_params.append('sharesnapshot={}'.format(file_snapshot))
uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
source_account_name,
'blob' if valid_blob_source else 'file',
container if valid_blob_source else share,
encode_for_url(blob if valid_blob_source else path),
'?' if query_params else '',
'&'.join(query_params),
cmd.cli_ctx.cloud.suffixes.storage_endpoint)
namespace.copy_source = uri
def validate_blob_type(namespace):
if not namespace.blob_type:
namespace.blob_type = 'page' if namespace.file_path.endswith('.vhd') else 'block'
def validate_storage_data_plane_list(namespace):
if namespace.num_results == '*':
namespace.num_results = None
else:
namespace.num_results = int(namespace.num_results)
def get_content_setting_validator(settings_class, update, guess_from_file=None):
def _class_name(class_type):
return class_type.__module__ + "." + class_type.__class__.__name__
def validator(cmd, namespace):
t_base_blob_service, t_file_service, t_blob_content_settings, t_file_content_settings = cmd.get_models(
'blob.baseblobservice#BaseBlobService',
'file#FileService',
'blob.models#ContentSettings',
'file.models#ContentSettings')
# must run certain validators first for an update
if update:
validate_client_parameters(cmd, namespace)
if update and _class_name(settings_class) == _class_name(t_file_content_settings):
get_file_path_validator()(namespace)
ns = vars(namespace)
clear_content_settings = ns.pop('clear_content_settings', False)
# retrieve the existing object properties for an update
if update and not clear_content_settings:
account = ns.get('account_name')
key = ns.get('account_key')
cs = ns.get('connection_string')
sas = ns.get('sas_token')
token_credential = ns.get('token_credential')
if _class_name(settings_class) == _class_name(t_blob_content_settings):
client = get_storage_data_service_client(cmd.cli_ctx,
service=t_base_blob_service,
name=account,
key=key, connection_string=cs, sas_token=sas,
token_credential=token_credential)
container = ns.get('container_name')
blob = ns.get('blob_name')
lease_id = ns.get('lease_id')
props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings
elif _class_name(settings_class) == _class_name(t_file_content_settings):
client = get_storage_data_service_client(cmd.cli_ctx, t_file_service, account, key, cs, sas)
share = ns.get('share_name')
directory = ns.get('directory_name')
filename = ns.get('file_name')
props = client.get_file_properties(share, directory, filename).properties.content_settings
# create new properties
new_props = settings_class(
content_type=ns.pop('content_type', None),
content_disposition=ns.pop('content_disposition', None),
content_encoding=ns.pop('content_encoding', None),
content_language=ns.pop('content_language', None),
content_md5=ns.pop('content_md5', None),
cache_control=ns.pop('content_cache_control', None)
)
# if update, fill in any None values with existing
if update:
if not clear_content_settings:
for attr in ['content_type', 'content_disposition', 'content_encoding', 'content_language',
'content_md5', 'cache_control']:
if getattr(new_props, attr) is None:
setattr(new_props, attr, getattr(props, attr))
else:
if guess_from_file:
new_props = guess_content_type(ns[guess_from_file], new_props, settings_class)
ns['content_settings'] = new_props
return validator
def validate_custom_domain(namespace):
if namespace.use_subdomain and not namespace.custom_domain:
raise ValueError('usage error: --custom-domain DOMAIN [--use-subdomain]')
def validate_encryption_services(cmd, namespace):
"""
Builds up the encryption services object for storage account operations based on the list of services passed in.
"""
if namespace.encryption_services:
t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
'EncryptionServices', 'EncryptionService', mod='models')
services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}
namespace.encryption_services = t_encryption_services(**services)
def validate_encryption_source(namespace):
if namespace.encryption_key_source == 'Microsoft.Keyvault' and \
not (namespace.encryption_key_name and namespace.encryption_key_vault):
raise ValueError('--encryption-key-name and --encryption-key-vault are required '
'when --encryption-key-source=Microsoft.Keyvault is specified.')
if namespace.encryption_key_name or namespace.encryption_key_version is not None or namespace.encryption_key_vault:
if namespace.encryption_key_source and namespace.encryption_key_source != 'Microsoft.Keyvault':
raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are not '
'applicable without Microsoft.Keyvault key-source.')
def validate_entity(namespace):
""" Converts a list of key value pairs into a dictionary. Ensures that required
RowKey and PartitionKey are converted to the correct case and included. """
values = dict(x.split('=', 1) for x in namespace.entity)
keys = values.keys()
for key in list(keys):
if key.lower() == 'rowkey':
val = values[key]
del values[key]
values['RowKey'] = val
elif key.lower() == 'partitionkey':
val = values[key]
del values[key]
values['PartitionKey'] = val
keys = values.keys()
missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
missing_keys = '{}PartitionKey'.format(missing_keys) \
if 'PartitionKey' not in keys else missing_keys
if missing_keys:
raise argparse.ArgumentError(
None, 'incorrect usage: entity requires: {}'.format(missing_keys))
def cast_val(key, val):
""" Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
can be queried correctly. """
if key in ['PartitionKey', 'RowKey']:
return val
def try_cast(to_type):
try:
return to_type(val)
except ValueError:
return None
return try_cast(int) or try_cast(float) or val
# ensure numbers are converted from strings so querying will work correctly
values = {key: cast_val(key, val) for key, val in values.items()}
namespace.entity = values
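# A minimal doctest-style sketch of the conversion above, using an
# argparse.Namespace as a stand-in for the parsed arguments:
#
#   >>> ns = argparse.Namespace(entity=['partitionkey=pk1', 'rowkey=rk1', 'Count=5', 'Price=9.99'])
#   >>> validate_entity(ns)
#   >>> ns.entity['PartitionKey'], ns.entity['RowKey'], ns.entity['Count'], ns.entity['Price']
#   ('pk1', 'rk1', 5, 9.99)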
def validate_marker(namespace):
""" Converts a list of key value pairs into a dictionary. Ensures that required
nextrowkey and nextpartitionkey are included. """
if not namespace.marker:
return
marker = dict(x.split('=', 1) for x in namespace.marker)
expected_keys = {'nextrowkey', 'nextpartitionkey'}
for key in list(marker.keys()):
new_key = key.lower()
if new_key in expected_keys:
expected_keys.remove(key.lower())
val = marker[key]
del marker[key]
marker[new_key] = val
if expected_keys:
raise argparse.ArgumentError(
None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))
namespace.marker = marker
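# Same idea for the continuation marker: the two required keys are lower-cased
# and an error is raised if either is missing.
#
#   >>> ns = argparse.Namespace(marker=['NextPartitionKey=pk1', 'NextRowKey=rk1'])
#   >>> validate_marker(ns)
#   >>> ns.marker
#   {'nextpartitionkey': 'pk1', 'nextrowkey': 'rk1'}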
def get_file_path_validator(default_file_param=None):
""" Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
Allows another path-type parameter to be named which can supply a default filename. """
def validator(namespace):
import os
if not hasattr(namespace, 'path'):
return
path = namespace.path
dir_name, file_name = os.path.split(path) if path else (None, '')
if default_file_param and '.' not in file_name:
dir_name = path
file_name = os.path.split(getattr(namespace, default_file_param))[1]
dir_name = None if dir_name in ('', '.') else dir_name
namespace.directory_name = dir_name
namespace.file_name = file_name
del namespace.path
return validator
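# A minimal sketch of the resulting namespace, assuming a POSIX-style path:
#
#   >>> ns = argparse.Namespace(path='dir1/dir2/report.txt')
#   >>> get_file_path_validator()(ns)
#   >>> ns.directory_name, ns.file_name
#   ('dir1/dir2', 'report.txt')
#
# A bare file name ('report.txt') yields directory_name=None, and the original
# 'path' attribute is removed from the namespace.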
def validate_included_datasets(cmd, namespace):
if namespace.include:
include = namespace.include
if set(include) - set('cmsd'):
help_string = '(c)opy-info (m)etadata (s)napshots (d)eleted'
raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
t_blob_include = cmd.get_models('blob#Include')
namespace.include = t_blob_include('s' in include, 'm' in include, False, 'c' in include, 'd' in include)
def validate_key_name(namespace):
key_options = {'primary': '1', 'secondary': '2'}
if hasattr(namespace, 'key_type') and namespace.key_type:
namespace.key_name = namespace.key_type + key_options[namespace.key_name]
else:
namespace.key_name = storage_account_key_options[namespace.key_name]
def validate_metadata(namespace):
if namespace.metadata:
namespace.metadata = dict(x.split('=', 1) for x in namespace.metadata)
def get_permission_help_string(permission_class):
allowed_values = [x.lower() for x in dir(permission_class) if not x.startswith('__')]
return ' '.join(['({}){}'.format(x[0], x[1:]) for x in allowed_values])
def get_permission_validator(permission_class):
allowed_values = [x.lower() for x in dir(permission_class) if not x.startswith('__')]
allowed_string = ''.join(x[0] for x in allowed_values)
def validator(namespace):
if namespace.permission:
if set(namespace.permission) - set(allowed_string):
help_string = get_permission_help_string(permission_class)
raise ValueError(
'valid values are {} or a combination thereof.'.format(help_string))
namespace.permission = permission_class(_str=namespace.permission)
return validator
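# A minimal sketch of the help-string format above, using a stand-in class (the
# real permission classes come from the storage SDK models):
#
#   >>> class FakePermissions:
#   ...     read = True
#   ...     write = True
#   ...     delete = True
#   >>> get_permission_help_string(FakePermissions)
#   '(d)elete (r)ead (w)rite'
#
# The matching validator accepts any combination of the leading letters
# (e.g. 'rw') and rebuilds the permission object via permission_class(_str=...).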
def table_permission_validator(cmd, namespace):
""" A special case for table because the SDK associates the QUERY permission with 'r' """
t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
if namespace.permission:
if set(namespace.permission) - set('raud'):
help_string = '(r)ead/query (a)dd (u)pdate (d)elete'
raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
namespace.permission = t_table_permissions(_str=namespace.permission)
def validate_container_public_access(cmd, namespace):
from .sdkutil import get_container_access_type
t_base_blob_svc = cmd.get_models('blob.baseblobservice#BaseBlobService')
if namespace.public_access:
namespace.public_access = get_container_access_type(cmd.cli_ctx, namespace.public_access.lower())
if hasattr(namespace, 'signed_identifiers'):
# must retrieve the existing ACL to simulate a patch operation because these calls
# are needlessly conflated
ns = vars(namespace)
validate_client_parameters(cmd, namespace)
account = ns.get('account_name')
key = ns.get('account_key')
cs = ns.get('connection_string')
sas = ns.get('sas_token')
client = get_storage_data_service_client(cmd.cli_ctx, t_base_blob_svc, account, key, cs, sas)
container = ns.get('container_name')
lease_id = ns.get('lease_id')
ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def validate_fs_public_access(cmd, namespace):
from .sdkutil import get_fs_access_type
if namespace.public_access:
namespace.public_access = get_fs_access_type(cmd.cli_ctx, namespace.public_access.lower())
def validate_select(namespace):
if namespace.select:
namespace.select = ','.join(namespace.select)
# pylint: disable=too-many-statements
def get_source_file_or_blob_service_client(cmd, namespace):
"""
Create the second file service or blob service client for batch copy command, which is used to
list the source files or blobs. If both the source account and source URI are omitted, it
indicates that user want to copy files or blobs in the same storage account, therefore the
destination client will be set None hence the command will use destination client.
"""
t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
usage_string = 'invalid usage: supply only one of the following argument sets:' + \
'\n\t --source-uri [--source-sas]' + \
'\n\tOR --source-container' + \
'\n\tOR --source-container --source-account-name --source-account-key' + \
'\n\tOR --source-container --source-account-name --source-sas' + \
'\n\tOR --source-share --source-account-name --source-account-key' + \
'\n\tOR --source-share --source-account-name --source-account-sas'
ns = vars(namespace)
source_account = ns.pop('source_account_name', None)
source_key = ns.pop('source_account_key', None)
source_uri = ns.pop('source_uri', None)
source_sas = ns.get('source_sas', None)
source_container = ns.get('source_container', None)
source_share = ns.get('source_share', None)
if source_uri and source_account:
raise ValueError(usage_string)
if not source_uri and bool(source_container) == bool(source_share): # must be container or share
raise ValueError(usage_string)
if (not source_account) and (not source_uri):
        # Set source_client to None if neither source_account nor source_uri is given. This tells
        # the command that the source file share or blob container is in the same storage
        # account as the destination file share or blob container.
#
# The command itself should create the source service client since the validator can't
# access the destination client through the namespace.
#
        # A few argument checks are made as well so as not to cause ambiguity.
if source_key or source_sas:
raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
' same as the destination account. Do not provide --source-sas or --source-account-key')
ns['source_client'] = None
if 'token_credential' not in ns: # not using oauth
return
# oauth is only possible through destination, must still get source creds
source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']
if source_account:
if not (source_key or source_sas):
# when neither storage account key or SAS is given, try to fetch the key in the current
# subscription
source_key = _query_account_key(cmd.cli_ctx, source_account)
if source_container:
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
elif source_share:
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
elif source_uri:
if source_key or source_container or source_share:
raise ValueError(usage_string)
from .storage_url_helpers import StorageResourceIdentifier
if source_sas:
source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
nor_container_or_share = not identifier.container and not identifier.share
if not identifier.is_url():
raise ValueError('incorrect usage: --source-uri expects a URI')
if identifier.blob or identifier.directory or identifier.filename or nor_container_or_share:
raise ValueError('incorrect usage: --source-uri has to be blob container or file share')
if identifier.sas_token:
ns['source_sas'] = identifier.sas_token
else:
source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)
if identifier.container:
ns['source_container'] = identifier.container
if identifier.account_name != ns.get('account_name'):
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
sas_token=identifier.sas_token)
elif identifier.share:
ns['source_share'] = identifier.share
if identifier.account_name != ns.get('account_name'):
ns['source_client'] = get_storage_data_service_client(
cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
sas_token=identifier.sas_token)
def add_progress_callback(cmd, namespace):
def _update_progress(current, total):
message = getattr(_update_progress, 'message', 'Alive')
reuse = getattr(_update_progress, 'reuse', False)
if total:
hook.add(message=message, value=current, total_val=total)
if total == current and not reuse:
hook.end()
hook = cmd.cli_ctx.get_progress_controller(det=True)
_update_progress.hook = hook
if not namespace.no_progress:
namespace.progress_callback = _update_progress
del namespace.no_progress
def process_container_delete_parameters(cmd, namespace):
"""Process the parameters for storage container delete command"""
# check whether to use mgmt or data-plane
if namespace.bypass_immutability_policy:
# use management-plane
namespace.processed_account_name = namespace.account_name
namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
cmd.cli_ctx, namespace.account_name)
del namespace.auth_mode
else:
# use data-plane, like before
validate_client_parameters(cmd, namespace)
def process_blob_download_batch_parameters(cmd, namespace):
"""Process the parameters for storage blob download command"""
import os
# 1. quick check
if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
raise ValueError('incorrect usage: destination must be an existing directory')
# 2. try to extract account name and container name from source string
_process_blob_batch_container_parameters(cmd, namespace)
# 3. Call validators
add_progress_callback(cmd, namespace)
def process_blob_upload_batch_parameters(cmd, namespace):
"""Process the source and destination of storage blob upload command"""
import os
# 1. quick check
if not os.path.exists(namespace.source) or not os.path.isdir(namespace.source):
raise ValueError('incorrect usage: source must be an existing directory')
# 2. try to extract account name and container name from destination string
_process_blob_batch_container_parameters(cmd, namespace, source=False)
# 3. collect the files to be uploaded
namespace.source = os.path.realpath(namespace.source)
namespace.source_files = [c for c in glob_files_locally(namespace.source, namespace.pattern)]
# 4. determine blob type
if namespace.blob_type is None:
vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
# when all the listed files are vhd files use page
namespace.blob_type = 'page'
elif any(vhd_files):
# source files contain vhd files but not all of them
            raise CLIError("""Failed to guess the required blob type. The types of the files to be
                            uploaded are not consistent. The default blob type for .vhd files is "page", while
                            others default to "block". You can solve this by either explicitly setting the blob
                            type or ensuring the pattern matches a consistent set of files.""")
else:
namespace.blob_type = 'block'
# 5. call other validators
validate_metadata(namespace)
t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
add_progress_callback(cmd, namespace)
def process_blob_delete_batch_parameters(cmd, namespace):
_process_blob_batch_container_parameters(cmd, namespace)
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
"""Process the container parameters for storage blob batch commands before populating args from environment."""
if source:
container_arg, container_name_arg = 'source', 'source_container_name'
else:
# destination
container_arg, container_name_arg = 'destination', 'destination_container_name'
# try to extract account name and container name from source string
from .storage_url_helpers import StorageResourceIdentifier
container_arg_val = getattr(namespace, container_arg) # either a url or name
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)
if not identifier.is_url():
setattr(namespace, container_name_arg, container_arg_val)
elif identifier.blob:
raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
else:
setattr(namespace, container_name_arg, identifier.container)
if namespace.account_name is None:
namespace.account_name = identifier.account_name
elif namespace.account_name != identifier.account_name:
raise ValueError('The given storage account name is not consistent with the '
'account name in the destination URL')
# if no sas-token is given and the container url contains one, use it
if not namespace.sas_token and identifier.sas_token:
namespace.sas_token = identifier.sas_token
# Finally, grab missing storage connection parameters from environment variables
validate_client_parameters(cmd, namespace)
def process_file_upload_batch_parameters(cmd, namespace):
"""Process the parameters of storage file batch upload command"""
import os
# 1. quick check
if not os.path.exists(namespace.source):
raise ValueError('incorrect usage: source {} does not exist'.format(namespace.source))
if not os.path.isdir(namespace.source):
raise ValueError('incorrect usage: source must be a directory')
# 2. try to extract account name and container name from destination string
from .storage_url_helpers import StorageResourceIdentifier
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
if identifier.is_url():
if identifier.filename or identifier.directory:
raise ValueError('incorrect usage: destination must be a file share url')
namespace.destination = identifier.share
if not namespace.account_name:
namespace.account_name = identifier.account_name
namespace.source = os.path.realpath(namespace.source)
def process_file_download_batch_parameters(cmd, namespace):
"""Process the parameters for storage file batch download command"""
import os
# 1. quick check
if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
raise ValueError('incorrect usage: destination must be an existing directory')
# 2. try to extract account name and share name from source string
process_file_batch_source_parameters(cmd, namespace)
def process_file_batch_source_parameters(cmd, namespace):
from .storage_url_helpers import StorageResourceIdentifier
identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.source)
if identifier.is_url():
if identifier.filename or identifier.directory:
raise ValueError('incorrect usage: source should be either share URL or name')
namespace.source = identifier.share
if not namespace.account_name:
namespace.account_name = identifier.account_name
def process_file_download_namespace(namespace):
import os
get_file_path_validator()(namespace)
dest = namespace.file_path
if not dest or os.path.isdir(dest):
namespace.file_path = os.path.join(dest, namespace.file_name) \
if dest else namespace.file_name
def process_metric_update_namespace(namespace):
namespace.hour = namespace.hour == 'true'
namespace.minute = namespace.minute == 'true'
namespace.api = namespace.api == 'true' if namespace.api else None
if namespace.hour is None and namespace.minute is None:
raise argparse.ArgumentError(
None, 'incorrect usage: must specify --hour and/or --minute')
if (namespace.hour or namespace.minute) and namespace.api is None:
raise argparse.ArgumentError(
None, 'incorrect usage: specify --api when hour or minute metrics are enabled')
def validate_subnet(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
subnet = namespace.subnet
subnet_is_id = is_valid_resource_id(subnet)
vnet = namespace.vnet_name
if (subnet_is_id and not vnet) or (not subnet and not vnet):
return
if subnet and not subnet_is_id and vnet:
namespace.subnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet,
child_type_1='subnets',
child_name_1=subnet)
else:
raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
def get_datetime_type(to_string):
""" Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
from datetime import datetime
def datetime_type(string):
""" Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
'%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
if to_string:
return datetime.strptime(string, form).strftime(form)
return datetime.strptime(string, form)
except ValueError:
continue
raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
return datetime_type
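# Doctest-style sketch of the accepted forms listed above:
#
#   >>> get_datetime_type(to_string=True)('2017-12-31T01:11Z')
#   '2017-12-31T01:11Z'
#   >>> get_datetime_type(to_string=False)('2017-12-31')
#   datetime.datetime(2017, 12, 31, 0, 0)
#
# Anything matching none of the four formats raises
# ValueError("Input '<value>' not valid. Valid example: 2000-12-31T12:59:59Z").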
def ipv4_range_type(string):
""" Validates an IPv4 address or address range. """
import re
ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
if not re.match("^{}$".format(ip_format), string):
if not re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string):
raise ValueError
return string
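# Doctest-style sketch: a single address or a dash-separated range passes,
# anything else raises ValueError. Note the pattern only checks digit groups,
# so it does not reject out-of-range octets such as 999.999.999.999.
#
#   >>> ipv4_range_type('10.0.0.1')
#   '10.0.0.1'
#   >>> ipv4_range_type('10.0.0.0-10.0.0.255')
#   '10.0.0.0-10.0.0.255'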
def resource_type_type(loader):
""" Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. """
def impl(string):
t_resources = loader.get_models('common.models#ResourceTypes')
if set(string) - set("sco"):
raise ValueError
return t_resources(_str=''.join(set(string)))
return impl
def services_type(loader):
""" Returns a function which validates that services string contains only a combination of blob, queue, table,
and file. Their shorthand representations are b, q, t, and f. """
def impl(string):
t_services = loader.get_models('common.models#Services')
if set(string) - set("bqtf"):
raise ValueError
return t_services(_str=''.join(set(string)))
return impl
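# Both factories above follow the same pattern: the returned callable rejects
# any character outside the allowed shorthand set and otherwise builds the SDK
# model from the de-duplicated string. For example, 'sco' enables the service,
# container and object resource types, and 'bqtf' enables the blob, queue,
# table and file services; something like 'bqx' raises ValueError. A
# hypothetical call, assuming `loader` is the command loader passed in above:
#
#   >>> services_type(loader)('bq')   # roughly equivalent to Services(_str='bq')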
def get_char_options_validator(types, property_name):
def _validator(namespace):
service_types = set(getattr(namespace, property_name, list()))
if not service_types:
raise ValueError('Missing options --{}.'.format(property_name.replace('_', '-')))
if service_types - set(types):
raise ValueError(
'--{}: only valid values are: {}.'.format(property_name.replace('_', '-'), ', '.join(types)))
setattr(namespace, property_name, service_types)
return _validator
def page_blob_tier_validator(cmd, namespace):
if not namespace.tier:
return
if namespace.blob_type != 'page' and namespace.tier:
raise ValueError('Blob tier is only applicable to page blobs on premium storage accounts.')
try:
namespace.tier = getattr(cmd.get_models('blob.models#PremiumPageBlobTier'), namespace.tier)
except AttributeError:
from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
raise ValueError('Unknown premium page blob tier name. Choose among {}'.format(', '.join(
get_blob_tier_names(cmd.cli_ctx, 'PremiumPageBlobTier'))))
def block_blob_tier_validator(cmd, namespace):
if not namespace.tier:
return
if namespace.blob_type != 'block' and namespace.tier:
raise ValueError('Blob tier is only applicable to block blobs on standard storage accounts.')
try:
namespace.tier = getattr(cmd.get_models('blob.models#StandardBlobTier'), namespace.tier)
except AttributeError:
from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
raise ValueError('Unknown block blob tier name. Choose among {}'.format(', '.join(
get_blob_tier_names(cmd.cli_ctx, 'StandardBlobTier'))))
def blob_tier_validator(cmd, namespace):
if namespace.blob_type == 'page':
page_blob_tier_validator(cmd, namespace)
elif namespace.blob_type == 'block':
block_blob_tier_validator(cmd, namespace)
else:
raise ValueError('Blob tier is only applicable to block or page blob.')
def blob_rehydrate_priority_validator(namespace):
if namespace.blob_type == 'page' and namespace.rehydrate_priority:
raise ValueError('--rehydrate-priority is only applicable to block blob.')
if namespace.tier == 'Archive' and namespace.rehydrate_priority:
raise ValueError('--rehydrate-priority is only applicable to rehydrate blob data from the archive tier.')
if namespace.rehydrate_priority is None:
namespace.rehydrate_priority = 'Standard'
def validate_azcopy_upload_destination_url(cmd, namespace):
client = blob_data_service_factory(cmd.cli_ctx, {
'account_name': namespace.account_name, 'connection_string': namespace.connection_string})
destination_path = namespace.destination_path
if not destination_path:
destination_path = ''
url = client.make_blob_url(namespace.destination_container, destination_path)
namespace.destination = url
del namespace.destination_container
del namespace.destination_path
def validate_azcopy_remove_arguments(cmd, namespace):
usage_string = \
'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
'\n\t --container-name [--name]' \
'\n\tOR --share-name [--path]'
ns = vars(namespace)
# source as blob
container = ns.pop('container_name', None)
blob = ns.pop('blob_name', None)
# source as file
share = ns.pop('share_name', None)
path = ns.pop('path', None)
# ensure either a file or blob source is specified
valid_blob = container and not share
valid_file = share and not container
if not valid_blob and not valid_file:
raise ValueError(usage_string.format('Neither a valid blob or file source is specified'))
if valid_blob and valid_file:
raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
'specified'))
if valid_blob:
client = blob_data_service_factory(cmd.cli_ctx, {
'account_name': namespace.account_name})
if not blob:
blob = ''
url = client.make_blob_url(container, blob)
namespace.service = 'blob'
namespace.target = url
if valid_file:
import os
client = file_data_service_factory(cmd.cli_ctx, {
'account_name': namespace.account_name,
'account_key': namespace.account_key})
dir_name, file_name = os.path.split(path) if path else (None, '')
dir_name = None if dir_name in ('', '.') else dir_name
url = client.make_file_url(share, dir_name, file_name)
namespace.service = 'file'
namespace.target = url
def as_user_validator(namespace):
if hasattr(namespace, 'token_credential') and not namespace.as_user:
raise CLIError('incorrect usage: specify --as-user when --auth-mode login is used to get user delegation key.')
if namespace.as_user:
if namespace.expiry is None:
raise argparse.ArgumentError(
None, 'incorrect usage: specify --expiry when as-user is enabled')
expiry = get_datetime_type(False)(namespace.expiry)
from datetime import datetime, timedelta
if expiry > datetime.utcnow() + timedelta(days=7):
raise argparse.ArgumentError(
None, 'incorrect usage: --expiry should be within 7 days from now')
if ((not hasattr(namespace, 'token_credential') or namespace.token_credential is None) and
(not hasattr(namespace, 'auth_mode') or namespace.auth_mode != 'login')):
raise argparse.ArgumentError(
None, "incorrect usage: specify '--auth-mode login' when as-user is enabled")
def validator_delete_retention_days(namespace):
if namespace.enable_delete_retention is True and namespace.delete_retention_days is None:
raise ValueError(
"incorrect usage: you have to provide value for '--delete-retention-days' when '--enable-delete-retention' "
"is set to true")
if namespace.enable_delete_retention is False and namespace.delete_retention_days is not None:
raise ValueError(
"incorrect usage: '--delete-retention-days' is invalid when '--enable-delete-retention' is set to false")
if namespace.enable_delete_retention is None and namespace.delete_retention_days is not None:
raise ValueError(
"incorrect usage: please specify '--enable-delete-retention true' if you want to set the value for "
"'--delete-retention-days'")
if namespace.delete_retention_days or namespace.delete_retention_days == 0:
if namespace.delete_retention_days < 1:
raise ValueError(
"incorrect usage: '--delete-retention-days' must be greater than or equal to 1")
if namespace.delete_retention_days > 365:
raise ValueError(
"incorrect usage: '--delete-retention-days' must be less than or equal to 365")
def validate_delete_retention_days(namespace):
if namespace.enable_delete_retention is True and namespace.delete_retention_days is None:
raise ValueError(
"incorrect usage: you have to provide value for '--delete-retention-days' when '--enable-delete-retention' "
"is set to true")
if namespace.enable_delete_retention is False and namespace.delete_retention_days is not None:
raise ValueError(
"incorrect usage: '--delete-retention-days' is invalid when '--enable-delete-retention' is set to false")
# pylint: disable=too-few-public-methods
class BlobRangeAddAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
if not namespace.blob_ranges:
namespace.blob_ranges = []
if isinstance(values, list):
values = ' '.join(values)
BlobRange = namespace._cmd.get_models('BlobRestoreRange', resource_type=ResourceType.MGMT_STORAGE)
try:
start_range, end_range = values.split(' ')
except (ValueError, TypeError):
raise CLIError('usage error: --blob-range VARIABLE OPERATOR VALUE')
namespace.blob_ranges.append(BlobRange(
start_range=start_range,
end_range=end_range
))
def validate_private_endpoint_connection_id(cmd, namespace):
if namespace.connection_id:
from azure.cli.core.util import parse_proxy_resource_id
result = parse_proxy_resource_id(namespace.connection_id)
namespace.resource_group_name = result['resource_group']
namespace.account_name = result['name']
namespace.private_endpoint_connection_name = result['child_name_1']
if namespace.account_name and not namespace.resource_group_name:
namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
if not all([namespace.account_name, namespace.resource_group_name, namespace.private_endpoint_connection_name]):
raise CLIError('incorrect usage: [--id ID | --name NAME --account-name NAME]')
del namespace.connection_id
def pop_data_client_auth(ns):
del ns.auth_mode
del ns.account_key
del ns.connection_string
del ns.sas_token
def validate_client_auth_parameter(cmd, ns):
from .sdkutil import get_container_access_type
if ns.public_access:
ns.public_access = get_container_access_type(cmd.cli_ctx, ns.public_access.lower())
if ns.default_encryption_scope and ns.prevent_encryption_scope_override is not None:
# simply try to retrieve the remaining variables from environment variables
if not ns.account_name:
ns.account_name = get_config_value(cmd, 'storage', 'account', None)
if ns.account_name and not ns.resource_group_name:
ns.resource_group_name = _query_account_rg(cmd.cli_ctx, account_name=ns.account_name)[0]
pop_data_client_auth(ns)
elif (ns.default_encryption_scope and ns.prevent_encryption_scope_override is None) or \
(not ns.default_encryption_scope and ns.prevent_encryption_scope_override is not None):
raise CLIError("usage error: You need to specify both --default-encryption-scope and "
"--prevent-encryption-scope-override to set encryption scope information "
"when creating container.")
else:
validate_client_parameters(cmd, ns)
def validate_encryption_scope_client_params(ns):
if ns.encryption_scope:
# will use track2 client and socket_timeout is unused
del ns.socket_timeout
def validate_access_control(namespace):
if namespace.acl and namespace.permissions:
raise CLIError('usage error: invalid when specifying both --acl and --permissions.')
def validate_service_type(services, service_type):
if service_type == 'table':
return 't' in services
if service_type == 'blob':
return 'b' in services
if service_type == 'queue':
return 'q' in services
def validate_logging_version(namespace):
if validate_service_type(namespace.services, 'table') and namespace.version != 1.0:
raise CLIError(
'incorrect usage: for table service, the supported version for logging is `1.0`. For more information, '
'please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/storage-analytics-log-format.')
| 45.009266 | 120 | 0.676686 |
0502674c44ef059c42049a98f38ffc9d89f1c684 | 5,093 | py | Python | end2end_detector.py | penolove/keras-yolo3 | 4b0b9a3c998b35be73a3509baf275ff862a086de | ["MIT"] | null | null | null | end2end_detector.py | penolove/keras-yolo3 | 4b0b9a3c998b35be73a3509baf275ff862a086de | ["MIT"] | null | null | null | end2end_detector.py | penolove/keras-yolo3 | 4b0b9a3c998b35be73a3509baf275ff862a086de | ["MIT"] | null | null | null |
import argparse
import os
import arrow
import cv2
import time
import PIL
from eyewitness.config import (IN_MEMORY, BBOX, RAW_IMAGE_PATH)
from eyewitness.image_id import ImageId
from eyewitness.image_utils import (ImageProducer, swap_channel_rgb_bgr, ImageHandler, Image)
from eyewitness.result_handler.db_writer import BboxPeeweeDbWriter
from peewee import SqliteDatabase
from naive_detector import YoloV3DetectorWrapper
from yolo import YOLO
from line_detection_result_handler import LineAnnotationSender
# class YOLO defines the default value, so suppress any default here
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
'''
Command line options
'''
parser.add_argument(
'--model', type=str,
help='path to model weight file, default: ' + YOLO.get_defaults("model_path")
)
parser.add_argument(
'--anchors', type=str,
help='path to anchor definitions, default: ' + YOLO.get_defaults("anchors_path")
)
parser.add_argument(
'--classes', type=str,
help='path to class definitions, default: ' + YOLO.get_defaults("classes_path")
)
parser.add_argument(
'--gpu_num', type=int,
help='Number of GPU to use, default: ' + str(YOLO.get_defaults("gpu_num"))
)
parser.add_argument(
'--db_path', type=str, default='::memory::',
help='the path used to store detection result records'
)
parser.add_argument(
'--interval_s', type=int, default=3, help='the interval of image generation'
)
parser.add_argument(
'--raw_image_folder', type=str, default=None, help='store raw image to folder if given'
)
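# --- Illustrative sketch (not part of the original script) ---
# Hedged example of how the command line options above might be supplied; the model
# path, database file name and folder name are made up for illustration only.
#
#   python end2end_detector.py \
#       --model model_data/yolo.h5 \
#       --db_path detections.sqlite \
#       --interval_s 3 \
#       --raw_image_folder raw_images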
class InMemoryImageProducer(ImageProducer):
def __init__(self, video_path, interval_s):
self.vid = cv2.VideoCapture(video_path)
self.interval_s = interval_s
if not self.vid.isOpened():
raise IOError("Couldn't open webcam or video")
def produce_method(self):
return IN_MEMORY
def produce_image(self):
while True:
            # clean buffer hack: for Linux V4L capture backend with an internal FIFO
for iter_ in range(5):
self.vid.grab()
_, frame = self.vid.read()
yield PIL.Image.fromarray(swap_channel_rgb_bgr(frame))
time.sleep(self.interval_s)
def image_url_handler(drawn_image_path):
"""if site_domain not set in env, will pass a pickchu image"""
site_domain = os.environ.get('site_domain')
if site_domain is None:
return 'https://upload.wikimedia.org/wikipedia/en/a/a6/Pok%C3%A9mon_Pikachu_art.png'
else:
return '%s/%s' % (site_domain, drawn_image_path)
def line_detection_result_filter(detection_result):
"""
    Used to decide whether a notification should be sent (here: a person was detected).
"""
return any(i.label == 'person' for i in detection_result.detected_objects)
if __name__ == '__main__':
args = parser.parse_args()
raw_image_folder = args.raw_image_folder
# image producer from webcam
image_producer = InMemoryImageProducer(0, interval_s=args.interval_s)
# object detector
object_detector = YoloV3DetectorWrapper(args)
# detection result handlers
result_handlers = []
# update image_info drawn_image_path, insert detection result
database = SqliteDatabase(args.db_path)
bbox_sqlite_handler = BboxPeeweeDbWriter(database)
result_handlers.append(bbox_sqlite_handler)
# setup your line channel token and audience
channel_access_token = os.environ.get('LINE_CHANNEL_ACCESS_TOKEN')
if channel_access_token:
line_annotation_sender = LineAnnotationSender(
channel_access_token=channel_access_token,
image_url_handler=image_url_handler,
detection_result_filter=line_detection_result_filter,
detection_method=BBOX,
update_audience_period=10,
database=database)
result_handlers.append(line_annotation_sender)
for image in image_producer.produce_image():
image_id = ImageId(channel='demo', timestamp=arrow.now().timestamp, file_format='jpg')
# store the raw image or not
if raw_image_folder:
raw_image_path = "%s/%s_%s.%s" % (
raw_image_folder, image_id.channel, image_id.timestamp, image_id.file_format)
ImageHandler.save(image, raw_image_path)
else:
raw_image_path = None
image_obj = Image(image_id, pil_image_obj=image)
bbox_sqlite_handler.register_image(image_id, {RAW_IMAGE_PATH: raw_image_path})
detection_result = object_detector.detect(image_obj)
if len(detection_result.detected_objects) > 0:
# draw and save image, update detection result
drawn_image_path = "detected_image/%s_%s.%s" % (
image_id.channel, image_id.timestamp, image_id.file_format)
ImageHandler.draw_bbox(image, detection_result.detected_objects)
ImageHandler.save(image, drawn_image_path)
detection_result.image_dict['drawn_image_path'] = drawn_image_path
for result_handler in result_handlers:
result_handler.handle(detection_result)
| 34.646259 | 94 | 0.709798 |
8c60a1cc690a0a300ae5ee018dfa35dd018b5f0a | 297 | py | Python | nj_resoldhouse/pipelines.py | TedMore/nj_resoldhouse | 5851135df23b5d09e7162098c724195f1d105613 | ["MIT"] | null | null | null | nj_resoldhouse/pipelines.py | TedMore/nj_resoldhouse | 5851135df23b5d09e7162098c724195f1d105613 | ["MIT"] | null | null | null | nj_resoldhouse/pipelines.py | TedMore/nj_resoldhouse | 5851135df23b5d09e7162098c724195f1d105613 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class NjResoldhousePipeline(object):
def process_item(self, item, spider):
return item
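# --- Illustrative sketch (not part of the original project) ---
# Hedged example of wiring this pipeline up in the project's settings.py, as the
# boilerplate comment at the top of this file suggests; the priority value 300 is
# an arbitrary choice.
#
# ITEM_PIPELINES = {
#     'nj_resoldhouse.pipelines.NjResoldhousePipeline': 300,
# }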
| 24.75 | 65 | 0.707071 |
945a765bb6b0aaaeb864b456df323a834a890d96 | 17,009 | py | Python | readthedocs/rtd_tests/tests/test_sync_versions.py | kennethlarsen/readthedocs.org | 735d630d83f79ae24772d10e66fd35b8f5675a30 | ["MIT"] | 2 | 2018-01-14T14:04:00.000Z | 2021-02-07T19:25:45.000Z | readthedocs/rtd_tests/tests/test_sync_versions.py | kennethlarsen/readthedocs.org | 735d630d83f79ae24772d10e66fd35b8f5675a30 | ["MIT"] | 4 | 2021-03-31T20:17:21.000Z | 2021-12-13T20:49:19.000Z | readthedocs/rtd_tests/tests/test_sync_versions.py | kennethlarsen/readthedocs.org | 735d630d83f79ae24772d10e66fd35b8f5675a30 | ["MIT"] | 1 | 2021-01-28T19:18:28.000Z | 2021-01-28T19:18:28.000Z |
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import json
from django.test import TestCase
from readthedocs.builds.constants import BRANCH, STABLE, TAG
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
class TestSyncVersions(TestCase):
fixtures = ['eric', 'test_data']
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
Version.objects.create(
project=self.pip,
identifier='origin/master',
verbose_name='master',
active=True,
machine=True,
type=BRANCH,
)
Version.objects.create(
project=self.pip,
identifier='to_delete',
verbose_name='to_delete',
active=False,
type=TAG,
)
def test_proper_url_no_slash(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
}
r = self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
json_data = json.loads(r.content)
self.assertEqual(json_data['deleted_versions'], ['to_delete'])
self.assertEqual(json_data['added_versions'], ['to_add'])
def test_new_tag_update_active(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=True,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_9 = Version.objects.get(slug='0.9')
self.assertTrue(version_9.active)
# Version 0.9 becomes the stable version
self.assertEqual(
version_9.identifier,
self.pip.get_stable_version().identifier,
)
def test_new_tag_update_inactive(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=False,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
# Version 0.9 becomes the stable version and active
version_9 = Version.objects.get(slug='0.9')
self.assertEqual(
version_9.identifier,
self.pip.get_stable_version().identifier,
)
self.assertTrue(version_9.active)
# Version 0.8.3 is still inactive
version_8 = Version.objects.get(slug='0.8.3')
self.assertFalse(version_8.active)
class TestStableVersion(TestCase):
fixtures = ['eric', 'test_data']
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
def test_stable_versions(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
],
}
self.assertRaises(
Version.DoesNotExist,
Version.objects.get,
slug=STABLE,
)
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_pre_release_are_not_stable(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0a1', 'verbose_name': '1.0a1'},
{'identifier': '0.9', 'verbose_name': '0.9'},
{'identifier': '0.9b1', 'verbose_name': '0.9b1'},
{'identifier': '0.8', 'verbose_name': '0.8'},
{'identifier': '0.8rc2', 'verbose_name': '0.8rc2'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_post_releases_are_stable(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0', 'verbose_name': '1.0'},
{'identifier': '1.0.post1', 'verbose_name': '1.0.post1'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.post1')
def test_invalid_version_numbers_are_not_stable(self):
self.pip.versions.all().delete()
version_post_data = {
'branches': [],
'tags': [
{
'identifier': 'this.is.invalid',
'verbose_name': 'this.is.invalid'
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertFalse(Version.objects.filter(slug=STABLE).exists())
version_post_data = {
'branches': [],
'tags': [
{
'identifier': '1.0',
'verbose_name': '1.0',
},
{
'identifier': 'this.is.invalid',
'verbose_name': 'this.is.invalid'
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
def test_update_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
version_post_data = {
'tags': [
{
'identifier': '1.0.0',
'verbose_name': '1.0.0',
},
]
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
version_post_data = {
'tags': [
{
'identifier': '0.7',
'verbose_name': '0.7',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
def test_update_inactive_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertEqual(version_stable.identifier, '0.9')
version_stable.active = False
version_stable.save()
version_post_data['tags'].append({
'identifier': '1.0.0',
'verbose_name': '1.0.0',
})
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertFalse(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_stable_version_tags_over_branches(self):
version_post_data = {
'branches': [
# 2.0 development
{'identifier': 'origin/2.0', 'verbose_name': '2.0'},
{'identifier': 'origin/0.9.1rc1', 'verbose_name': '0.9.1rc1'},
],
'tags': [
{'identifier': '1.0rc1', 'verbose_name': '1.0rc1'},
{'identifier': '0.9', 'verbose_name': '0.9'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
# If there is a branch with a higher version, tags takes preferences
# over the branches to select the stable version
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
version_post_data['tags'].append({
'identifier': '1.0',
'verbose_name': '1.0',
})
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
def test_stable_version_same_id_tag_branch(self):
version_post_data = {
'branches': [
# old 1.0 development branch
{'identifier': 'origin/1.0', 'verbose_name': '1.0'},
],
'tags': [
# tagged 1.0 final version
{'identifier': '1.0', 'verbose_name': '1.0'},
{'identifier': '0.9', 'verbose_name': '0.9'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
self.assertEqual(version_stable.type, 'tag')
def test_unicode(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': 'foo-£', 'verbose_name': 'foo-£'},
],
}
resp = self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
def test_user_defined_stable_version_with_tags(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=True,
)
# A pre-existing active stable branch that was machine created
Version.objects.create(
project=self.pip,
identifier='foo',
type='branch',
verbose_name='stable',
active=True,
machine=True,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
# A new user-defined stable branch
{
'identifier': 'origin/stable',
'verbose_name': 'stable',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
# Didn't update to newest tag
version_9 = Version.objects.get(slug='0.9')
self.assertFalse(version_9.active)
# Did update to user-defined stable version
version_stable = Version.objects.get(slug='stable')
self.assertFalse(version_stable.machine)
self.assertTrue(version_stable.active)
self.assertEqual('origin/stable', self.pip.get_stable_version().identifier)
# Check that posting again doesn't change anything from current state.
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual('origin/stable', self.pip.get_stable_version().identifier)
| 31.556586 | 83 | 0.491857 |
fd338e1b3f2941410376544f9ebe22dd3b60b564 | 1,986 | py | Python | script.mrknow.urlresolver/lib/urlresolver9/plugins/sharerepo.py | mrknow/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | ["Apache-2.0"] | 105 | 2015-11-28T00:03:11.000Z | 2021-05-05T20:47:42.000Z | script.mrknow.urlresolver/lib/urlresolver9/plugins/sharerepo.py | rrosajp/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | ["Apache-2.0"] | 918 | 2015-11-28T14:12:40.000Z | 2022-03-23T20:24:49.000Z | script.mrknow.urlresolver/lib/urlresolver9/plugins/sharerepo.py | rrosajp/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | ["Apache-2.0"] | 111 | 2015-12-01T14:06:10.000Z | 2020-08-01T10:44:39.000Z |
'''
Sharerepo urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urllib2
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class SharerepoResolver(UrlResolver):
name = "sharerepo"
domains = ["sharerepo.com"]
pattern = '(?://|\.)(sharerepo\.com)(?:/f)?/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {
'User-Agent': common.IE_USER_AGENT,
'Referer': web_url
}
try:
html = self.net.http_GET(web_url, headers=headers).content
except urllib2.HTTPError as e:
if e.code == 404:
web_url = 'http://sharerepo.com/%s' % media_id
html = self.net.http_GET(web_url, headers=headers).content
else:
raise
link = re.search("file\s*:\s*'([^']+)", html)
if link:
common.log_utils.log_debug('ShareRepo Link Found: %s' % link.group(1))
return link.group(1) + '|' + urllib.urlencode({'User-Agent': common.IE_USER_AGENT})
else:
raise ResolverError('Unable to resolve ShareRepo Link')
def get_url(self, host, media_id):
return 'http://sharerepo.com/f/%s' % media_id
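# --- Illustrative sketch (not part of the original plugin) ---
# Hedged example of how the `pattern` class attribute above splits a link into the
# host and media id that the resolver works with; the URL is made up.
def _demo_sharerepo_pattern():
    import re
    match = re.search(SharerepoResolver.pattern, 'http://sharerepo.com/f/abc123')
    return match.groups()  # -> ('sharerepo.com', 'abc123')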
| 34.241379 | 95 | 0.656596 |
ac743b6e8a8b04df3a5ace42ec013316e3709b47 | 102 | py | Python | core/miscellaneous.py | 0alpha/magma | d302029b1f36ba1fdae6c776a47405ceb72e9817 | ["MIT"] | 10 | 2018-03-18T13:00:44.000Z | 2021-07-10T09:22:50.000Z | core/miscellaneous.py | 0alpha/magma | d302029b1f36ba1fdae6c776a47405ceb72e9817 | ["MIT"] | 1 | 2018-06-05T07:32:50.000Z | 2018-06-05T07:32:50.000Z | core/miscellaneous.py | 0alpha/magma | d302029b1f36ba1fdae6c776a47405ceb72e9817 | ["MIT"] | 5 | 2018-06-05T07:12:03.000Z | 2021-12-09T19:08:03.000Z |
import time
def format_time(millis):
return time.strftime('%H:%M:%S', time.gmtime(millis/1000))
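# --- Illustrative sketch (not part of the original module) ---
# Hedged example: 3,723,000 ms is 1 h 2 min 3 s, so the helper above yields '01:02:03'.
def _demo_format_time():
    assert format_time(3723000) == '01:02:03'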
| 17 | 62 | 0.696078 |
27184996077a27b142d68df24e0284e55439a7bf | 5,838 | py | Python | homeassistant/config/custom_components/home_connect_alt/switch.py | yuvalabou/homeassistant | e25885db33d2144455928d07d7e9b044278ba291 | ["Unlicense"] | 5 | 2022-02-17T09:22:24.000Z | 2022-03-15T20:14:50.000Z | homeassistant/config/custom_components/home_connect_alt/switch.py | yuvalabou/homeassistant | e25885db33d2144455928d07d7e9b044278ba291 | ["Unlicense"] | 11 | 2022-02-11T06:56:55.000Z | 2022-03-20T15:53:43.000Z | homeassistant/config/custom_components/home_connect_alt/switch.py | yuvalabou/homeassistant | e25885db33d2144455928d07d7e9b044278ba291 | ["Unlicense"] | 5 | 2022-02-13T11:15:58.000Z | 2022-03-05T19:07:57.000Z |
""" Implement the Switch entities of this implementation """
from __future__ import annotations
from typing import Any
from home_connect_async import Appliance, HomeConnect, HomeConnectError, Events
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from .common import InteractiveEntityBase, EntityManager
from .const import DOMAIN, SPECIAL_ENTITIES
async def async_setup_entry(hass:HomeAssistant , config_entry:ConfigType, async_add_entities:AddEntitiesCallback) -> None:
"""Add sensors for passed config_entry in HA."""
homeconnect:HomeConnect = hass.data[DOMAIN]['homeconnect']
entity_manager = EntityManager(async_add_entities)
def add_appliance(appliance:Appliance) -> None:
if appliance.available_programs:
for program in appliance.available_programs.values():
if program.options:
for option in program.options.values():
if option.key not in SPECIAL_ENTITIES['ignore'] and (option.type == "Boolean" or isinstance(option.value, bool)):
device = OptionSwitch(appliance, option.key)
entity_manager.add(device)
for setting in appliance.settings.values():
if setting.key not in SPECIAL_ENTITIES['ignore'] and (setting.type == "Boolean" or isinstance(setting.value, bool)):
device = SettingsSwitch(appliance, setting.key)
entity_manager.add(device)
entity_manager.register()
def remove_appliance(appliance:Appliance) -> None:
entity_manager.remove_appliance(appliance)
homeconnect.register_callback(add_appliance, [Events.PAIRED, Events.PROGRAM_SELECTED])
homeconnect.register_callback(remove_appliance, Events.DEPAIRED)
for appliance in homeconnect.appliances.values():
add_appliance(appliance)
class OptionSwitch(InteractiveEntityBase, SwitchEntity):
""" Switch for binary options """
@property
def device_class(self) -> str:
return f"{DOMAIN}__options"
@property
def name_ext(self) -> str|None:
if self._appliance.available_programs:
for program in self._appliance.available_programs.values():
if program.options and self._key in program.options and program.options[self._key].name:
return program.options[self._key].name
return None
@property
def icon(self) -> str:
return self._conf.get('icon', 'mdi:office-building-cog')
@property
def available(self) -> bool:
return self.program_option_available
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
if self.program_option_available:
return self._appliance.selected_program.options[self._key].value
return None
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
try:
await self._appliance.async_set_option(self._key, True)
except HomeConnectError as ex:
if ex.error_description:
raise HomeAssistantError(f"Failed to set the option: {ex.error_description} ({ex.code})")
else:
raise HomeAssistantError(f"Failed to set the option: ({ex.code})")
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
try:
await self._appliance.async_set_option(self._key, False)
except HomeConnectError as ex:
if ex.error_description:
raise HomeAssistantError(f"Failed to set the option: {ex.error_description} ({ex.code})")
else:
raise HomeAssistantError(f"Failed to set the option: ({ex.code})")
async def async_on_update(self, appliance:Appliance, key:str, value) -> None:
self.async_write_ha_state()
class SettingsSwitch(InteractiveEntityBase, SwitchEntity):
""" Switch for binary settings """
@property
def device_class(self) -> str:
return f"{DOMAIN}__settings"
@property
def name_ext(self) -> str|None:
if self._key in self._appliance.settings and self._appliance.settings[self._key].name:
return self._appliance.settings[self._key].name
return None
@property
def icon(self) -> str:
return self._conf.get('icon', 'mdi:tune')
@property
def available(self) -> bool:
return self._key in self._appliance.settings \
and super().available \
and (
"BSH.Common.Status.RemoteControlActive" not in self._appliance.status or
self._appliance.status["BSH.Common.Status.RemoteControlActive"]
)
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
if self._key in self._appliance.settings:
return self._appliance.settings[self._key].value
return None
async def async_turn_on(self, **kwargs: Any) -> None:
try:
await self._appliance.async_apply_setting(self._key, True)
except HomeConnectError as ex:
if ex.error_description:
raise HomeAssistantError(f"Failed to apply the setting: {ex.error_description} ({ex.code})")
else:
raise HomeAssistantError(f"Failed to apply the setting: ({ex.code})")
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self._appliance.async_apply_setting(self._key, False)
async def async_on_update(self, appliance:Appliance, key:str, value) -> None:
self.async_write_ha_state()
| 38.662252 | 137 | 0.667181 |
5a6ab35298c09adaf625595ee3b2ef77f9b92935 | 661 | py | Python | goticket/users/tests/test_drf_urls.py | pmburu/GoTicketV2 | 97ca68a9ca5e1c5793c03c6983c5b343f59dc4d2 | ["MIT"] | null | null | null | goticket/users/tests/test_drf_urls.py | pmburu/GoTicketV2 | 97ca68a9ca5e1c5793c03c6983c5b343f59dc4d2 | ["MIT"] | 7 | 2022-02-14T23:32:37.000Z | 2022-03-31T23:29:05.000Z | goticket/users/tests/test_drf_urls.py | pmburu/GoTicketV2 | 97ca68a9ca5e1c5793c03c6983c5b343f59dc4d2 | ["MIT"] | null | null | null |
import pytest
from django.urls import resolve, reverse
from goticket.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
assert (
reverse("api:user-detail", kwargs={"username": user.username})
== f"/api/users/{user.username}/"
)
assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
assert reverse("api:user-list") == "/api/users/"
assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
assert reverse("api:user-me") == "/api/users/me/"
assert resolve("/api/users/me/").view_name == "api:user-me"
| 26.44 | 81 | 0.668684 |
e75b2790ee39809f0af144c1ba2b986396be6783 | 1,810 | py | Python | projects/faces/pcn/test.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | ["BSD-2-Clause"] | null | null | null | projects/faces/pcn/test.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | ["BSD-2-Clause"] | null | null | null | projects/faces/pcn/test.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | ["BSD-2-Clause"] | null | null | null |
from pcn import *
import unittest
class TestPCN(unittest.TestCase):
    def test_smooth_angle(self):
a = 120
b = 60
output = smooth_angle(a, b)
self.assertEqual(output, 90)
def test_iou(self):
w1 = Window2(100, 20, 40, 60, 80.5, 0.5, 1)
w2 = Window2(90, 22, 38, 50, 76, 0.6, 2)
iou = IoU(w1, w2)
self.assertAlmostEqual(0.482759, iou, delta=0.001)
def test_nms(self):
w1 = Window2(100, 20, 40, 60, 80.5, 0.5, 1)
w2 = Window2(90, 22, 38, 50, 76, 0.6, 2)
w3 = Window2(90, 21, 40, 50, 76, 0.6, 3)
w4 = Window2(85, 22, 38, 60, 76, 0.8, 4)
winlist = [w1, w2, w3, w4]
winlist = NMS(winlist, True, 0.8)
expect = [4, 3, 1]
self.assertEqual(expect, [w.conf for w in winlist])
winlist = NMS(winlist, False, 0.3)
expect = [4]
self.assertEqual(expect, [w.conf for w in winlist])
def test_deleteFP(self):
w1 = Window2(100, 20, 40, 60, 80.5, 0.5, 1)
w2 = Window2(90, 22, 38, 50, 76, 0.6, 2)
w3 = Window2(90, 21, 40, 50, 76, 0.6, 3)
w4 = Window2(85, 22, 38, 60, 76, 0.8, 4)
winlist = [w1, w2, w3, w4]
winlist = deleteFP(winlist)
expect = [4, 3, 2, 1]
self.assertEqual(expect, [w.conf for w in winlist])
def test_smooth_windows(self):
w1 = Window2(100, 20, 40, 60, 80.5, 0.5, 1)
w2 = Window2(90, 22, 38, 50, 75, 0.6, 2)
w3 = Window2(90, 21, 40, 50, 24, 0.6, 3)
w4 = Window2(85, 22, 38, 60, 76, 0.8, 4)
winlist = [w1, w3, w2, w4]
winlist = smooth_window(winlist)
for win in winlist:
print(win.x, win.y, win.w, win.h, win.angle, win.conf)
self.assertTrue(True)
if __name__ == '__main__':
    unittest.main()
| 34.807692 | 66 | 0.527072 |
88deadcd31da26e8fa46c82f8684f5fda405572b | 9,215 | py | Python | autokeras/utils/io_utils.py | lc0/autokeras | 413508a5f6aaa38ee7aba719aadb057c0b029591 | ["Apache-2.0"] | 3,979 | 2019-04-02T02:01:52.000Z | 2022-03-31T16:53:14.000Z | autokeras/utils/io_utils.py | lc0/autokeras | 413508a5f6aaa38ee7aba719aadb057c0b029591 | ["Apache-2.0"] | 939 | 2019-04-02T18:13:53.000Z | 2022-03-31T16:25:08.000Z | autokeras/utils/io_utils.py | lc0/autokeras | 413508a5f6aaa38ee7aba719aadb057c0b029591 | ["Apache-2.0"] | 826 | 2019-04-02T00:53:31.000Z | 2022-03-31T10:11:02.000Z |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Optional
from typing import Tuple
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.preprocessing import dataset_utils
WHITELIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")
def save_json(path, obj):
obj = json.dumps(obj)
with tf.io.gfile.GFile(path, "w") as f:
f.write(obj)
def load_json(path):
with tf.io.gfile.GFile(path, "r") as f:
obj = f.read()
return json.loads(obj)
def get_training_or_validation_split(samples, labels, validation_split, subset):
"""Potentially restict samples & labels to a training or validation split.
# Arguments
samples: List of elements.
labels: List of corresponding labels.
validation_split: Float, fraction of data to reserve for validation.
subset: Subset of the data to return.
Either "training", "validation", or None.
If None, we return all of the data.
# Returns
tuple (samples, labels), potentially restricted to the specified subset.
"""
if not validation_split:
return samples, labels
num_val_samples = int(validation_split * len(samples))
if subset == "training":
print("Using %d files for training." % (len(samples) - num_val_samples,))
samples = samples[:-num_val_samples]
labels = labels[:-num_val_samples]
elif subset == "validation":
print("Using %d files for validation." % (num_val_samples,))
samples = samples[-num_val_samples:]
labels = labels[-num_val_samples:]
else:
raise ValueError(
'`subset` must be either "training" '
'or "validation", received: %s' % (subset,)
)
return samples, labels
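# --- Illustrative sketch (not part of the original AutoKeras module) ---
# Hedged example of the split helper above: with a 0.2 validation split over ten
# samples, "training" keeps the first eight items and "validation" the last two.
def _demo_training_or_validation_split():
    samples = list(range(10))
    labels = list('abcdefghij')
    train = get_training_or_validation_split(samples, labels, 0.2, 'training')
    val = get_training_or_validation_split(samples, labels, 0.2, 'validation')
    return train, val  # ([0..7], ['a'..'h']) and ([8, 9], ['i', 'j'])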
def text_dataset_from_directory(
directory: str,
batch_size: int = 32,
max_length: Optional[int] = None,
shuffle: bool = True,
seed: Optional[int] = None,
validation_split: Optional[float] = None,
subset: Optional[str] = None,
) -> tf.data.Dataset:
"""Generates a `tf.data.Dataset` from text files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_text_1.txt
......a_text_2.txt
...class_b/
......b_text_1.txt
......b_text_2.txt
```
Then calling `text_dataset_from_directory(main_directory)`
will return a `tf.data.Dataset` that yields batches of texts from
the subdirectories `class_a` and `class_b`, together with labels
'class_a' and 'class_b'.
Only `.txt` files are supported at this time.
# Arguments
directory: Directory where the data is located.
If `labels` is "inferred", it should contain
subdirectories, each containing text files for a class.
Otherwise, the directory structure is ignored.
batch_size: Size of the batches of data. Defaults to 32.
max_length: Maximum size of a text string. Texts longer than this will
be truncated to `max_length`.
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation".
Only used if `validation_split` is set.
# Returns
A `tf.data.Dataset` object, which yields a tuple `(texts, labels)`,
        where both have shape `(batch_size,)` and type of tf.string.
"""
if seed is None:
seed = np.random.randint(1e6)
file_paths, labels, class_names = dataset_utils.index_directory(
directory, "inferred", formats=(".txt",), shuffle=shuffle, seed=seed
)
file_paths, labels = get_training_or_validation_split(
file_paths, labels, validation_split, subset
)
strings = tf.data.Dataset.from_tensor_slices(file_paths)
strings = strings.map(tf.io.read_file)
if max_length is not None:
strings = strings.map(lambda x: tf.strings.substr(x, 0, max_length))
labels = np.array(class_names)[np.array(labels)]
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((strings, labels))
dataset = dataset.batch(batch_size)
return dataset
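# --- Illustrative sketch (not part of the original AutoKeras module) ---
# Hedged usage example for the text loader above; the directory name and split
# values are made up and assume the class-per-subdirectory layout from the docstring.
def _demo_text_dataset_from_directory():
    return text_dataset_from_directory(
        'datasets/reviews',  # hypothetical main_directory with class_a/, class_b/
        batch_size=32,
        max_length=512,
        validation_split=0.2,
        subset='training',
        seed=123,
    )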
def image_dataset_from_directory(
directory: str,
batch_size: int = 32,
color_mode: str = "rgb",
image_size: Tuple[int, int] = (256, 256),
interpolation: str = "bilinear",
shuffle: bool = True,
seed: Optional[int] = None,
validation_split: Optional[float] = None,
subset: Optional[str] = None,
) -> tf.data.Dataset:
"""Generates a `tf.data.Dataset` from image files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
```
Then calling `image_dataset_from_directory(main_directory)`
will return a `tf.data.Dataset` that yields batches of images from
the subdirectories `class_a` and `class_b`, together with labels
'class_a' and 'class_b'.
Supported image formats: jpeg, png, bmp, gif.
Animated gifs are truncated to the first frame.
# Arguments
directory: Directory where the data is located.
If `labels` is "inferred", it should contain
subdirectories, each containing images for a class.
Otherwise, the directory structure is ignored.
batch_size: Size of the batches of data. Default: 32.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
image_size: Size to resize images to after they are read from disk.
Defaults to `(256, 256)`.
Since the pipeline processes batches of images that must all have
the same size, this must be provided.
interpolation: String, the interpolation method used when resizing images.
Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`,
`area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation".
Only used if `validation_split` is set.
# Returns
        A `tf.data.Dataset` object, which yields a tuple `(images, labels)`,
        where `images` has shape `(batch_size, image_size[0], image_size[1],
        num_channels)` and `labels` has shape `(batch_size,)` and type of
        tf.string.
- if `color_mode` is `grayscale`, there's 1 channel in the image
tensors.
- if `color_mode` is `rgb`, there are 3 channel in the image tensors.
- if `color_mode` is `rgba`, there are 4 channel in the image tensors.
"""
if color_mode == "rgb":
num_channels = 3
elif color_mode == "rgba":
num_channels = 4
elif color_mode == "grayscale":
num_channels = 1
else:
raise ValueError(
            '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
"Received: %s" % (color_mode,)
)
if seed is None:
seed = np.random.randint(1e6)
image_paths, labels, class_names = dataset_utils.index_directory(
directory, "inferred", formats=WHITELIST_FORMATS, shuffle=shuffle, seed=seed
)
image_paths, labels = get_training_or_validation_split(
image_paths, labels, validation_split, subset
)
images = tf.data.Dataset.from_tensor_slices(image_paths)
images = images.map(
lambda img: path_to_image(img, num_channels, image_size, interpolation)
)
labels = np.array(class_names)[np.array(labels)]
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.batch(batch_size)
return dataset
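# --- Illustrative sketch (not part of the original AutoKeras module) ---
# Hedged usage example for the image loader above; the directory name is made up
# and assumes the class-per-subdirectory layout described in the docstring.
def _demo_image_dataset_from_directory():
    return image_dataset_from_directory(
        'datasets/photos',  # hypothetical main_directory with class_a/, class_b/
        batch_size=32,
        color_mode='rgb',
        image_size=(256, 256),
        validation_split=0.2,
        subset='validation',
        seed=123,
    )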
def path_to_image(image, num_channels, image_size, interpolation):
image = tf.io.read_file(image)
image = tf.io.decode_image(image, channels=num_channels, expand_animations=False)
image = tf.image.resize(image, image_size, method=interpolation)
image.set_shape((image_size[0], image_size[1], num_channels))
return image
| 36.56746 | 85 | 0.660119 |
cb86ef9c345a6a15f9b72213fb84e8654651eebc | 1,257 | py | Python | isiscb/isisdata/migrations/0005_searchquery.py | bgopalachary/IsisCB | c28e3f504eea60ebeff38318d8bb2071abb28ebb | ["MIT"] | 4 | 2016-01-25T20:35:33.000Z | 2020-04-07T15:39:52.000Z | isiscb/isisdata/migrations/0005_searchquery.py | bgopalachary/IsisCB | c28e3f504eea60ebeff38318d8bb2071abb28ebb | ["MIT"] | 41 | 2015-08-19T17:34:41.000Z | 2022-03-11T23:19:01.000Z | isiscb/isisdata/migrations/0005_searchquery.py | bgopalachary/IsisCB | c28e3f504eea60ebeff38318d8bb2071abb28ebb | ["MIT"] | 2 | 2020-11-25T20:18:18.000Z | 2021-06-24T15:15:41.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('isisdata', '0004_auto_20151025_2110'),
]
operations = [
migrations.CreateModel(
name='SearchQuery',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('parameters', models.CharField(max_length=500)),
('search_models', models.CharField(max_length=500, null=True, blank=True)),
('selected_facets', models.CharField(max_length=500, null=True, blank=True)),
('name', models.CharField(help_text=b'\n Provide a memorable name so that you can find this search later.', max_length=255, null=True, blank=True)),
('saved', models.BooleanField(default=False)),
('user', models.ForeignKey(related_name='searches', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
]
| 41.9 | 167 | 0.636436 |
a4f18e1ee8b3e5917fb0eb2f227147a8965393ef | 2,085 | py | Python | research/cv/ssd_mobilenetV2_FPNlite/src/init_params.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/ssd_mobilenetV2_FPNlite/src/init_params.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/ssd_mobilenetV2_FPNlite/src/init_params.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | ["Apache-2.0"] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameters utils"""
from mindspore.common.initializer import initializer, TruncatedNormal
def init_net_param(network, initialize_mode='TruncatedNormal'):
"""Init the parameters in net."""
params = network.trainable_params()
for p in params:
if 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name:
if initialize_mode == 'TruncatedNormal':
p.set_data(initializer(TruncatedNormal(0.02), p.data.shape, p.data.dtype))
else:
p.set_data(initialize_mode, p.data.shape, p.data.dtype)
def load_backbone_params(network, param_dict):
"""Init the parameters from pre-train model, default is mobilenetv2."""
for _, param in network.parameters_and_names():
param_name = param.name.replace('network.backbone.', '')
name_split = param_name.split('.')
if 'features_1' in param_name:
param_name = param_name.replace('features_1', 'features')
if 'features_2' in param_name:
param_name = '.'.join(['features', str(int(name_split[1]) + 14)] + name_split[2:])
if param_name in param_dict:
param.set_data(param_dict[param_name].data)
def filter_checkpoint_parameter(param_dict):
"""remove useless parameters"""
for key in list(param_dict.keys()):
if 'multi_loc_layers' in key or 'multi_cls_layers' in key:
del param_dict[key]
| 44.361702 | 94 | 0.666667 |
be2300c02deaa684b9a7d8252bb2c8ef3e87806c | 18,269 | py | Python | backend/api.py | prasys/detecting-fake-text | de1fe92b726fa50849517f02233f86ee62f2435b | ["Apache-2.0"] | null | null | null | backend/api.py | prasys/detecting-fake-text | de1fe92b726fa50849517f02233f86ee62f2435b | ["Apache-2.0"] | null | null | null | backend/api.py | prasys/detecting-fake-text | de1fe92b726fa50849517f02233f86ee62f2435b | ["Apache-2.0"] | null | null | null |
import numpy as np
import torch
import time
from pytorch_pretrained_bert import (GPT2LMHeadModel, GPT2Tokenizer,
BertTokenizer, BertForMaskedLM)
from .class_register import register_api
class AbstractLanguageChecker():
"""
Abstract Class that defines the Backend API of GLTR.
To extend the GLTR interface, you need to inherit this and
fill in the defined functions.
"""
def __init__(self):
'''
In the subclass, you need to load all necessary components
for the other functions.
Typically, this will comprise a tokenizer and a model.
'''
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
def check_probabilities(self, in_text, topk=40):
'''
Function that GLTR interacts with to check the probabilities of words
Params:
- in_text: str -- The text that you want to check
- topk: int -- Your desired truncation of the head of the distribution
Output:
- payload: dict -- The wrapper for results in this function, described below
Payload values
==============
bpe_strings: list of str -- Each individual token in the text
real_topk: list of tuples -- (ranking, prob) of each token
pred_topk: list of list of tuple -- (word, prob) for all topk
'''
raise NotImplementedError
def postprocess(self, token):
"""
clean up the tokens from any special chars and encode
leading space by UTF-8 code '\u0120', linebreak with UTF-8 code 266 '\u010A'
:param token: str -- raw token text
:return: str -- cleaned and re-encoded token text
"""
raise NotImplementedError
def top_k_logits(logits, k):
'''
Filters logits to only the top k choices
from https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/run_gpt2.py
'''
if k == 0:
return logits
values, _ = torch.topk(logits, k)
min_values = values[:, -1]
return torch.where(logits < min_values,
torch.ones_like(logits, dtype=logits.dtype) * -1e10,
logits)
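# --- Illustrative sketch (not part of the original GLTR backend) ---
# Hedged worked example of the top-k filter above: with k=2 only the two largest
# logits survive, everything else is pushed to -1e10 before any later softmax.
def _demo_top_k_logits():
    logits = torch.tensor([[1.0, 3.0, 2.0, 0.5]])
    return top_k_logits(logits, k=2)  # -> tensor([[-1e10, 3.0, 2.0, -1e10]])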
@register_api(name='gpt-2-small')
class LM(AbstractLanguageChecker):
def __init__(self, model_name_or_path="gpt2"):
super(LM, self).__init__()
self.enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
self.model.to(self.device)
self.model.eval()
self.start_token = '<|endoftext|>'
print("Loaded GPT-2 model!")
def check_probabilities(self, in_text, topk=40):
# Process input
start_t = torch.full((1, 1),
self.enc.encoder[self.start_token],
device=self.device,
dtype=torch.long)
context = self.enc.encode(in_text)
context = torch.tensor(context,
device=self.device,
dtype=torch.long).unsqueeze(0)
context = torch.cat([start_t, context], dim=1)
# Forward through the model
logits, _ = self.model(context)
# construct target and pred
yhat = torch.softmax(logits[0, :-1], dim=-1)
y = context[0, 1:]
# Sort the predictions for each timestep
sorted_preds = np.argsort(-yhat.data.cpu().numpy())
# [(pos, prob), ...]
real_topk_pos = list(
[int(np.where(sorted_preds[i] == y[i].item())[0][0])
for i in range(y.shape[0])])
real_topk_probs = yhat[np.arange(
0, y.shape[0], 1), y].data.cpu().numpy().tolist()
real_topk_probs = list(map(lambda x: round(x, 5), real_topk_probs))
real_topk = list(zip(real_topk_pos, real_topk_probs))
# [str, str, ...]
bpe_strings = [self.enc.decoder[s.item()] for s in context[0]]
bpe_strings = [self.postprocess(s) for s in bpe_strings]
# [[(pos, prob), ...], [(pos, prob), ..], ...]
pred_topk = [
list(zip([self.enc.decoder[p] for p in sorted_preds[i][:topk]],
list(map(lambda x: round(x, 5),
yhat[i][sorted_preds[i][
:topk]].data.cpu().numpy().tolist()))))
for i in range(y.shape[0])]
pred_topk = [[(self.postprocess(t[0]), t[1]) for t in pred] for pred in pred_topk]
payload = {'bpe_strings': bpe_strings,
'real_topk': real_topk,
'pred_topk': pred_topk}
if torch.cuda.is_available():
torch.cuda.empty_cache()
return payload
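# --- Illustrative sketch (not part of the original GLTR backend) ---
# Hedged example of consuming the payload documented above: the fraction of real
# tokens whose rank falls inside the model's top-k predictions, a quantity that
# GLTR-style front ends typically visualise. `payload` is assumed to come from
# LM().check_probabilities(...).
def _fraction_in_top_k(payload, k=10):
    ranks = [pos for pos, _prob in payload['real_topk']]
    return sum(1 for r in ranks if r < k) / max(len(ranks), 1)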
@register_api(name='amazon')
class LM(AbstractLanguageChecker):
def __init__(self, model_name_or_path="/data/pradeesh/detecting-fake-text/pytorch/"):
super(LM, self).__init__()
self.enc = GPT2Tokenizer.from_pretrained(model_name_or_path)
self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
self.model.to(self.device)
self.model.eval()
self.start_token = '<|endoftext|>'
print("Loaded GPT-2 model!")
def check_probabilities(self, in_text, topk=40):
# Process input
start_t = torch.full((1, 1),
self.enc.encoder[self.start_token],
device=self.device,
dtype=torch.long)
context = self.enc.encode(in_text)
context = torch.tensor(context,
device=self.device,
dtype=torch.long).unsqueeze(0)
context = torch.cat([start_t, context], dim=1)
# Forward through the model
logits, _ = self.model(context)
# construct target and pred
yhat = torch.softmax(logits[0, :-1], dim=-1)
y = context[0, 1:]
# Sort the predictions for each timestep
sorted_preds = np.argsort(-yhat.data.cpu().numpy())
# [(pos, prob), ...]
real_topk_pos = list(
[int(np.where(sorted_preds[i] == y[i].item())[0][0])
for i in range(y.shape[0])])
real_topk_probs = yhat[np.arange(
0, y.shape[0], 1), y].data.cpu().numpy().tolist()
real_topk_probs = list(map(lambda x: round(x, 5), real_topk_probs))
real_topk = list(zip(real_topk_pos, real_topk_probs))
# [str, str, ...]
bpe_strings = [self.enc.decoder[s.item()] for s in context[0]]
bpe_strings = [self.postprocess(s) for s in bpe_strings]
# [[(pos, prob), ...], [(pos, prob), ..], ...]
pred_topk = [
list(zip([self.enc.decoder[p] for p in sorted_preds[i][:topk]],
list(map(lambda x: round(x, 5),
yhat[i][sorted_preds[i][
:topk]].data.cpu().numpy().tolist()))))
for i in range(y.shape[0])]
pred_topk = [[(self.postprocess(t[0]), t[1]) for t in pred] for pred in pred_topk]
payload = {'bpe_strings': bpe_strings,
'real_topk': real_topk,
'pred_topk': pred_topk}
if torch.cuda.is_available():
torch.cuda.empty_cache()
return payload
def sample_unconditional(self, length=100, topk=5, temperature=1.0):
'''
Sample `length` words from the model.
Code strongly inspired by
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/run_gpt2.py
'''
context = torch.full((1, 1),
self.enc.encoder[self.start_token],
device=self.device,
dtype=torch.long)
prev = context
output = context
past = None
# Forward through the model
with torch.no_grad():
for i in range(length):
logits, past = self.model(prev, past=past)
logits = logits[:, -1, :] / temperature
# Filter predictions to topk and softmax
probs = torch.softmax(top_k_logits(logits, k=topk),
dim=-1)
# Sample
prev = torch.multinomial(probs, num_samples=1)
# Construct output
output = torch.cat((output, prev), dim=1)
output_text = self.enc.decode(output[0].tolist())
return output_text
def postprocess(self, token):
with_space = False
with_break = False
if token.startswith('Ġ'):
with_space = True
token = token[1:]
# print(token)
elif token.startswith('â'):
token = ' '
elif token.startswith('Ċ'):
token = ' '
with_break = True
token = '-' if token.startswith('â') else token
token = '“' if token.startswith('ľ') else token
token = '”' if token.startswith('Ŀ') else token
token = "'" if token.startswith('Ļ') else token
if with_space:
token = '\u0120' + token
if with_break:
token = '\u010A' + token
return token
@register_api(name='BERT')
class BERTLM(AbstractLanguageChecker):
def __init__(self, model_name_or_path="bert-base-cased"):
super(BERTLM, self).__init__()
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
self.tokenizer = BertTokenizer.from_pretrained(
model_name_or_path,
do_lower_case=False)
self.model = BertForMaskedLM.from_pretrained(
model_name_or_path)
self.model.to(self.device)
self.model.eval()
# BERT-specific symbols
self.mask_tok = self.tokenizer.convert_tokens_to_ids(["[MASK]"])[0]
self.pad = self.tokenizer.convert_tokens_to_ids(["[PAD]"])[0]
print("Loaded BERT model!")
def check_probabilities(self, in_text, topk=40, max_context=20,
batch_size=20):
'''
Same behavior as GPT-2
Extra param: max_context controls how many words should be
fed in left and right
Speeds up inference since BERT requires prediction word by word
'''
in_text = "[CLS] " + in_text + " [SEP]"
tokenized_text = self.tokenizer.tokenize(in_text)
# Construct target
y_toks = self.tokenizer.convert_tokens_to_ids(tokenized_text)
# Only use sentence A embedding here since we have non-separable seq's
segments_ids = [0] * len(y_toks)
y = torch.tensor([y_toks]).to(self.device)
segments_tensor = torch.tensor([segments_ids]).to(self.device)
# TODO batching...
# Create batches of (x,y)
input_batches = []
target_batches = []
for min_ix in range(0, len(y_toks), batch_size):
max_ix = min(min_ix + batch_size, len(y_toks) - 1)
cur_input_batch = []
cur_target_batch = []
# Construct each batch
for running_ix in range(max_ix - min_ix):
tokens_tensor = y.clone()
mask_index = min_ix + running_ix
tokens_tensor[0, mask_index + 1] = self.mask_tok
# Reduce computational complexity by subsetting
min_index = max(0, mask_index - max_context)
max_index = min(tokens_tensor.shape[1] - 1,
mask_index + max_context + 1)
tokens_tensor = tokens_tensor[:, min_index:max_index]
# Add padding
needed_padding = max_context * 2 + 1 - tokens_tensor.shape[1]
if min_index == 0 and max_index == y.shape[1] - 1:
# Only when input is shorter than max_context
left_needed = (max_context) - mask_index
right_needed = needed_padding - left_needed
p = torch.nn.ConstantPad1d((left_needed, right_needed),
self.pad)
tokens_tensor = p(tokens_tensor)
elif min_index == 0:
p = torch.nn.ConstantPad1d((needed_padding, 0), self.pad)
tokens_tensor = p(tokens_tensor)
elif max_index == y.shape[1] - 1:
p = torch.nn.ConstantPad1d((0, needed_padding), self.pad)
tokens_tensor = p(tokens_tensor)
cur_input_batch.append(tokens_tensor)
cur_target_batch.append(y[:, mask_index + 1])
# new_segments = segments_tensor[:, min_index:max_index]
cur_input_batch = torch.cat(cur_input_batch, dim=0)
cur_target_batch = torch.cat(cur_target_batch, dim=0)
input_batches.append(cur_input_batch)
target_batches.append(cur_target_batch)
real_topk = []
pred_topk = []
with torch.no_grad():
for src, tgt in zip(input_batches, target_batches):
# Compute one batch of inputs
# By construction, MASK is always the middle
logits = self.model(src, torch.zeros_like(src))[:,
max_context + 1]
yhat = torch.softmax(logits, dim=-1)
sorted_preds = np.argsort(-yhat.data.cpu().numpy())
# TODO: compare with batch of tgt
# [(pos, prob), ...]
real_topk_pos = list(
[int(np.where(sorted_preds[i] == tgt[i].item())[0][0])
for i in range(yhat.shape[0])])
real_topk_probs = yhat[np.arange(
0, yhat.shape[0], 1), tgt].data.cpu().numpy().tolist()
real_topk.extend(list(zip(real_topk_pos, real_topk_probs)))
# # [[(pos, prob), ...], [(pos, prob), ..], ...]
pred_topk.extend([list(zip(self.tokenizer.convert_ids_to_tokens(
sorted_preds[i][:topk]),
yhat[i][sorted_preds[i][
:topk]].data.cpu().numpy().tolist()))
for i in range(yhat.shape[0])])
bpe_strings = [self.postprocess(s) for s in tokenized_text]
pred_topk = [[(self.postprocess(t[0]), t[1]) for t in pred] for pred in pred_topk]
payload = {'bpe_strings': bpe_strings,
'real_topk': real_topk,
'pred_topk': pred_topk}
return payload
def postprocess(self, token):
with_space = True
with_break = token == '[SEP]'
if token.startswith('##'):
with_space = False
token = token[2:]
if with_space:
token = '\u0120' + token
if with_break:
token = '\u010A' + token
#
# # print ('....', token)
return token
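# Illustrative helper (a sketch, not part of the detector classes above): it
# summarizes the payload built by check_probabilities(). `real_topk` holds one
# (rank, probability) pair per observed token, where rank is the 0-based
# position of the true token in the model's sorted predictions, so `rank < k`
# means the token fell inside the model's top-k guesses.
def summarize_topk_fraction(payload, k=10):
    ranks = [rank for rank, _prob in payload['real_topk']]
    if not ranks:
        return 0.0
    return sum(1 for rank in ranks if rank < k) / float(len(ranks))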
def main():
raw_text = """
In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English.
The scientist named the population, after their distinctive horn, Ovid’s Unicorn. These four-horned, silver-white unicorns were previously unknown to science.
Now, after almost two centuries, the mystery of what sparked this odd phenomenon is finally solved.
Dr. Jorge Pérez, an evolutionary biologist from the University of La Paz, and several companions, were exploring the Andes Mountains when they found a small valley, with no other animals or humans. Pérez noticed that the valley had what appeared to be a natural fountain, surrounded by two peaks of rock and silver snow.
Pérez and the others then ventured further into the valley. “By the time we reached the top of one peak, the water looked blue, with some crystals on top,” said Pérez.
Pérez and his friends were astonished to see the unicorn herd. These creatures could be seen from the air without having to move too much to see them – they were so close they could touch their horns.
While examining these bizarre creatures the scientists discovered that the creatures also spoke some fairly regular English. Pérez stated, “We can see, for example, that they have a common ‘language,’ something like a dialect or dialectic.”
Dr. Pérez believes that the unicorns may have originated in Argentina, where the animals were believed to be descendants of a lost race of people who lived there before the arrival of humans in those parts of South America.
While their origins are still unclear, some believe that perhaps the creatures were created when a human and a unicorn met each other in a time before human civilization. According to Pérez, “In South America, such incidents seem to be quite common.”
However, Pérez also pointed out that it is likely that the only way of knowing for sure if unicorns are indeed the descendants of a lost alien race is through DNA. “But they seem to be able to communicate in English quite well, which I believe is a sign of evolution, or at least a change in social organization,” said the scientist.
"""
raw_text = """
In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English.
"""
'''
Tests for BERT
'''
lm = BERTLM()
start = time.time()
payload = lm.check_probabilities(raw_text, topk=5)
end = time.time()
print("{:.2f} Seconds for a run with BERT".format(end - start))
# print("SAMPLE:", sample)
'''
Tests for GPT-2
'''
lm = LM()
start = time.time()
payload = lm.check_probabilities(raw_text, topk=5)
end = time.time()
print("{:.2f} Seconds for a check with GPT-2".format(end - start))
start = time.time()
sample = lm.sample_unconditional()
end = time.time()
print("{:.2f} Seconds for a sample from GPT-2".format(end - start))
print("SAMPLE:", sample)
if __name__ == "__main__":
main()
| 42.09447 | 337 | 0.583502 |
b9bab67b3478fba0284f86826d4a92ee106550b1 | 3,904 | py | Python | code/models/get_random_cnn_params.py | mrbarbasa/kaggle-spooky-author | a2ded542288efa0e85a25426722619ed2542d98b | ["MIT"] | 1 | 2018-10-09T04:57:03.000Z | 2018-10-09T04:57:03.000Z | code/models/get_random_cnn_params.py | mrbarbasa/kaggle-spooky-author | a2ded542288efa0e85a25426722619ed2542d98b | ["MIT"] | null | null | null | code/models/get_random_cnn_params.py | mrbarbasa/kaggle-spooky-author | a2ded542288efa0e85a25426722619ed2542d98b | ["MIT"] | 1 | 2021-11-23T08:26:52.000Z | 2021-11-23T08:26:52.000Z |
import numpy as np
def get_random_cnn_params(normal_arch_threshold=0.8):
"""Retrieve random CNN parameters and hyperparameters.
Parameters
----------
normal_arch_threshold : float, optional
A fraction between 0 and 1 that specifies the probability of
using the normal CNN architecture over the special architecture.
Returns
-------
params : dict
Model parameters and hyperparameters to govern the construction
of a CNN model. They are:
- batch_size : int
The number of samples per batch; after a batch is trained,
weights are updated.
- filters : int
The number of filters in a convolutional layer.
- kernel_size : int
The length of the 1D convolution window.
- dropout_rate : float
Fraction of the input units to drop.
- optimizer : string
An optimizer such as Adam or RMSProp.
- use_special_arch : bool
Whether or not to use the special CNN architecture.
- normal_arch_params : dict
This dictionary should only have keys if `use_special_arch`
is False; otherwise, it is an empty dictionary.
- num_conv_stacks : int
The number of convolutional stacks.
- add_extra_conv_layer : bool
Add an extra convolutional layer whenever a convolutional
layer appears.
- add_dropout_layer : bool
Add a dropout layer at the end of every convolutional
stack, after the max pooling layer.
- flatten : bool
Whether or not to end the CNN model with a Keras Flatten
and Dense layer, as opposed to one or two convolutional
layers followed by a global max or average pooling layer.
- use_global_max_pooling_layer : bool
Only applies if `flatten` is False: End the model with a
global max pooling layer instead of a global average.
- add_final_dropout_layer : bool
Add a final dropout layer right before the output layer.
- pool_size : int
Size of the max pooling windows.
- final_dropout_rate : float
Only applies if `add_final_dropout_layer` is True:
Fraction of the input units to drop for the final
dropout layer.
"""
batch_size = int(np.random.choice([32, 64, 128, 256, 512]))
filters = int(np.random.choice([32, 64, 128, 256, 300]))
kernel_size = int(np.random.choice([3, 5, 7, 9]))
dropout_rate = float(np.random.choice([0.1, 0.2, 0.3, 0.4, 0.5]))
optimizer = str(np.random.choice(['adam', 'rmsprop']))
special_arch_value = float(np.random.uniform(0, 1))
# `normal_arch_threshold = 0.8` by default:
# Use normal architecture 80% of the time
use_special_arch = special_arch_value > normal_arch_threshold
nap = {}
if not use_special_arch:
nap['num_conv_stacks'] = int(np.random.choice([1, 2, 3]))
nap['add_extra_conv_layer'] = bool(np.random.choice([True, False]))
nap['add_dropout_layer'] = bool(np.random.choice([True, False]))
nap['flatten'] = bool(np.random.choice([True, False]))
nap['use_global_max_pooling_layer'] = bool(np.random.choice([True, False]))
nap['add_final_dropout_layer'] = bool(np.random.choice([True, False]))
nap['pool_size'] = int(np.random.choice([2, 3, 4, 5]))
nap['final_dropout_rate'] = float(np.random.choice([0.1, 0.2, 0.3, 0.4, 0.5]))
return {
'batch_size': batch_size,
'filters': filters,
'kernel_size': kernel_size,
'dropout_rate': dropout_rate,
'optimizer': optimizer,
'use_special_arch': use_special_arch,
'normal_arch_params': nap,
}
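# Illustrative usage sketch: draw a few random configurations the way a
# random-search driver might. The model-construction step is omitted here
# because the builder function lives elsewhere in this project.
if __name__ == '__main__':
    for trial in range(3):
        params = get_random_cnn_params()
        print('trial {}: batch_size={} filters={} kernel_size={} '
              'special_arch={}'.format(trial,
                                       params['batch_size'],
                                       params['filters'],
                                       params['kernel_size'],
                                       params['use_special_arch']))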
| 41.978495 | 86 | 0.61373 |
7ca03a71c54b622873a8393041189ae284b9e123 | 45,226 | py | Python | comdb2/dbapi2.py | vishalbelsare/python-comdb2 | 05da300c739bcc7e63036ab79f8552165954035b | ["Apache-2.0"] | 20 | 2017-07-13T09:04:21.000Z | 2021-11-09T05:32:17.000Z | comdb2/dbapi2.py | vishalbelsare/python-comdb2 | 05da300c739bcc7e63036ab79f8552165954035b | ["Apache-2.0"] | 10 | 2017-07-12T20:15:26.000Z | 2021-12-22T20:04:49.000Z | comdb2/dbapi2.py | vishalbelsare/python-comdb2 | 05da300c739bcc7e63036ab79f8552165954035b | ["Apache-2.0"] | 21 | 2017-07-12T19:51:22.000Z | 2021-11-09T05:32:07.000Z |
# Copyright 2017 Bloomberg Finance L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a DB-API 2.0 compatible Comdb2 API.
Overview
========
This module provides a Comdb2 interface that conforms to `the Python Database
API Specification v2.0 <https://www.python.org/dev/peps/pep-0249/>`_.
Basic Usage
-----------
The main class used for interacting with a Comdb2 is `Connection`, which you
create by calling the `connect` factory function. Any errors that are
encountered when connecting to or querying the database are raised as instances
of the `Error` class.
A basic usage example looks like this::
from comdb2 import dbapi2
conn = dbapi2.connect('mattdb', autocommit=True)
cursor = conn.cursor()
cursor.execute("select 1, 'a' union all select 2, 'b'")
for row in cursor.fetchall():
print(row)
The above would result in the following output::
[1, 'a']
[2, 'b']
To reduce the amount of boilerplate required for fetching result sets, we
implement 2 extensions to the interface required by the Python DB-API: `Cursor`
objects are iterable, yielding one row of the result set per iteration, and
`Cursor.execute` returns the `Cursor` itself. By utilizing these extensions,
the basic example can be shortened to::
from comdb2 import dbapi2
conn = dbapi2.connect('mattdb', autocommit=True)
for row in conn.cursor().execute("select 1, 'a' union all select 2, 'b'"):
print(row)
Graceful Teardown and Error Handling
------------------------------------
Non-trivial applications should guarantee that the `Connection` is closed when
it is no longer needed, preferably by using `contextlib.closing`. They should
also be prepared to handle any errors returned by the database. So, a more
thorough version of the example above would be::
from comdb2 import dbapi2
from contextlib import closing
try:
with closing(dbapi2.connect('mattdb', autocommit=True)) as conn:
query = "select 1, 'a' union all select 2, 'b'"
for row in conn.cursor().execute(query):
print(row)
except dbapi2.Error as exc:
print("Comdb2 exception encountered: %s" % exc)
In this example, `contextlib.closing` is used to guarantee that
`Connection.close` is called at the end of the ``with`` block, and an exception
handler been added for exceptions of type `Error`. All exceptions raised by
this module are subclasses of `Error`. See :ref:`Exceptions` for details on
when each exception type is raised.
Controlling the Type Used For Result Rows
-----------------------------------------
As you can see, rows are returned as a `list` of column values in positional
order. If you'd prefer to get the columns back as some other type, you can set
`Connection.row_factory` to one of the factories provided by
`comdb2.factories` - for example::
from comdb2 import dbapi2
from comdb2 import factories
conn = dbapi2.connect('mattdb', autocommit=True)
conn.row_factory = factories.dict_row_factory
c = conn.cursor()
for row in c.execute("select 1 as 'x', 2 as 'y' union all select 3, 4"):
print(row)
This program will return each row as a `dict` rather than a `list`::
{'y': 2, 'x': 1}
{'y': 4, 'x': 3}
Parameter Binding
-----------------
In real applications you'll often need to pass parameters into a SQL query.
This is done using parameter binding - in the query, placeholders are specified
using ``%(name)s``, and a mapping of ``name`` to parameter value is passed to
`Cursor.execute` along with the query. The ``%(`` and ``)s`` are fixed, and
the ``name`` between them varies for each parameter. For example:
>>> query = "select 25 between %(a)s and %(b)s"
>>> print(conn.cursor().execute(query, {'a': 20, 'b': 42}).fetchall())
[[1]]
>>> params = {'a': 20, 'b': 23}
>>> print(conn.cursor().execute(query, params).fetchall())
[[0]]
In this example, we run the query with two different sets of
parameters, producing different results. First, we execute the query with
parameter ``a`` bound to ``20`` and ``b`` bound to ``42``. In this case,
because ``20 <= 25 <= 42``, the expression evaluates to true, and a ``1`` is
returned.
When we run the same query with parameter ``b`` bound to ``23``, a ``0`` is
returned instead, because ``20 <= 25 <= 23`` is false.
Note:
Because parameters are bound using ``%(name)s``, other ``%`` signs in
a query must be escaped. For example, ``WHERE name like 'M%'`` becomes
``WHERE name LIKE 'M%%'``.
Types
-----
For all Comdb2 types, the same Python type is used for binding a parameter
value as is returned for a SQL query result column of that type. In brief, SQL
types are mapped to Python types according to the following table:
============ ================================================================
SQL type Python type
============ ================================================================
NULL ``None``
integer `int`
real `float`
blob `six.binary_type` (aka `bytes` in Python 3, ``str`` in Python 2)
text `six.text_type` (aka `str` in Python 3, ``unicode`` in Python 2)
datetime `datetime.datetime`
datetimeus `DatetimeUs`
============ ================================================================
See :ref:`Comdb2 to Python Type Mappings` for a thorough explanation of these
type mappings and their implications.
Note:
This module uses byte strings to represent BLOB columns, and Unicode
strings to represent TEXT columns. This is a very common source of
problems for new users.
Make sure to carefully read :ref:`String and Blob Types` on that page.
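
For instance, with a hypothetical table ``t`` that has a blob column ``b`` and
a datetime column ``d``, the same Python types are used on the way in and on
the way out::

    cursor.execute("insert into t(b, d) values (%(b)s, %(d)s)",
                   {'b': Binary("blob data"),
                    'd': Datetime(2017, 1, 2, 3, 4, 5)})
    cursor.execute("select b, d from t")
    blob_val, datetime_val = cursor.fetchone()
    # blob_val is a byte string; datetime_val is a datetime.datetime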
.. _Autocommit Mode:
Autocommit Mode
---------------
In all of the examples above, we gave the ``autocommit=True`` keyword argument
when calling `connect`. This opts out of DB-API compliant transaction
handling, in order to use Comdb2's native transaction semantics.
By default, DB-API cursors are always in a transaction. You can commit that
transaction using `Connection.commit`, or roll it back using
`Connection.rollback`. For example::
conn = dbapi2.connect('mattdb')
cursor = conn.cursor()
query = "insert into simple(key, val) values (%(key)s, %(val)s)"
cursor.execute(query, {'key': 1, 'val': 2})
cursor.execute(query, {'key': 3, 'val': 4})
cursor.execute(query, {'key': 5, 'val': 6})
conn.commit()
There are several things to note here. The first is that the insert statements
that were sent to the database don't take effect immediately, because they are
implicitly part of a transaction that must be explicitly completed. This is
different from other Comdb2 APIs, where you must execute a ``BEGIN`` statement
to start a transaction, and where statements otherwise take effect immediately.
The second thing to note is that there are certain error conditions where
a Comdb2 connection can automatically recover when outside of a transaction,
but not within a transaction. In other words, using transactions when they
aren't needed can introduce new failure modes into your program.
In order to provide greater compatibility with other Comdb2 interfaces and
to eliminate the performance costs and extra error cases introduced by using
transactions unnecessarily, we allow you to pass the non-standard
``autocommit=True`` keyword argument when creating a new `Connection`. This
results in the implicit transaction not being created. You can still start
a transaction explicitly by passing a ``BEGIN`` statement to
`Cursor.execute`. For example::
conn = dbapi2.connect('mattdb', autocommit=True)
cursor = conn.cursor()
cursor.execute("delete from simple where 1=1")
cursor.execute("begin")
query = "insert into simple(key, val) values (%(key)s, %(val)s)"
cursor.execute(query, {'key': 1, 'val': 2})
cursor.execute(query, {'key': 3, 'val': 4})
cursor.execute(query, {'key': 5, 'val': 6})
cursor.execute("rollback")
In this example, because we've used ``autocommit=True`` the delete statement
takes effect immediately (that is, it is automatically committed). We then
explicitly create a transaction, insert 3 rows, then decide not to commit
it, and instead explicitly roll back the transaction.
To summarize: you cannot use ``autocommit`` mode if you intend to pass the
`Connection` to a library that requires DB-API compliant connections. You
should prefer ``autocommit`` mode when you don't want to use transactions (for
example, read-only queries where no particular consistency guarantees are
required across queries). If you do intend to use transactions but won't pass
the `Connection` to a library that requires DB-API compliance, you can choose
either mode. It may be easier to port existing code if you use ``autocommit``
mode, but avoiding ``autocommit`` mode may allow you to use 3rd party libraries
in the future that require DB-API compliant connections.
DB-API Compliance
-----------------
The interface this module provides fully conforms to `the Python Database API
Specification v2.0 <https://www.python.org/dev/peps/pep-0249/>`_ with a few
specific exceptions:
1. DB-API requires `Date` and `DateFromTicks` constructors, which we don't
provide because Comdb2 has no type for representing a date without a time
component.
2. DB-API requires `Time` and `TimeFromTicks` constructors, which we don't
provide because Comdb2 has no type for representing a time without a date
component.
3. DB-API is unclear about the required behavior of multiple calls to
`Connection.cursor` on a connection. Comdb2 does not have a concept of
cursors as distinct from connection handles, so each time
`Connection.cursor` is called, we call `Cursor.close` on any existing, open
cursor for that connection.
"""
from __future__ import absolute_import, unicode_literals
import functools
import itertools
import weakref
import datetime
import re
import six
from . import cdb2
__all__ = ['apilevel', 'threadsafety', 'paramstyle',
'connect', 'Connection', 'Cursor',
'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID',
'Datetime', 'DatetimeUs', 'Binary', 'Timestamp', 'TimestampUs',
'DatetimeFromTicks', 'DatetimeUsFromTicks', 'TimestampFromTicks',
'Error', 'Warning', 'InterfaceError', 'DatabaseError',
'InternalError', 'OperationalError', 'ProgrammingError',
'IntegrityError', 'DataError', 'NotSupportedError',
'UniqueKeyConstraintError', 'ForeignKeyConstraintError',
'NonNullConstraintError']
apilevel = "2.0"
"""This module conforms to the Python Database API Specification 2.0."""
threadsafety = 1
"""Two threads can use this module, but can't share one `Connection`."""
paramstyle = "pyformat"
"""The SQL placeholder format for this module is ``%(name)s``.
Comdb2's native placeholder format is ``@name``, but that cannot be used by
this module because it's not an acceptable `DB-API 2.0 placeholder style
<https://www.python.org/dev/peps/pep-0249/#paramstyle>`_. This module uses
``pyformat`` because it is the only DB-API 2.0 paramstyle that we can translate
into Comdb2's placeholder format without needing a SQL parser.
Note:
An int value is bound as ``%(my_int)s``, not as ``%(my_int)d`` - the last
character is always ``s``.
Note:
Because SQL strings for this module use the ``pyformat`` placeholder style,
any literal ``%`` characters in a query must be escaped by doubling them.
``WHERE name like 'M%'`` becomes ``WHERE name LIKE 'M%%'``.
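
For example, a query written for this module as::

    select name from t where id = %(id)s

is rewritten to Comdb2's native placeholder form, roughly
``select name from t where id = @id``, before it is sent to the server.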
"""
_FIRST_WORD_OF_STMT = re.compile(
r"""
(?: # match (without capturing)
\s* # optional whitespace
/\*.*?\*/ # then a C-style /* ... */ comment, possibly across lines
| # or
\s* # optional whitespace
--[^\n]*\n # then a SQL-style comment terminated by a newline
)* # repeat until all comments have been matched
\s* # then skip over any whitespace
(\w+) # and capture the first word
""",
re.VERBOSE | re.DOTALL | (0 if six.PY2 else re.ASCII),
)
_VALID_SP_NAME = re.compile(r'^[A-Za-z0-9_.]+$')
@functools.total_ordering
class _TypeObject(object):
def __init__(self, *value_names):
self.value_names = value_names
self.values = [cdb2.TYPE[v] for v in value_names]
def __eq__(self, other):
return other in self.values
def __lt__(self, other):
return self != other and other < self.values
def __repr__(self):
return 'TypeObject' + str(self.value_names)
def _binary(string):
if isinstance(string, six.text_type):
return string.encode('utf-8')
return bytes(string)
STRING = _TypeObject('CSTRING')
"""The type codes of TEXT result columns compare equal to this constant."""
BINARY = _TypeObject('BLOB')
"""The type codes of BLOB result columns compare equal to this constant."""
NUMBER = _TypeObject('INTEGER', 'REAL')
"""The type codes of numeric result columns compare equal to this constant."""
DATETIME = _TypeObject('DATETIME', 'DATETIMEUS')
"""The type codes of datetime result columns compare equal to this constant."""
ROWID = STRING
# comdb2 doesn't support Date or Time, so I'm not defining them.
Datetime = datetime.datetime
DatetimeUs = cdb2.DatetimeUs
Binary = _binary
Timestamp = Datetime
TimestampUs = DatetimeUs
DatetimeFromTicks = Datetime.fromtimestamp
DatetimeUsFromTicks = DatetimeUs.fromtimestamp
TimestampFromTicks = Timestamp.fromtimestamp
TimestampUsFromTicks = TimestampUs.fromtimestamp
try:
UserException = StandardError # Python 2
except NameError:
UserException = Exception # Python 3
class Error(UserException):
"""This is the base class of all exceptions raised by this module.
In addition to being available at the module scope, this class and the
other exception classes derived from it are exposed as attributes on
`Connection` objects, to simplify error handling in environments where
multiple connections from different modules are used.
"""
pass
class Warning(UserException):
"""Exception raised for important warnings.
This is required to exist by the DB-API interface, but we never raise it.
"""
pass
class InterfaceError(Error):
"""Exception raised for errors caused by misuse of this module."""
pass
class DatabaseError(Error):
"""Base class for all errors reported by the database."""
pass
class InternalError(DatabaseError):
"""Exception raised for internal errors reported by the database."""
pass
class OperationalError(DatabaseError):
"""Exception raised for errors related to the database's operation.
These errors are not necessarily the result of a bug either in the
application or in the database - for example, dropped connections.
"""
pass
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors reported by the database.
For example, this will be raised for syntactically incorrect SQL, or for
passing a different number of parameters than are required by the query.
"""
pass
class IntegrityError(DatabaseError):
"""Exception raised for integrity errors reported by the database.
For example, a subclass of this will be raised if a foreign key constraint
would be violated, or a constraint that a column may not be null, or that
an index may not have duplicates. Other types of constraint violations may
raise this type directly.
"""
pass
class UniqueKeyConstraintError(IntegrityError):
"""Exception raised when a unique key constraint would be broken.
Committing after either an INSERT or an UPDATE could result in this being
raised, by either adding a new row that violates a unique (non-dup) key
constraint or modifying an existing row in a way that would violate one.
.. versionadded:: 1.1
"""
pass
class ForeignKeyConstraintError(IntegrityError):
"""Exception raised when a foreign key constraint would be broken.
This would be raised when committing if the changes being committed would
violate referential integrity according to a foreign key constraint
configured on the database. For instance, deleting a row from a parent
table would raise this if rows corresponding to its key still exist in
a child table and the constraint doesn't have ON DELETE CASCADE. Likewise,
inserting a row into a child table would raise this if there was no row in
the parent table matching the new row's key.
.. versionadded:: 1.1
"""
pass
class NonNullConstraintError(IntegrityError):
"""Exception raised when a non-null constraint would be broken.
Committing after either an INSERT or an UPDATE could result in this being
raised if it would result in a null being stored in a non-nullable column.
Note that columns in a Comdb2 are not nullable by default.
.. versionadded:: 1.1
"""
pass
class DataError(DatabaseError):
"""Exception raised for errors related to the processed data.
For example, this will be raised if you attempt to write a value that's out
of range for the column type that would store it, or if you specify an
invalid timezone name for the connection.
"""
pass
class NotSupportedError(DatabaseError):
"""Exception raised when unsupported operations are attempted."""
pass
_EXCEPTION_BY_RC = {
cdb2.ERROR_CODE['CONNECT_ERROR'] : OperationalError,
cdb2.ERROR_CODE['NOTCONNECTED'] : ProgrammingError,
cdb2.ERROR_CODE['PREPARE_ERROR'] : ProgrammingError,
cdb2.ERROR_CODE['IO_ERROR'] : OperationalError,
cdb2.ERROR_CODE['INTERNAL'] : InternalError,
cdb2.ERROR_CODE['NOSTATEMENT'] : ProgrammingError,
cdb2.ERROR_CODE['BADCOLUMN'] : ProgrammingError,
cdb2.ERROR_CODE['BADSTATE'] : ProgrammingError,
cdb2.ERROR_CODE['ASYNCERR'] : OperationalError,
cdb2.ERROR_CODE['INVALID_ID'] : InternalError,
cdb2.ERROR_CODE['RECORD_OUT_OF_RANGE'] : OperationalError,
cdb2.ERROR_CODE['REJECTED'] : OperationalError,
cdb2.ERROR_CODE['STOPPED'] : OperationalError,
cdb2.ERROR_CODE['BADREQ'] : OperationalError,
cdb2.ERROR_CODE['DBCREATE_FAILED'] : OperationalError,
cdb2.ERROR_CODE['THREADPOOL_INTERNAL'] : OperationalError,
cdb2.ERROR_CODE['READONLY'] : NotSupportedError,
cdb2.ERROR_CODE['NOMASTER'] : InternalError,
cdb2.ERROR_CODE['UNTAGGED_DATABASE'] : NotSupportedError,
cdb2.ERROR_CODE['CONSTRAINTS'] : IntegrityError,
cdb2.ERROR_CODE['DEADLOCK'] : OperationalError,
cdb2.ERROR_CODE['TRAN_IO_ERROR'] : OperationalError,
cdb2.ERROR_CODE['ACCESS'] : OperationalError,
cdb2.ERROR_CODE['TRAN_MODE_UNSUPPORTED'] : NotSupportedError,
cdb2.ERROR_CODE['VERIFY_ERROR'] : OperationalError,
cdb2.ERROR_CODE['FKEY_VIOLATION'] : ForeignKeyConstraintError,
cdb2.ERROR_CODE['NULL_CONSTRAINT'] : NonNullConstraintError,
cdb2.ERROR_CODE['CONV_FAIL'] : DataError,
cdb2.ERROR_CODE['NONKLESS'] : NotSupportedError,
cdb2.ERROR_CODE['MALLOC'] : OperationalError,
cdb2.ERROR_CODE['NOTSUPPORTED'] : NotSupportedError,
cdb2.ERROR_CODE['DUPLICATE'] : UniqueKeyConstraintError,
cdb2.ERROR_CODE['TZNAME_FAIL'] : DataError,
cdb2.ERROR_CODE['UNKNOWN'] : OperationalError,
}
def _raise_wrapped_exception(exc):
code = exc.error_code
msg = '%s (cdb2api rc %d)' % (exc.error_message, code)
if "null constraint violation" in msg:
six.raise_from(NonNullConstraintError(msg), exc) # DRQS 86013831
six.raise_from(_EXCEPTION_BY_RC.get(code, OperationalError)(msg), exc)
def _sql_operation(sql):
match = _FIRST_WORD_OF_STMT.match(sql)
if match:
return match.group(1).lower()
return None
def _operation_ends_transaction(operation):
return operation == 'commit' or operation == 'rollback'
def _modifies_rows(operation):
# These operations can modify the contents of the database.
# exec is deliberately excluded because it might return a result set, and
# this function is used to determine whether it's safe to call
# cdb2_get_effects after running the operation.
return operation in ('commit', 'insert', 'update', 'delete')
def connect(*args, **kwargs):
"""Establish a connection to a Comdb2 database.
All arguments are passed directly through to the `Connection` constructor.
Note:
DB-API 2.0 requires the module to expose `connect`, but not
`Connection`. If portability across database modules is a concern, you
should always use `connect` to create your connections rather than
calling the `Connection` constructor directly.
Returns:
Connection: A handle for the newly established connection.
"""
return Connection(*args, **kwargs)
class Connection(object):
"""Represents a connection to a Comdb2 database.
By default, the connection will be made to the cluster configured as the
machine-wide default for the given database. This is almost always what
you want. If you need to connect to a database that's running on your
local machine rather than a cluster, you can pass "local" as the ``tier``.
It's also permitted to specify "dev", "alpha", "beta", or "prod" as the
``tier``, which will connect you directly to the tier you specify (firewall
permitting).
Alternately, you can pass a machine name as the ``host`` argument, to
connect directly to an instance of the given database on that host, rather
than on a cluster or the local machine.
The connection will use UTC as its timezone by default - you can change
this with a ``SET TIMEZONE`` statement if needed.
By default, or if ``autocommit`` is ``False``, `cursor` will return cursors
that behave as mandated by the Python DB API: every statement to be
executed is implicitly considered to be part of a transaction, and that
transaction must be ended explicitly with a call to `commit` (or
`rollback`). If ``autocommit`` is ``True``, `cursor` will instead return
cursors that behave more in line with Comdb2's traditional behavior: the
side effects of any given statement are immediately committed unless you
previously started a transaction by executing a ``begin`` statement.
Note:
Using ``autocommit=True`` will ease porting from code using other
Comdb2 APIs, both because other Comdb2 APIs implicitly commit after
each statement in the same way as an autocommit `Connection` will, and
because there are certain operations that Comdb2 will implicitly
retry when outside of a transaction but won't retry when inside
a transaction - meaning that a non-autocommit `Connection` has extra
failure modes. You should strongly consider using ``autocommit=True``,
especially for read-only use cases.
Note:
Python does not guarantee that object finalizers will be called when
the interpreter exits, so to ensure that the connection is cleanly
released you should call the `close` method when you're done with it.
You can use `contextlib.closing` to guarantee the connection is
released when a block completes.
Note:
DB-API 2.0 requires the module to expose `connect`, but not
`Connection`. If portability across database modules is a concern, you
should always use `connect` to create your connections rather than
instantiating this class directly.
Args:
database_name (str): The name of the database to connect to.
tier (str): The cluster to connect to.
host (str): Alternately, a single remote host to connect to.
autocommit (bool): Whether to automatically commit after DML
statements, disabling DB-API 2.0's automatic implicit transactions.
"""
def __init__(self, database_name, tier="default", autocommit=False,
host=None):
if host is not None and tier != "default":
raise InterfaceError("Connecting to a host by name and to a "
"cluster by tier are mutually exclusive")
self._active_cursor = None
self._in_transaction = False
self._autocommit = autocommit
try:
self._hndl = cdb2.Handle(database_name, tier, host=host)
except cdb2.Error as e:
_raise_wrapped_exception(e)
def _check_closed(self):
if self._hndl is None:
raise InterfaceError("Attempted to use a closed Connection")
@property
def row_factory(self):
"""Factory used when constructing result rows.
By default, or when set to ``None``, each row is returned as a `list`
of column values. If you'd prefer to receive rows as a `dict` or as
a `collections.namedtuple`, you can set this property to one of the
factories provided by the `comdb2.factories` module.
Example:
>>> from comdb2.factories import dict_row_factory
>>> conn.row_factory = dict_row_factory
>>> cursor = conn.cursor()
>>> cursor.execute("SELECT 1 as 'foo', 2 as 'bar'")
>>> cursor.fetchone()
{'foo': 1, 'bar': 2}
.. versionadded:: 0.9
"""
self._check_closed()
return self._hndl.row_factory
@row_factory.setter
def row_factory(self, value):
self._check_closed()
self._hndl.row_factory = value
def _close_any_outstanding_cursor(self):
if self._active_cursor is not None:
cursor = self._active_cursor()
if cursor is not None and not cursor._closed:
cursor.close()
def _execute(self, operation):
cursor = None
if self._active_cursor is not None:
cursor = self._active_cursor()
if cursor is None:
cursor = self.cursor()
cursor._execute(operation, operation)
def close(self):
"""Gracefully close the Comdb2 connection.
Once a `Connection` has been closed, no further operations may be
performed on it.
If a socket pool is running on the machine and the connection was in
a clean state, this will turn over the connection to the socket pool.
This cannot be done if the connection was in a transaction or
in the middle of retrieving a result set. Other restrictions may apply
as well.
You can ensure that this gets called at the end of a block using
something like:
>>> with contextlib.closing(connect('mattdb')) as conn:
>>> for row in conn.cursor().execute("select 1"):
>>> print(row)
"""
if self._hndl is None:
raise InterfaceError("close() called on already closed connection")
self._close_any_outstanding_cursor()
self._hndl.close()
self._hndl = None
def commit(self):
"""Commit any pending transaction to the database.
This method will fail if the `Connection` is in ``autocommit`` mode and
no transaction was explicitly started.
"""
self._check_closed()
self._execute("commit")
def rollback(self):
"""Rollback the current transaction.
This method will fail if the `Connection` is in ``autocommit`` mode and
no transaction was explicitly started.
Note:
Closing a connection without committing the changes first will
cause an implicit rollback to be performed, but will also prevent
that connection from being contributed to the socket pool, if one
is available. Because of this, an explicit rollback should be
preferred when possible.
"""
self._check_closed()
self._execute("rollback")
def cursor(self):
"""Return a new `Cursor` for this connection.
This calls `Cursor.close` on any outstanding `Cursor`; only one
`Cursor` is allowed per `Connection` at a time.
Note:
Although outstanding cursors are closed, uncommitted transactions
started by them are not rolled back, so the new `Cursor` will begin
in the middle of that uncommitted transaction.
Returns:
Cursor: A new cursor on this connection.
"""
self._check_closed()
self._close_any_outstanding_cursor()
cursor = Cursor(self)
self._active_cursor = weakref.ref(cursor)
return cursor
# Optional DB API Extension
Error = Error
Warning = Warning
InterfaceError = InterfaceError
DatabaseError = DatabaseError
InternalError = InternalError
OperationalError = OperationalError
ProgrammingError = ProgrammingError
IntegrityError = IntegrityError
DataError = DataError
NotSupportedError = NotSupportedError
class Cursor(object):
"""Class used to send requests through a database connection.
This class is not meant to be instantiated directly; it should always be
created using `Connection.cursor`. It provides methods for sending
requests to the database and for reading back the result sets produced by
the database.
Queries are made using the `execute` and `callproc` methods. Result sets
can be consumed with the `fetchone`, `fetchmany`, or `fetchall` methods, or
(as a nonstandard DB-API 2.0 extension) by iterating over the `Cursor`.
Note:
Only one `Cursor` per `Connection` can exist at a time. Creating a new
one will `close` the previous one.
"""
_ErrorMessagesByOperation = {
'begin': "Transactions may not be started explicitly",
'commit': "Use Connection.commit to commit transactions",
'rollback': "Use Connection.rollback to roll back transactions",
}
def __init__(self, conn):
self._arraysize = 1
self._conn = conn
self._hndl = conn._hndl
self._description = None
self._closed = False
self._rowcount = -1
def _check_closed(self):
if self._closed:
raise InterfaceError("Attempted to use a closed cursor")
@property
def arraysize(self):
"""Controls the number of rows to fetch at a time with `fetchmany`.
The default is ``1``, meaning that a single row will be fetched at
a time.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, value):
self._arraysize = value
@property
def description(self):
"""Provides the name and type of each column in the latest result set.
This read-only attribute will contain one element per column in the
result set. Each of those elements will be a 7-element sequence whose
first element is the name of that column, whose second element is
a type code for that column, and whose five remaining elements are
``None``.
The type codes can be compared for equality against the `STRING`,
`NUMBER`, `DATETIME`, and `BINARY` objects exposed by this module. If
you need more granularity (e.g. knowing whether the result is
a ``REAL`` or an ``INTEGER``) you can compare the type code for
equality against the members of the `.cdb2.TYPE` dictionary. Or, of
course, you can use `isinstance` to check the type of object returned
as that column's value.
Example:
>>> cursor = connect('mattdb').cursor()
>>> cursor.execute("select 1 as 'x', '2' as 'y', 3.0 as 'z'")
>>> cursor.description[0][:2] == ('x', NUMBER)
True
>>> cursor.description[1][:2] == ('y', STRING)
True
>>> cursor.description[2][:2] == ('z', NUMBER)
True
>>> cursor.description[2][:2] == ('z', TYPE['INTEGER'])
False
>>> cursor.description[2][:2] == ('z', TYPE['REAL'])
True
"""
self._check_closed()
return self._description
@property
def rowcount(self):
"""Provides the count of rows modified by the last transaction.
For `Cursor` objects on a `Connection` that is not using ``autocommit``
mode, this count is valid only after the transaction is committed with
`Connection.commit()`. For `Cursor` objects on a `Connection` that is
using ``autocommit`` mode, this count is valid after a successful
``COMMIT``, or after an ``INSERT``, ``UPDATE``, or ``DELETE`` statement
run outside of an explicit transaction. At all other times, ``-1`` is
returned.
In particular, ``-1`` is returned whenever a transaction is in
progress, because Comdb2 by default handles commit conflicts with other
transactions by rerunning each statement of the transaction. As
a result, row counts obtained within a transaction are meaningless in
the default transaction level; either more or fewer rows may be
affected when the transaction eventually commits.
Also, ``-1`` is returned after ``SELECT`` or ``SELECTV``, because
querying the row count requires calling ``cdb2_get_effects``, which
would consume the result set before the user could iterate over it.
Likewise, ``-1`` is returned after ``EXEC PROCEDURE``, because a stored
procedure could emit a result set.
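
        Example:
            >>> # illustrative only - counts depend on the table's contents
            >>> cursor.execute("delete from simple where 1=1")
            >>> cursor.rowcount   # implicit transaction still open
            -1
            >>> cursor.connection.commit()
            >>> cursor.rowcount   # rows deleted by the committed transaction
            3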
"""
self._check_closed()
return self._rowcount
# Optional DB API Extension
@property
def connection(self):
"""Return a reference to the `Connection` that this `Cursor` uses."""
self._check_closed()
return self._conn
def close(self):
"""Close the cursor now.
From this point forward an exception will be raised if any
operation is attempted with this `Cursor`.
Note:
This does not rollback any uncommitted operations executed by this
`Cursor`. A new `Cursor` created from the `Connection` that this
`Cursor` uses will start off in the middle of that uncommitted
transaction.
"""
self._check_closed()
self._description = None
self._closed = True
def callproc(self, procname, parameters):
"""Call a stored procedure with the given name.
The ``parameters`` sequence must contain one entry for each argument
that the procedure requires.
If the called procedure emits a result set, it is made available
through the fetch methods, or by iterating over the `Cursor`, as though
it was returned by a ``select`` statement.
Args:
procname (str): The name of the stored procedure to be executed.
parameters (Sequence[T]): A sequence of values to be passed, in
order, as the parameters to the stored procedure. Each element
must be an instance of one of the Python types listed in
:doc:`types`.
Returns:
List[T]: A copy of the input parameters.
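
        Example:
            >>> # illustrative only - assumes a stored procedure named get_user
            >>> cursor.callproc("get_user", ["alice"])
            ['alice']
            >>> cursor.fetchall()  # result set emitted by the procedure, if any
            [['alice', 42]]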
"""
if not _VALID_SP_NAME.match(procname):
raise NotSupportedError("Invalid procedure name '%s'" % procname)
params_as_dict = {str(i): e for i, e in enumerate(parameters)}
sql = ("exec procedure " + procname + "("
+ ", ".join("%%(%d)s" % i for i in range(len(params_as_dict)))
+ ")")
self.execute(sql, params_as_dict)
return parameters[:]
def execute(self, sql, parameters=None):
"""Execute a database operation (query or command).
The ``sql`` string must be provided as a Python format string, with
parameter placeholders represented as ``%(name)s`` and all other ``%``
signs escaped as ``%%``.
Note:
Using placeholders should always be the preferred method of
parameterizing the SQL query, as it prevents SQL injection
vulnerabilities, and is faster than dynamically building SQL
strings.
Args:
sql (str): The SQL string to execute, as a Python format string.
parameters (Mapping[str, T]): An optional mapping from parameter
names to the values to be bound for them.
Returns:
Cursor: As a nonstandard DB-API 2.0 extension, this method returns
the `Cursor` that it was called on, which can be used as an
iterator over the result set returned by the query. Iterating over
it will yield one ``list`` per row in the result set, where the
elements in the list correspond to the result columns within the
row, in positional order.
The `Connection.row_factory` property can be used to return rows as
a different type.
Example:
>>> cursor.execute("select 1, 2 UNION ALL select %(x)s, %(y)s",
... {'x': 2, 'y': 4})
>>> cursor.fetchall()
[[1, 2], [2, 4]]
"""
self._check_closed()
self._description = None
operation = _sql_operation(sql)
if not self._conn._autocommit:
# Certain operations are forbidden when not in autocommit mode.
errmsg = self._ErrorMessagesByOperation.get(operation)
if errmsg:
raise InterfaceError(errmsg)
self._execute(operation, sql, parameters)
if self._rowcount == -1:
self._load_description()
# Optional DB API Extension: execute's return value is unspecified. We
# return an iterable over the rows, but this isn't portable across DBs.
return self
def executemany(self, sql, seq_of_parameters):
"""Execute the same SQL statement repeatedly with different parameters.
This is currently equivalent to calling execute multiple times, once
for each element provided in ``seq_of_parameters``.
Args:
sql (str): The SQL string to execute, as a Python format string of
the format expected by `execute`.
seq_of_parameters (Sequence[Mapping[str, T]]): A sequence of
mappings from parameter names to the values to be bound for
them. The ``sql`` statement will be run once per element in
this sequence.
"""
self._check_closed()
for parameters in seq_of_parameters:
self.execute(sql, parameters)
def _execute(self, operation, sql, parameters=None):
self._rowcount = -1
if not self._conn._autocommit:
# Any non-SET operation starts a txn when not in autocommit mode.
if not self._conn._in_transaction and operation != "set":
try:
self._hndl.execute("begin")
except cdb2.Error as e:
_raise_wrapped_exception(e)
self._conn._in_transaction = True
if parameters is None:
parameters = {}
try:
# If variable interpolation fails, then translate the exception to
# an InterfaceError to signal that it's a client-side problem.
sql = sql % {name: "@" + name for name in parameters}
except KeyError as keyerr:
msg = "No value provided for parameter %s" % keyerr
six.raise_from(InterfaceError(msg), keyerr)
except Exception as exc:
msg = "Invalid Python format string for query"
six.raise_from(InterfaceError(msg), exc)
if _operation_ends_transaction(operation):
self._conn._in_transaction = False # txn ends, even on failure
try:
self._hndl.execute(sql, parameters)
except cdb2.Error as e:
_raise_wrapped_exception(e)
if operation == 'begin':
self._conn._in_transaction = True # txn successfully started
elif not self._conn._in_transaction and _modifies_rows(operation):
# We're not in a transaction, and the last statement could have
# modified rows. Either we've just explicitly committed
# a transaction, or we're in autocommit mode and ran DML outside of
# an explicit transaction. We can get the count of affected rows.
self._update_rowcount()
def setinputsizes(self, sizes):
"""No-op; implemented for PEP-249 compliance."""
self._check_closed()
def setoutputsize(self, size, column=None):
"""No-op; implemented for PEP-249 compliance."""
self._check_closed()
def _update_rowcount(self):
try:
self._rowcount = self._hndl.get_effects()[0]
except cdb2.Error:
self._rowcount = -1
def _load_description(self):
names = self._hndl.column_names()
types = self._hndl.column_types()
self._description = tuple((name, type, None, None, None, None, None)
for name, type in zip(names, types))
if not self._description:
self._description = None
def fetchone(self):
"""Fetch the next row of the current result set.
Returns:
Row: If no rows remain in the current result set, ``None`` is
returned, otherwise the next row of the result set is returned. By
default the row is returned as a `list`, where the elements in the
list correspond to the result row's columns in positional order,
but this can be changed with the `Connection.row_factory` property.
"""
try:
return next(self)
except StopIteration:
return None
def fetchmany(self, n=None):
"""Fetch the next set of rows of the current result set.
Args:
n: Maximum number of rows to be returned. If this argument is not
given, `Cursor.arraysize` is used as the maximum.
Returns:
List[Row]: Returns a `list` containing the next ``n`` rows of the
result set. If fewer than ``n`` rows remain, the returned list
will contain fewer than ``n`` elements. If no rows remain, the
list will be empty. By default each row is
returned as a `list`, where the elements in the list correspond to
the result row's columns in positional order, but this can be
changed with the `Connection.row_factory` property.
"""
if n is None:
n = self._arraysize
return [x for x in itertools.islice(self, 0, n)]
def fetchall(self):
"""Fetch all remaining rows of the current result set.
Returns:
List[Row]: Returns a `list` containing all remaining rows of the
result set. By default each row is returned as a `list`, where the
elements in the list correspond to the result row's columns in
positional order, but this can be changed with the
`Connection.row_factory` property.
"""
return [x for x in self]
# Optional DB API Extension
def __iter__(self):
"""Iterate over all rows in a result set.
By default each row is returned as a `list`, where the elements in the
list correspond to the result row's columns in positional order, but
this can be changed with the `Connection.row_factory` property.
Note:
This is not required by DB-API 2.0; for maximum portability
applications should prefer to use `fetchone` or `fetchmany` or
`fetchall` instead.
Example:
>>> cursor.execute("select 1, 2 UNION ALL select 3, 4")
>>> for row in cursor:
... print(row)
[1, 2]
[3, 4]
"""
self._check_closed()
return self
# Optional DB API Extension
def next(self):
self._check_closed()
if not self._description:
raise InterfaceError("No result set exists")
try:
return next(self._hndl)
except cdb2.Error as e:
_raise_wrapped_exception(e)
__next__ = next
| 39.39547 | 79 | 0.657277 |
beeccd82c643ace8391f7591995991251000781e | 1,458 | py | Python | sis_provisioner/tests/account_managers/test_terminate.py | uw-it-aca/bridge-sis-provisioner | 6dd31e5ef59263acbcade5e6e4f74b815c16bdee | ["Apache-2.0"] | null | null | null | sis_provisioner/tests/account_managers/test_terminate.py | uw-it-aca/bridge-sis-provisioner | 6dd31e5ef59263acbcade5e6e4f74b815c16bdee | ["Apache-2.0"] | 175 | 2016-07-18T23:25:45.000Z | 2022-02-07T20:44:05.000Z | sis_provisioner/tests/account_managers/test_terminate.py | uw-it-aca/bridge-sis-provisioner | 6dd31e5ef59263acbcade5e6e4f74b815c16bdee | ["Apache-2.0"] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TransactionTestCase
from unittest.mock import patch
from sis_provisioner.account_managers.terminate import TerminateUser
from sis_provisioner.account_managers.bridge_worker import BridgeWorker
from sis_provisioner.tests import (
fdao_pws_override, fdao_gws_override, fdao_bridge_override)
from sis_provisioner.tests.account_managers import set_db_records
@fdao_pws_override
@fdao_gws_override
@fdao_bridge_override
class TestTerminateUser(TransactionTestCase):
@patch('sis_provisioner.dao.gws._get_start_timestamp',
return_value=1626215400, spec=True)
def test_to_check(self, mock_fn):
loader = TerminateUser(BridgeWorker())
self.assertEqual(len(loader.fetch_users()), 2)
@patch('sis_provisioner.dao.gws._get_start_timestamp',
return_value=1626215400, spec=True)
def test_update(self, mock_fn):
set_db_records()
loader = TerminateUser(BridgeWorker())
loader.load()
self.assertEqual(loader.get_total_checked_users(), 2)
self.assertEqual(loader.get_new_user_count(), 0)
self.assertEqual(loader.get_netid_changed_count(), 1)
self.assertEqual(loader.get_deleted_count(), 1)
self.assertEqual(loader.get_restored_count(), 0)
self.assertEqual(loader.get_updated_count(), 1)
self.assertFalse(loader.has_error())
| 39.405405 | 71 | 0.758573 |
3221963c01409c2458b36cad2b5ce313f2a133a3 | 1,179 | py | Python | PositivosMedia1064.py | SricardoSdSouza/LogicaDeProgramacao | f39763bd3378640ff8de674c0b932f36fd09296a | ["MIT"] | null | null | null | PositivosMedia1064.py | SricardoSdSouza/LogicaDeProgramacao | f39763bd3378640ff8de674c0b932f36fd09296a | ["MIT"] | null | null | null | PositivosMedia1064.py | SricardoSdSouza/LogicaDeProgramacao | f39763bd3378640ff8de674c0b932f36fd09296a | ["MIT"] | null | null | null |
'''
Read 6 values. Then show how many of the values entered were positive. On the next line,
print the average of all the positive values entered, with one digit after the decimal point.
Input = The input contains 6 numbers, which may be integer or floating-point values. At least
one of these numbers will be positive.
Output = The first output value is the count of positive values. The next line must show the
average of the positive values entered.
Sample Input           Sample Output
7                      4 valores positivos
-5                     7.4
6
-3.4
4.6
12
'''
tot = 0
cont = 0
for i in range(6):
num = float(input())
if num > 0:
cont += 1
tot += num
print(f'{cont} valores positivos')
print(f'{tot/cont:.1f}')
'''Another way
positivos = [float(input()) for _ in range(6)]
total_positivos = 0
soma_dos_positivos = 0
for n in positivos:
if n > 0:
total_positivos += 1
soma_dos_positivos += n
print('{} valores positivos'.format(total_positivos))
print('{:.1f}'.format(soma_dos_positivos / total_positivos))
'''
| 28.756098 | 111 | 0.641221 |