id (int64, 0–6k) | code (string, lengths 4k–8k) | code_compressed (null) |
---|---|---|
5,000 |
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.physics.vector import ReferenceFrame, Vector, Point, \
dynamicsymbols
from sympy.physics.vector.fieldfunctions import divergence, \
gradient, curl, is_conservative, is_solenoidal, \
scalar_potential, scalar_potential_difference
from sympy.testing.pytest import raises
R = ReferenceFrame('R')
q = dynamicsymbols('q')
P = R.orientnew('P', 'Axis', [q, R.z])
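# P is the frame R rotated by the dynamic symbol q about R.z; R[i]/P[i] are the coordinate variables of each frame.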
def test_curl():
assert curl(Vector(0), R) == Vector(0)
assert curl(R.x, R) == Vector(0)
assert curl(2*R[1]**2*R.y, R) == Vector(0)
assert curl(R[0]*R[1]*R.z, R) == R[0]*R.x - R[1]*R.y
assert curl(R[0]*R[1]*R[2] * (R.x+R.y+R.z), R) == \
(-R[0]*R[1] + R[0]*R[2])*R.x + (R[0]*R[1] - R[1]*R[2])*R.y + \
(-R[0]*R[2] + R[1]*R[2])*R.z
assert curl(2*R[0]**2*R.y, R) == 4*R[0]*R.z
assert curl(P[0]**2*R.x + P.y, R) == \
- 2*(R[0]*cos(q) + R[1]*sin(q))*sin(q)*R.z
assert curl(P[0]*R.y, P) == cos(q)*P.z
def METHOD_NAME():
assert divergence(Vector(0), R) is S.Zero
assert divergence(R.x, R) is S.Zero
assert divergence(R[0]**2*R.x, R) == 2*R[0]
assert divergence(R[0]*R[1]*R[2] * (R.x+R.y+R.z), R) == \
R[0]*R[1] + R[0]*R[2] + R[1]*R[2]
assert divergence((1/(R[0]*R[1]*R[2])) * (R.x+R.y+R.z), R) == \
-1/(R[0]*R[1]*R[2]**2) - 1/(R[0]*R[1]**2*R[2]) - \
1/(R[0]**2*R[1]*R[2])
v = P[0]*P.x + P[1]*P.y + P[2]*P.z
assert divergence(v, P) == 3
assert divergence(v, R).simplify() == 3
assert divergence(P[0]*R.x + R[0]*P.x, R) == 2*cos(q)
def test_gradient():
a = Symbol('a')
assert gradient(0, R) == Vector(0)
assert gradient(R[0], R) == R.x
assert gradient(R[0]*R[1]*R[2], R) == \
R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z
assert gradient(2*R[0]**2, R) == 4*R[0]*R.x
assert gradient(a*sin(R[1])/R[0], R) == \
- a*sin(R[1])/R[0]**2*R.x + a*cos(R[1])/R[0]*R.y
assert gradient(P[0]*P[1], R) == \
((-R[0]*sin(q) + R[1]*cos(q))*cos(q) - (R[0]*cos(q) + R[1]*sin(q))*sin(q))*R.x + \
((-R[0]*sin(q) + R[1]*cos(q))*sin(q) + (R[0]*cos(q) + R[1]*sin(q))*cos(q))*R.y
assert gradient(P[0]*R[2], P) == P[2]*P.x + P[0]*P.z
scalar_field = 2*R[0]**2*R[1]*R[2]
grad_field = gradient(scalar_field, R)
vector_field = R[1]**2*R.x + 3*R[0]*R.y + 5*R[1]*R[2]*R.z
curl_field = curl(vector_field, R)
def test_conservative():
assert is_conservative(0) is True
assert is_conservative(R.x) is True
assert is_conservative(2 * R.x + 3 * R.y + 4 * R.z) is True
assert is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) is \
True
assert is_conservative(R[0] * R.y) is False
assert is_conservative(grad_field) is True
assert is_conservative(curl_field) is False
assert is_conservative(4*R[0]*R[1]*R[2]*R.x + 2*R[0]**2*R[2]*R.y) is \
False
assert is_conservative(R[2]*P.x + P[0]*R.z) is True
def test_solenoidal():
assert is_solenoidal(0) is True
assert is_solenoidal(R.x) is True
assert is_solenoidal(2 * R.x + 3 * R.y + 4 * R.z) is True
assert is_solenoidal(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) is \
True
assert is_solenoidal(R[1] * R.y) is False
assert is_solenoidal(grad_field) is False
assert is_solenoidal(curl_field) is True
assert is_solenoidal((-2*R[1] + 3)*R.z) is True
assert is_solenoidal(cos(q)*R.x + sin(q)*R.y + cos(q)*P.z) is True
assert is_solenoidal(R[2]*P.x + P[0]*R.z) is True
def test_scalar_potential():
assert scalar_potential(0, R) == 0
assert scalar_potential(R.x, R) == R[0]
assert scalar_potential(R.y, R) == R[1]
assert scalar_potential(R.z, R) == R[2]
assert scalar_potential(R[1]*R[2]*R.x + R[0]*R[2]*R.y + \
R[0]*R[1]*R.z, R) == R[0]*R[1]*R[2]
assert scalar_potential(grad_field, R) == scalar_field
assert scalar_potential(R[2]*P.x + P[0]*R.z, R) == \
R[0]*R[2]*cos(q) + R[1]*R[2]*sin(q)
assert scalar_potential(R[2]*P.x + P[0]*R.z, P) == P[0]*P[2]
raises(ValueError, lambda: scalar_potential(R[0] * R.y, R))
def test_scalar_potential_difference():
origin = Point('O')
point1 = origin.locatenew('P1', 1*R.x + 2*R.y + 3*R.z)
point2 = origin.locatenew('P2', 4*R.x + 5*R.y + 6*R.z)
genericpointR = origin.locatenew('RP', R[0]*R.x + R[1]*R.y + R[2]*R.z)
genericpointP = origin.locatenew('PP', P[0]*P.x + P[1]*P.y + P[2]*P.z)
assert scalar_potential_difference(S.Zero, R, point1, point2, \
origin) == 0
assert scalar_potential_difference(scalar_field, R, origin, \
genericpointR, origin) == \
scalar_field
assert scalar_potential_difference(grad_field, R, origin, \
genericpointR, origin) == \
scalar_field
assert scalar_potential_difference(grad_field, R, point1, point2,
origin) == 948
assert scalar_potential_difference(R[1]*R[2]*R.x + R[0]*R[2]*R.y + \
R[0]*R[1]*R.z, R, point1,
genericpointR, origin) == \
R[0]*R[1]*R[2] - 6
potential_diff_P = 2*P[2]*(P[0]*sin(q) + P[1]*cos(q))*\
(P[0]*cos(q) - P[1]*sin(q))**2
assert scalar_potential_difference(grad_field, P, origin, \
genericpointP, \
origin).simplify() == \
potential_diff_P
| null |
5,001 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import re
import unittest
from distutils.version import LooseVersion
import matplotlib.pyplot as plt # noqa # Needs importing so it's available in the tests, but isn't actually used.
import numpy as np
from mantid.simpleapi import FrameworkManager
from unittest.mock import Mock
from mantidqt.widgets.codeeditor.completion import (
CodeCompleter,
generate_call_tips,
get_function_spec,
get_builtin_argspec,
get_module_import_alias,
)
from testhelpers import assertRaisesNothing
class CodeCompletionTest(unittest.TestCase):
def setUp(self):
# needed so sys.modules can pick up Rebin
FrameworkManager.Instance()
def _get_completer(self, text, env_globals=None):
return CodeCompleter(Mock(text=lambda: text, fileName=lambda: ""), env_globals)
def _run_check_call_tip_generated(self, script_text, call_tip_regex):
completer = self._get_completer(script_text)
update_completion_api_mock = completer.editor.updateCompletionAPI
completer._add_simpleapi_to_completions_if_required()
call_tips = update_completion_api_mock.call_args_list[1][0][0]
self.assertEqual(2, update_completion_api_mock.call_count)
self.assertGreater(len(call_tips), 1)
self.assertTrue(re.search(call_tip_regex, " ".join(call_tips)))
def _run_check_call_tip_not_generated(self, script_text, call_tip_regex):
completer = self._get_completer(script_text)
update_completion_api_mock = completer.editor.updateCompletionAPI
call_tips = update_completion_api_mock.call_args_list[0][0][0]
self.assertEqual(1, update_completion_api_mock.call_count)
self.assertFalse(bool(re.search(call_tip_regex, " ".join(call_tips))))
def test_Rebin_call_tips_generated_on_construction_when_api_import_in_script(self):
self._run_check_call_tip_generated("from mantid.simpleapi import *\n# My code", r"Rebin\(InputWorkspace, .*\)")
def test_numpy_call_tips_generated_if_numpy_imported_in_script(self):
self._run_check_call_tip_generated("import numpy as np\n# My code", r"np\.asarray\(a, \[dtype\], .*\)")
def test_numpy_call_tips_generated_handling_wildcards_properly_if_numpy_imported_in_script(self):
if LooseVersion(np.__version__) >= LooseVersion("1.21"):
self._run_check_call_tip_generated("import numpy as np\n# My code", r"np\.asarray\(a, \[dtype\], \[order\], \*, \[like\]\)")
def test_call_tips_generated_if_syntax_errors_in_script(self):
self._run_check_call_tip_generated("from mantid.simpleapi import *\n print 'Hello', 'World'", "Rebin")
def test_pyplot_call_tips_generated_if_imported_in_script(self):
self._run_check_call_tip_generated("import matplotlib.pyplot as plt\n# My code", r"plt\.figure\(\[num\], .*\)")
def test_simple_api_call_tips_not_generated_on_construction_if_api_import_not_in_script(self):
self._run_check_call_tip_not_generated("import numpy as np\n# My code", "Rebin")
def test_numpy_call_tips_not_generated_if_its_not_imported(self):
self._run_check_call_tip_not_generated("# My code", "numpy")
def test_pyplot_call_tips_not_generated_if_its_not_imported(self):
self._run_check_call_tip_not_generated("# My code", "pyplot")
def test_generate_call_tips_without_module_attribute_and_prepend_module_name(self):
tips = generate_call_tips({"unicode_str": "my unicode str"}, prepend_module_name=True)
self.assertEqual(0, len(tips))
def test_generate_call_tips_without_module_attribute_and_prepend_module_name_false(self):
tips = generate_call_tips({"unicode_str": "my unicode str"}, prepend_module_name=False)
self.assertEqual(0, len(tips))
def test_nothing_raised_when_getting_completions_from_a_not_imported_module(self):
completer = self._get_completer("# My code")
assertRaisesNothing(self, completer._get_module_call_tips, "this.doesnt.exist")
def METHOD_NAME(self):
def my_new_function(arg1, arg2, kwarg1=None, kwarg2=0):
pass
self.assertEqual("(arg1, arg2, [kwarg1], [kwarg2])", get_function_spec(my_new_function))
def test_get_function_spec_returns_expected_string_for_implicit_args(self):
def my_new_function(*args, **kwargs):
pass
self.assertEqual("(args, [**kwargs])", get_function_spec(my_new_function))
def test_get_builtin_argspec_generates_argspec_for_numpy_builtin(self):
argspec = get_builtin_argspec(np.zeros)
self.assertIn("shape, dtype, order", ", ".join(argspec.args))
self.assertIn("float, 'C'", ", ".join(argspec.defaults))
def test_get_module_import_alias_finds_import_aliases(self):
script = (
"import numpy as np\n"
"from keyword import kwlist as key_word_list\n"
"import matplotlib.pyplot as plt\n"
"import mymodule.some_func as func, something as _smthing\n"
"# import commented.module as not_imported\n"
"import thing as _thing # import kwlist2 as kew_word_list2"
)
aliases = {
"numpy": "np",
"kwlist": "key_word_list",
"matplotlib.pyplot": "plt",
"mymodule.some_func": "func",
"something": "_smthing",
"commented.module": "commented.module",
"kwlist2": "kwlist2", # alias is commented out so expect alias to not be assigned
}
for import_name, alias in aliases.items():
self.assertEqual(alias, get_module_import_alias(import_name, script))
if __name__ == "__main__":
unittest.main()
| null |
5,002 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetResourceGroupResult',
'AwaitableGetResourceGroupResult',
'get_resource_group',
'get_resource_group_output',
]
@pulumi.output_type
class GetResourceGroupResult:
"""
A collection of values returned by getResourceGroup.
"""
def __init__(__self__, id=None, location=None, METHOD_NAME=None, name=None, tags=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'managed_by' to be a str")
pulumi.set(__self__, "managed_by", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region where the Resource Group exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def METHOD_NAME(self) -> str:
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the Resource Group.
"""
return pulumi.get(self, "tags")
class AwaitableGetResourceGroupResult(GetResourceGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetResourceGroupResult(
id=self.id,
location=self.location,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
tags=self.tags)
def get_resource_group(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceGroupResult:
"""
Use this data source to access information about an existing Resource Group.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.core.get_resource_group(name="existing")
pulumi.export("id", example.id)
```
:param str name: The Name of this Resource Group.
"""
__args__ = dict()
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:core/getResourceGroup:getResourceGroup', __args__, opts=opts, typ=GetResourceGroupResult).value
return AwaitableGetResourceGroupResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
METHOD_NAME=pulumi.get(__ret__, 'managed_by'),
name=pulumi.get(__ret__, 'name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_resource_group)
def get_resource_group_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResourceGroupResult]:
"""
Use this data source to access information about an existing Resource Group.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.core.get_resource_group(name="existing")
pulumi.export("id", example.id)
```
:param str name: The Name of this Resource Group.
"""
...
| null |
5,003 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.tools.cli.handler.base_handler."""
import os
import textwrap
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.tools.cli.handler import base_handler
class FakeHandler(base_handler.BaseHandler):
def create_pipeline(self) -> None:
pass
def update_pipeline(self) -> None:
pass
def list_pipelines(self) -> None:
pass
def delete_pipeline(self) -> None:
pass
def compile_pipeline(self) -> None:
pass
def get_schema(self) -> None:
pass
def create_run(self) -> None:
pass
def delete_run(self) -> None:
pass
def terminate_run(self) -> None:
pass
def list_runs(self) -> None:
pass
def get_run(self) -> None:
pass
class BaseHandlerTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.engine = 'airflow'
self.chicago_taxi_pipeline_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'testdata')
self.pipeline_path = os.path.join(self.chicago_taxi_pipeline_dir,
'test_pipeline_airflow_1.py')
self._original_home = os.environ['HOME']
os.environ['HOME'] = self.create_tempdir().full_path
def tearDown(self):
super().tearDown()
os.environ['HOME'] = self._original_home
def testCheckPipelineDslPathInvalid(self):
flags_dict = {labels.ENGINE_FLAG: self.engine,
labels.PIPELINE_DSL_PATH: 'taxi_pipeline.py'}
handler = FakeHandler(flags_dict)
with self.assertRaises(SystemExit) as err:
handler._check_pipeline_dsl_path()
self.assertEqual(str(err.exception), 'Invalid pipeline path: {}'
.format(flags_dict[labels.PIPELINE_DSL_PATH]))
def testGetHandlerHome(self):
flags_dict = {
labels.ENGINE_FLAG: 'engine',
labels.PIPELINE_DSL_PATH: 'path_to_pipeline_dsl'
}
handler = FakeHandler(flags_dict)
self.assertEqual(
os.path.join(os.environ['HOME'], 'tfx', 'engine', ''),
handler._get_handler_home())
def testCheckPipelinExistenceNotRequired(self):
flags_dict = {labels.ENGINE_FLAG: 'beam', labels.PIPELINE_NAME: 'pipeline'}
handler = FakeHandler(flags_dict)
fileio.makedirs(
os.path.join(os.environ['HOME'], 'tfx', 'beam', 'pipeline', ''))
with self.assertRaises(SystemExit) as err:
handler._check_pipeline_existence(
flags_dict[labels.PIPELINE_NAME], required=False)
self.assertTrue(
str(err.exception), 'Pipeline "{}" already exists.'.format(
flags_dict[labels.PIPELINE_NAME]))
def METHOD_NAME(self):
flags_dict = {
labels.ENGINE_FLAG: 'beam',
labels.PIPELINE_NAME: 'chicago_taxi_beam'
}
handler = FakeHandler(flags_dict)
with self.assertRaises(SystemExit) as err:
handler._check_pipeline_existence(flags_dict[labels.PIPELINE_NAME])
self.assertTrue(
str(err.exception), 'Pipeline "{}" does not exist.'.format(
flags_dict[labels.PIPELINE_NAME]))
def testCheckPipelinExistenceRequiredMigrated(self):
flags_dict = {labels.ENGINE_FLAG: 'beam', labels.PIPELINE_NAME: 'pipeline'}
handler = FakeHandler(flags_dict)
old_path = os.path.join(os.environ['HOME'], 'beam', 'pipeline')
new_path = os.path.join(os.environ['HOME'], 'tfx', 'beam', 'pipeline')
fileio.makedirs(old_path)
self.assertFalse(fileio.exists(new_path))
handler._check_pipeline_existence(flags_dict[labels.PIPELINE_NAME])
self.assertTrue(fileio.exists(new_path))
self.assertFalse(fileio.exists(old_path))
def testFormatTable(self):
flags_dict = {
labels.ENGINE_FLAG: 'engine',
labels.PIPELINE_DSL_PATH: 'path_to_pipeline_dsl'
}
handler = FakeHandler(flags_dict)
self.assertEqual(
textwrap.dedent("""\
+=====+=====+=======+
| abc | d | False |
+=====+=====+=======+
| 1 | 234 | None |
+-----+-----+-------+
| xxx | | [] |
+=====+=====+=======+
"""),
handler._format_table(('abc', 'd', False),
[[1, '234', None], ['xxx', '', []]]))
if __name__ == '__main__':
tf.test.main()
| null |
5,004 |
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Base class of servers'''
import asyncio
import os
import platform
import re
import signal
import sys
import time
from contextlib import suppress
from functools import partial
from typing import TYPE_CHECKING
from aiorpcx import spawn
from electrumx.lib.util import class_logger
if TYPE_CHECKING:
from electrumx.server.env import Env
class ServerBase:
'''Base class server implementation.
Derived classes are expected to:
- set PYTHON_MIN_VERSION and SUPPRESS_MESSAGE_REGEX as appropriate
- implement the serve() coroutine, called from the run() method.
Upon return the event loop runs until the shutdown signal is received.
'''
SUPPRESS_MESSAGE_REGEX = re.compile('SSL handshake|Fatal read error on|'
'SSL error in data received|'
'socket.send() raised exception')
SUPPRESS_TASK_REGEX = re.compile('accept_connection2')
PYTHON_MIN_VERSION = (3, 7)
def __init__(self, env: 'Env'):
'''Save the environment, perform basic sanity checks, and set the
event loop policy.
'''
# First asyncio operation must be to set the event loop policy
# as this replaces the event loop
asyncio.set_event_loop_policy(env.loop_policy)
self.logger = class_logger(__name__, self.__class__.__name__)
version_str = ' '.join(sys.version.splitlines())
self.logger.info(f'Python version: {version_str}')
self.env = env
self.start_time = 0
# Sanity checks
if sys.version_info < self.PYTHON_MIN_VERSION:
mvs = '.'.join(str(part) for part in self.PYTHON_MIN_VERSION)
raise RuntimeError(f'Python version >= {mvs} is required')
if platform.system() == 'Windows':
pass
elif os.geteuid() == 0 and not env.allow_root:
raise RuntimeError('RUNNING AS ROOT IS STRONGLY DISCOURAGED!\n'
'You should create an unprivileged user account '
'and use that.\n'
'To continue as root anyway, restart with '
'environment variable ALLOW_ROOT non-empty')
async def serve(self, shutdown_event: asyncio.Event):
'''Override to provide the main server functionality.
Run as a task that will be cancelled to request shutdown.
Setting the event also shuts down the server.
'''
def on_exception(self, loop, context):
'''Suppress spurious messages it appears we cannot control.'''
message = context.get('message')
if message and self.SUPPRESS_MESSAGE_REGEX.match(message):
return
if self.SUPPRESS_TASK_REGEX.match(repr(context.get('task'))):
return
loop.default_exception_handler(context)
async def run(self):
'''Run the server application:
- record start time
- install SIGINT and SIGTERM handlers to trigger shutdown_event
- set loop's exception handler to suppress unwanted messages
- run the event loop until serve() completes
'''
def METHOD_NAME(signame):
shutdown_event.set()
self.logger.warning(f'received {signame} signal, initiating shutdown')
async def serve():
try:
await self.serve(shutdown_event)
finally:
shutdown_event.set()
self.start_time = time.time()
loop = asyncio.get_event_loop()
shutdown_event = asyncio.Event()
if platform.system() != 'Windows':
# No signals on Windows
for signame in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(getattr(signal, signame),
partial(METHOD_NAME, signame))
loop.set_exception_handler(self.on_exception)
# Start serving and wait for shutdown, log receipt of the event
server_task = await spawn(serve, daemon=True)
try:
await shutdown_event.wait()
except KeyboardInterrupt:
self.logger.warning('received keyboard interrupt, initiating shutdown')
self.logger.info('shutting down')
server_task.cancel()
try:
with suppress(asyncio.CancelledError):
await server_task
finally:
self.logger.info('shutdown complete')
| null |
5,005 |
from typing import Any, Dict, List, Optional, Tuple, Union
from kornia.augmentation import random_generator as rg
from kornia.augmentation._2d.mix.base import MixAugmentationBaseV2
from kornia.constants import DataKey, DType
from kornia.core import Tensor, stack, zeros
from kornia.geometry.bbox import bbox_to_mask, infer_bbox_shape
class RandomCutMixV2(MixAugmentationBaseV2):
r"""Apply CutMix augmentation to a batch of tensor images.
.. image:: _static/img/RandomCutMixV2.png
Implementation for `CutMix: Regularization Strategy to Train Strong Classifiers with
Localizable Features` :cite:`yun2019cutmix`.
The function returns (inputs, labels), in which inputs is the tensor that contains the mixed images
while labels is a :math:`(\text{num_mixes}, B, 3)` tensor that contains (label_batch, label_permuted_batch, lambda)
for each cutmix.
The implementation referred to the following repository: `https://github.com/clovaai/CutMix-PyTorch
<https://github.com/clovaai/CutMix-PyTorch>`_.
Args:
height: the height of the input image.
width: the width of the input image.
p: probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wisely.
num_mix: cut mix times.
beta: hyperparameter for generating cut size from beta distribution.
Beta cannot be set to 0 after torch 1.8.0. If None, it will be set to 1.
cut_size: controlling the minimum and maximum cut ratio from [0, 1].
If None, it will be set to [0, 1], which means no restriction.
same_on_batch: apply the same transformation across the batch.
This flag will not maintain permutation order.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
Inputs:
- Input image tensors, shape of :math:`(B, C, H, W)`.
- Raw labels, shape of :math:`(B)`.
Returns:
Tuple[Tensor, Tensor]:
- Adjusted image, shape of :math:`(B, C, H, W)`.
- Raw labels, permuted labels and lambdas for each mix, shape of :math:`(B, num_mix, 3)`.
Note:
This implementation would randomly cutmix images in a batch. Ideally, the larger batch size would be preferred.
Examples:
>>> rng = torch.manual_seed(3)
>>> input = torch.rand(2, 1, 3, 3)
>>> input[0] = torch.ones((1, 3, 3))
>>> label = torch.tensor([0, 1])
>>> cutmix = RandomCutMixV2(data_keys=["input", "class"])
>>> cutmix(input, label)
[tensor([[[[0.8879, 0.4510, 1.0000],
[0.1498, 0.4015, 1.0000],
[1.0000, 1.0000, 1.0000]]],
<BLANKLINE>
<BLANKLINE>
[[[1.0000, 1.0000, 0.7995],
[1.0000, 1.0000, 0.0542],
[0.4594, 0.1756, 0.9492]]]]), tensor([[[0.0000, 1.0000, 0.4444],
[1.0000, 0.0000, 0.4444]]])]
"""
def __init__(
self,
num_mix: int = 1,
cut_size: Optional[Union[Tensor, Tuple[float, float]]] = None,
beta: Optional[Union[Tensor, float]] = None,
same_on_batch: bool = False,
p: float = 1.0,
keepdim: bool = False,
data_keys: List[Union[str, int, DataKey]] = [DataKey.INPUT],
) -> None:
super().__init__(p=1.0, p_batch=p, same_on_batch=same_on_batch, keepdim=keepdim, data_keys=data_keys)
self._param_generator: rg.CutmixGenerator = rg.CutmixGenerator(cut_size, beta, num_mix, p=p)
def apply_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any]) -> Tensor:
height, width = params["image_shape"]
out_labels = []
for pair, crop in zip(params["mix_pairs"], params["crop_src"]):
labels_permute = input.index_select(dim=0, index=pair.to(input.device))
w, h = infer_bbox_shape(crop)
lam = w.to(input.dtype) * h.to(input.dtype) / (width * height) # width_beta * height_beta
out_labels.append(
stack(
[
input.to(device=input.device, dtype=DType.to_torch(int(params["dtype"].item()))),
labels_permute.to(device=input.device, dtype=DType.to_torch(int(params["dtype"].item()))),
lam.to(device=input.device, dtype=DType.to_torch(int(params["dtype"].item()))),
],
1,
)
)
return stack(out_labels, 0)
def apply_non_transform_class(
self, input: Tensor, params: Dict[str, Tensor], flags: Optional[Dict[str, Any]] = None
) -> Tensor:
out_labels = []
lam = zeros((len(input)), device=input.device, dtype=DType.to_torch(int(params["dtype"].item())))
for _ in range(self._param_generator.num_mix):
out_labels.append(
stack(
[
input.to(device=input.device, dtype=DType.to_torch(int(params["dtype"].item()))),
input.to(device=input.device, dtype=DType.to_torch(int(params["dtype"].item()))),
lam,
],
1,
)
)
return stack(out_labels, 0)
def METHOD_NAME(
self, input: Tensor, params: Dict[str, Tensor], maybe_flags: Optional[Dict[str, Any]] = None
) -> Tensor:
height, width = input.size(2), input.size(3)
out_inputs = input.clone()
for pair, crop in zip(params["mix_pairs"], params["crop_src"]):
input_permute = input.index_select(dim=0, index=pair.to(input.device))
# compute mask to match input shape
mask = bbox_to_mask(crop, width, height).bool().unsqueeze(dim=1).repeat(1, input.size(1), 1, 1)
out_inputs[mask] = input_permute[mask]
return out_inputs
| null |
5,006 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.functional as F
from mindspore import Tensor
from mindspore.common.api import jit
from mindspore.ops import operations as P
from mindspore.ops.functional import vmap
class TestKLDivLossNet(nn.Cell):
def __init__(self, reduction):
super(TestKLDivLossNet, self).__init__()
self.kl_div_loss = P.KLDivLoss(reduction=reduction)
def construct(self, x, target):
return self.kl_div_loss(x, target)
def kl_div_loss_np(x, target, reduction):
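# Element-wise KL term: target * (log(target) - x); NaNs produced where target <= 0 are zeroed out below.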
out = target * (np.log(target) - x)
out = np.nan_to_num(out, nan=0.)
if reduction == "none":
return out
if reduction == "batchmean":
return np.sum(out) / out.shape[0]
if reduction == "sum":
return np.sum(out)
raise RuntimeError("reduction should be one of ['none', 'batchmean', 'sum']")
def compare_with_numpy(x, target, reduction):
x_ms = Tensor(x)
target_ms = Tensor(target)
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
out = TestKLDivLossNet(reduction)(x_ms, target_ms)
expected = kl_div_loss_np(x, target, reduction)
np.testing.assert_array_almost_equal(out.asnumpy(), expected)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
out = TestKLDivLossNet(reduction)(x_ms, target_ms)
expected = kl_div_loss_np(x, target, reduction)
np.testing.assert_array_almost_equal(out.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("reduction", ["none", "sum"])
@pytest.mark.parametrize("data_type", [np.float16, np.float32])
def METHOD_NAME(reduction, data_type):
"""
Feature: KLDivLoss operators.
Description: test cases for KLDivLoss operator
Expectation: the result match numpy implementation.
"""
x = data_type(0.7)
target = data_type(1.)
compare_with_numpy(x, target, reduction)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("reduction", ["none", "sum", "batchmean"])
@pytest.mark.parametrize("data_type", [np.float16, np.float32])
def test_kl_div_loss_multi_dim(reduction, data_type):
"""
Feature: KLDivLoss operators.
Description: test cases for KLDivLoss operator
Expectation: the result match numpy implementation.
"""
x = np.array([[0.2, 0.7, 0.1], [-0.1, 3., 0.9]]).astype(data_type)
target = np.array([[1., 0., 0.1], [0.6, -1., 4.]]).astype(data_type)
compare_with_numpy(x, target, reduction)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("reduction", ["none", "sum", "batchmean"])
def test_kl_div_loss_vmap(reduction):
"""
Feature: vmap of KLDivLoss operators.
Description: test cases for vmap of KLDivLoss operator
Expectation: the result matched.
"""
def cal_kl_div_loss(x, target):
return P.KLDivLoss(reduction)(x, target)
@jit
def manually_batched(xs, targets):
output = []
for i in range(xs.shape[-1]):
inner_output = []
for j in range(xs[:, :, i].shape[1]):
inner_output.append(cal_kl_div_loss(xs[:, j, i], targets[:, j, i]))
output.append(F.stack(inner_output))
return F.stack(output)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
x = Tensor(np.random.rand(4, 4, 4).astype(np.float32))
target = Tensor(np.random.rand(4, 4, 4).astype(np.float32))
vmap_kl_div_loss = vmap(
vmap(cal_kl_div_loss, in_axes=(1, 1), out_axes=0),
in_axes=(-1, -1), out_axes=0,
)
outputs = vmap_kl_div_loss(x, target)
expect = manually_batched(x, target)
np.testing.assert_allclose(outputs.asnumpy(), expect.asnumpy(), rtol=1e-4, atol=1e-3)
| null |
5,007 |
#!/usr/bin/env python3
import rospy
import json
from lg_msg_defs.srv import BrowserPool, USCSMessage
from lg_common.helpers import add_url_params
from std_msgs.msg import String
from appctl_msg_defs.msg import Mode
from urllib.request import urlopen
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'state_tracker'
from lg_common.logger import get_logger
logger = get_logger(NODE_NAME)
class StateTracker(object):
def __init__(self, state_publisher, update_rfid_pub, last_uscs_service,
tactile_flag='', display_url_service=None, kiosk_url_service=None):
self.state_publisher = state_publisher
self.update_rfid_pub = update_rfid_pub
self.last_uscs_service = last_uscs_service
self.last_runway_card = None
self.ignore_card = 'click!![1,[3,null,[true],[null,null,0],null,null,false,1],1]'
self.exit_card = 'exit!![1,[],0]'
self.last_rfid = ''
self.tactile_flag = tactile_flag
self.display_url_service = display_url_service
self.kiosk_url_service = kiosk_url_service
def handle_runway_cards(self, msg):
if msg.data == self.exit_card or msg.data == self.ignore_card or msg.data[11] == '3':
self.last_runway_card = None
return
self.last_runway_card = msg.data
def build_state(self):
"""
Calls state tracking service and handles all url grabbing
"""
current_state = self.last_uscs_service().message
try:
current_state = json.loads(current_state)
except Exception:
logger.error("Error parsing last uscs message as json")
return
windows = current_state.get('windows', [])
for window in windows:
url = self.METHOD_NAME(window)
if url is None:
continue
# adding cms_protocol and cms_port for the portal launcher
# only needed on the kiosk
if window.get('presentation_viewport', None) == 'kiosk':
# default port and protocol
protocol = 'http'
port = '8088'
if 'https' in url:
protocol = 'https'
port = '443'
window['assets'] = [add_url_params(url, cms_protocol=protocol, cms_port=port)]
current_state = self.handle_tactile(current_state)
return current_state
def handle_tactile(self, state):
for window in state.get('windows', []):
if window.get('activity', '') != 'browser':
continue
for i in range(len(window.get('assets', []))):
if ('maps.google.com' in window['assets'][i] or "google.com/maps" in window['assets'][i]) and \
self.tactile_flag not in window['assets'][i]:
# add a param to be sure there is at least one param (HACK)
url = add_url_params(window['assets'][i], foo='bar')
url += '&%s' % self.tactile_flag
window['assets'][i] = url
# adding cms_protocol and cms_port for the portal launcher
# only needed on the kiosk
#if window.get('presentation_viewport', None) == 'kiosk':
# window['assets'][i] = add_url_params(window['assets'][i], cms_protocol='https', cms_port='443')
return state
def METHOD_NAME(self, window):
"""
given a window (from the director message) grab either the kiosk or
the display current url based on the viewport from the window. Apply
any tactile changes if maps.google.com is part of the url
"""
url_service = None
activity = window.get('activity', None)
if activity != 'browser':
return
viewport = window.get('presentation_viewport', None)
if viewport is None:
logger.info("viewport was None... ignoring")
return
# display might be ok to go away, but only once we're sure
if viewport != 'kiosk' and viewport != 'wall' and viewport != 'display':
logger.warning("Unable to determine viewport named (%s)" % viewport)
return
if viewport == 'kiosk':
url_service = self.kiosk_url_service
elif viewport == 'display' or viewport == 'wall':
url_service = self.display_url_service
state = url_service.call().state
try:
state = json.loads(state)
except Exception:
logger.warning("Unable to parse state (%s)" % state)
raise
if len(state) > 1:
logger.warning('There is more than one browser active, the wrong URL might be returned')
for browser_id, browser_data in state.items():
return browser_data['current_url_normalized']
def handle_nfc(self, msg):
self.last_rfid = msg.data
state = self.build_state()
state['rfid'] = msg.data
self.update_rfid_pub.publish(json.dumps(state))
def main():
rospy.init_node(NODE_NAME)
current_state_topic = rospy.get_param('~current_state_topic', '/state_tracker/current_state')
update_rfid_topic = rospy.get_param('~update_rfid_topic', '/rfid/uscs/update')
tactile_flag = rospy.get_param('~tactile_flag', '')
last_uscs_service = rospy.ServiceProxy('/uscs/message', USCSMessage, persistent=False)
kiosk_url_service = rospy.ServiceProxy('/browser_service/kiosk', BrowserPool, persistent=False)
display_url_service = rospy.ServiceProxy('/browser_service/wall', BrowserPool, persistent=False)
current_state = rospy.Publisher(current_state_topic, String, queue_size=10)
update_rfid_pub = rospy.Publisher(update_rfid_topic, String, queue_size=10)
state_tracker = StateTracker(
current_state, update_rfid_pub, last_uscs_service,
tactile_flag=tactile_flag, display_url_service=display_url_service,
kiosk_url_service=kiosk_url_service)
rospy.Subscriber('/portal_kiosk/runway', String, state_tracker.handle_runway_cards)
rospy.Subscriber('/rfid/set', String, state_tracker.handle_nfc)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| null |
5,008 |
import pytest
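# Accepted input values mapped to the canonical value the setting reports back.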
SMART_FRAME_SURROUNDINGS = {
'hide_all': 'hide_all',
'on': 'hide_all',
'true': 'hide_all',
'1': 'hide_all',
'hide_gaps': 'hide_gaps',
'off': 'off',
'false': 'off',
'0': 'off',
}
can_toggle = [
'update_dragged_clients',
]
cannot_toggle = [
'window_border_width',
'frame_border_active_color',
'default_frame_layout',
'wmname'
]
@pytest.mark.parametrize('name', can_toggle)
def test_toggle_boolean_settings(hlwm, name):
hlwm.call("toggle " + name)
@pytest.mark.parametrize('name', cannot_toggle)
def test_cannot_toggle_non_boolean(hlwm, name):
p = hlwm.call_xfail("toggle " + name)
assert p.stderr.endswith("not of type bool\n")
@pytest.mark.parametrize('name', can_toggle + cannot_toggle)
def test_get(hlwm, name):
hlwm.call("get " + name)
@pytest.mark.parametrize('name', can_toggle)
def test_toggle_numeric_settings(hlwm, name):
hlwm.call("toggle " + name)
@pytest.mark.parametrize('name', cannot_toggle)
def test_cannot_toggle_non_numeric(hlwm, name):
hlwm.call_xfail("toggle " + name)
def test_toggle_completion(hlwm):
res = hlwm.complete("toggle")
for n in can_toggle:
assert n in res
for n in cannot_toggle:
assert n not in res
def test_default_frame_layout_value_too_high(hlwm):
hlwm.call_xfail('set default_frame_layout 99') \
.expect_stderr('set: Invalid value "99" for setting "default_frame_layout": .*out of range')
def test_default_frame_layout_value_invalid_value(hlwm):
hlwm.call_xfail('set default_frame_layout -23') \
.expect_stderr('set: Invalid value "-23" for setting "default_frame_layout": .*Expecting.*vertical')
hlwm.call_xfail('set default_frame_layout foobar') \
.expect_stderr('set: Invalid value "foobar" for setting "default_frame_layout": .*Expecting.*vertical')
def test_default_frame_layout_after_split(hlwm):
"""When splitting a FrameLeaf, then the new frame
inherits the layout algorithm. However, when a FrameSplit is
split, then default_frame_layout is used.
"""
old_default = hlwm.attr.settings.default_frame_layout()
new_default = 'grid'
assert old_default != new_default, \
"the test is vacuous if the default didn't change"
hlwm.attr.settings.default_frame_layout = new_default
hlwm.call('split right')
# split the root frame
hlwm.call(['split', 'bottom', '0.5', ''])
# this new frame has the new default frame layout, but the two frames
# on the top still have the original algorithm:
assert hlwm.attr.tags.focus.tiling.root[0][0].algorithm() == old_default
assert hlwm.attr.tags.focus.tiling.root[0][1].algorithm() == old_default
assert hlwm.attr.tags.focus.tiling.root[1].algorithm() == new_default
def test_default_frame_layout_on_new_tag(hlwm):
old_default = hlwm.attr.settings.default_frame_layout()
new_default = 'grid'
assert old_default != new_default, \
"the test is vacuous if the default didn't change"
hlwm.attr.settings.default_frame_layout = new_default
hlwm.call('add newtag')
assert hlwm.attr.tags[1].tiling.root.algorithm() == new_default
assert hlwm.attr.tags[0].tiling.root.algorithm() == old_default
def test_default_frame_layout_index_as_name(hlwm):
"""test backwards compatibility of default_frame_layout"""
layout_with_index_1 = 'horizontal'
assert hlwm.attr.settings.default_frame_layout() != layout_with_index_1
hlwm.attr.settings.default_frame_layout = '1'
assert hlwm.attr.settings.default_frame_layout() == layout_with_index_1
def test_default_frame_layout_completion(hlwm):
assert 'grid' in hlwm.complete(['set', 'default_frame_layout'])
def test_set_invalid_setting(hlwm):
hlwm.call_xfail('set foobar baz') \
.expect_stderr('Setting "foobar" not found\n')
def test_get_invalid_setting(hlwm):
hlwm.call_xfail('get foobar') \
.expect_stderr('Setting "foobar" not found\n')
def test_toggle_invalid_setting(hlwm):
hlwm.call_xfail('toggle foobar') \
.expect_stderr('Setting "foobar" not found\n')
def test_monitors_locked_negative_value(hlwm):
hlwm.call_xfail('set monitors_locked -1') \
.expect_stderr('out of range')
def METHOD_NAME(hlwm):
assert sorted(SMART_FRAME_SURROUNDINGS) == sorted(hlwm.complete(['set', 'smart_frame_surroundings']))
for k in SMART_FRAME_SURROUNDINGS:
hlwm.attr.settings.smart_frame_surroundings = k
assert hlwm.attr.settings.smart_frame_surroundings() == SMART_FRAME_SURROUNDINGS[k]
hlwm.call_xfail('set smart_frame_surroundings foobar') \
.expect_stderr('Expecting one of: hide_all.*')
def test_smart_frame_surroundings(hlwm, x11):
hlwm.attr.settings.frame_border_width = 5
hlwm.attr.settings.frame_gap = 7
hlwm.attr.settings.smart_frame_surroundings = 'hide_all'
frame_x11 = x11.get_hlwm_frames()[0]
frame_geom = frame_x11.get_geometry()
assert (frame_geom.width, frame_geom.height) == (800, 600)
hlwm.attr.settings.smart_frame_surroundings = 'hide_gaps'
frame_x11 = x11.get_hlwm_frames()[0]
frame_geom = frame_x11.get_geometry()
assert (frame_geom.width, frame_geom.height) == (790, 590)
hlwm.attr.settings.smart_frame_surroundings = 'off'
frame_x11 = x11.get_hlwm_frames()[0]
frame_geom = frame_x11.get_geometry()
assert (frame_geom.width, frame_geom.height) == (776, 576)
def test_always_show_frame(hlwm):
# test old->new setting
settings = hlwm.attr.settings
settings.always_show_frame = True
assert settings.show_frame_decorations() == 'all'
settings.always_show_frame = False
assert settings.show_frame_decorations() == 'focused'
# test new->old setting
settings.always_show_frame = True
settings.show_frame_decorations = 'nonempty'
assert settings.always_show_frame() is False
settings.show_frame_decorations = 'all'
assert settings.always_show_frame() is True
settings.show_frame_decorations = 'focused'
assert settings.always_show_frame() is False
| null |
5,009 |
"""
The test design notes:
* The field "abc.def.hij" is existent.
* The field "rst.uvw.xyz" is inexistent.
* Its mixtures ("a.b.z", "a.y.z") are used to simulate partial inexistence.
* For the existent keys, kwargs should not matter.
* For the non-existent keys, the default is returned,
or a ``KeyError`` raised -- as with regular mappings.
* For special cases with "wrong" values (``"value"["z"]``, ``None["z"]``, etc),
either a ``TypeError`` should be raised normally. If "wrong" values are said
to be ignored, then they are treated the same as inexistent values,
and the default value is returned or a ``KeyError`` is raised.
"""
import types
import pytest
from kopf._cogs.structs.dicts import resolve, resolve_obj
default = object()
#
# Both resolve functions should behave exactly the same for dicts.
#
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_existent_key_with_no_default(resolve):
d = {'abc': {'def': {'hij': 'val'}}}
r = resolve(d, ['abc', 'def', 'hij'])
assert r == 'val'
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_existent_key_with_default(resolve):
d = {'abc': {'def': {'hij': 'val'}}}
r = resolve(d, ['abc', 'def', 'hij'], default)
assert r == 'val'
@pytest.mark.parametrize('key', [
pytest.param(['rst', 'uvw', 'xyz'], id='1stlvl'),
pytest.param(['abc', 'uvw', 'xyz'], id='2ndlvl'),
pytest.param(['abc', 'def', 'xyz'], id='3rdlvl'),
])
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_inexistent_key_with_no_default(resolve, key):
d = {'abc': {'def': {'hij': 'val'}}}
with pytest.raises(KeyError):
resolve(d, key)
@pytest.mark.parametrize('key', [
pytest.param(['rst', 'uvw', 'xyz'], id='1stlvl'),
pytest.param(['abc', 'uvw', 'xyz'], id='2ndlvl'),
pytest.param(['abc', 'def', 'xyz'], id='3rdlvl'),
])
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_inexistent_key_with_default(resolve, key):
d = {'abc': {'def': {'hij': 'val'}}}
r = resolve(d, key, default)
assert r is default
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_nonmapping_with_no_default(resolve):
d = {'key': 'val'}
with pytest.raises(TypeError):
resolve(d, ['key', 'sub'])
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_nonmapping_with_default(resolve):
d = {'key': 'val'}
r = resolve(d, ['key', 'sub'], default)
assert r is default
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_none_is_treated_as_a_regular_default_value(resolve):
d = {'abc': {'def': {'hij': 'val'}}}
r = resolve(d, ['abc', 'def', 'xyz'], None)
assert r is None
@pytest.mark.parametrize('resolve', [resolve, resolve_obj])
def test_dict_with_empty_path(resolve):
d = {'key': 'val'}
r = resolve(d, [])
assert r == d
assert r is d
#
# Specialised drill-down for objects.
#
class FakeKubernetesModel: # no bases!
__module__ = 'kubernetes.client.models.fake-for-tests'
@property
def metadata(self):
return None
attribute_map = {
'AbC': 'abc',
'zzz': 'dez',
}
@pytest.fixture(params=[FakeKubernetesModel, types.SimpleNamespace])
def obj(request):
cls = request.param
obj = cls()
if cls is FakeKubernetesModel:
# With attribute mapping in mind.
obj.key = 'val'
obj.AbC = cls()
obj.AbC.zzz = cls()
obj.AbC.zzz.hij = 'val'
else:
# Exactly as they will be requested.
obj.key = 'val'
obj.abc = cls()
obj.abc.dez = cls()
obj.abc.dez.hij = 'val'
return obj
def test_object_with_existent_key_with_no_default(obj):
r = resolve_obj(obj, ['abc', 'dez', 'hij'])
assert r == 'val'
def test_object_with_existent_key_with_default(obj):
r = resolve_obj(obj, ['abc', 'dez', 'hij'], default)
assert r == 'val'
@pytest.mark.parametrize('key', [
pytest.param(['rst', 'uvw', 'xyz'], id='1stlvl'),
pytest.param(['abc', 'uvw', 'xyz'], id='2ndlvl'),
pytest.param(['abc', 'dez', 'xyz'], id='3rdlvl'),
])
def test_object_with_inexistent_key_with_no_default(obj, key):
with pytest.raises(AttributeError):
resolve_obj(obj, key)
@pytest.mark.parametrize('key', [
pytest.param(['rst', 'uvw', 'xyz'], id='1stlvl'),
pytest.param(['abc', 'uvw', 'xyz'], id='2ndlvl'),
pytest.param(['abc', 'dez', 'xyz'], id='3rdlvl'),
])
def test_object_with_inexistent_key_with_default(obj, key):
r = resolve_obj(obj, key, default)
assert r is default
def test_object_with_nonmapping_with_no_default(obj):
with pytest.raises(TypeError):
resolve_obj(obj, ['key', 'sub'])
def test_object_with_nonmapping_with_default(obj):
r = resolve_obj(obj, ['key', 'sub'], default)
assert r is default
def test_object_with_none_is_treated_as_a_regular_default_value(obj):
r = resolve_obj(obj, ['abc', 'dez', 'xyz'], None)
assert r is None
def METHOD_NAME(obj):
r = resolve_obj(obj, [])
assert r == obj
assert r is obj
#
# Some special cases.
#
@pytest.mark.parametrize('cls', (tuple, list, set, frozenset, str, bytes))
def test_raises_for_builtins(cls):
obj = cls()
with pytest.raises(TypeError):
resolve_obj(obj, ['__class__'])
| null |
5,010 |
import hail as hl
from data_pipeline.data_types.locus import normalized_contig
from data_pipeline.data_types.variant import variant_id
FILTER_NAMES = hl.dict(
{"artifact_prone_site": "Artifact-prone site", "indel_stack": "Indel stack", "npg": "No passing genotype"}
)
def METHOD_NAME(value):
return hl.cond(hl.is_nan(value), hl.null(value.dtype), value)
def prepare_mitochondrial_variants(path, mnvs_path=None):
ds = hl.read_table(path)
haplogroups = hl.eval(ds.globals.hap_order)
ds = ds.annotate(
hl_hist=ds.hl_hist.annotate(bin_edges=ds.hl_hist.bin_edges.map(lambda n: hl.float(hl.format("%.2f", n))))
)
ds = ds.select(
# ID
variant_id=variant_id(ds.locus, ds.alleles),
reference_genome=ds.locus.dtype.reference_genome.name,
chrom=normalized_contig(ds.locus.contig),
pos=ds.locus.position,
ref=ds.alleles[0],
alt=ds.alleles[1],
rsids=ds.rsid,
# Quality
filters=ds.filters.map(lambda f: FILTER_NAMES.get(f, f)),
genotype_quality_metrics=[hl.struct(name="Depth", alt=ds.dp_hist_alt, all=ds.dp_hist_all)],
genotype_quality_filters=[
hl.struct(
name="Base Quality",
filtered=hl.struct(bin_edges=ds.hl_hist.bin_edges, bin_freq=ds.base_qual_hist),
),
hl.struct(
name="Contamination",
filtered=hl.struct(bin_edges=ds.hl_hist.bin_edges, bin_freq=ds.contamination_hist),
),
hl.struct(
name="Heteroplasmy below minimum heteroplasmy threshold",
filtered=hl.struct(
bin_edges=ds.hl_hist.bin_edges, bin_freq=ds.heteroplasmy_below_min_het_threshold_hist
),
),
hl.struct(name="Position", filtered=hl.struct(bin_edges=ds.hl_hist.bin_edges, bin_freq=ds.position_hist)),
hl.struct(
name="Strand Bias",
filtered=hl.struct(bin_edges=ds.hl_hist.bin_edges, bin_freq=ds.strand_bias_hist),
),
hl.struct(
name="Weak Evidence",
filtered=hl.struct(bin_edges=ds.hl_hist.bin_edges, bin_freq=ds.weak_evidence_hist),
),
],
site_quality_metrics=[
hl.struct(name="Mean Depth", value=METHOD_NAME(ds.dp_mean)),
hl.struct(name="Mean MQ", value=METHOD_NAME(ds.mq_mean)),
hl.struct(name="Mean TLOD", value=METHOD_NAME(ds.tlod_mean)),
],
# Frequency
an=ds.AN,
ac_hom=ds.AC_hom,
ac_het=ds.AC_het,
excluded_ac=ds.excluded_AC,
# Heteroplasmy
common_low_heteroplasmy=ds.common_low_heteroplasmy,
heteroplasmy_distribution=ds.hl_hist,
max_heteroplasmy=ds.max_hl,
# Populations
populations=hl.sorted(
hl.range(hl.len(ds.globals.pop_order)).map(
lambda pop_index: hl.struct(
id=ds.globals.pop_order[pop_index],
an=ds.pop_AN[pop_index],
ac_het=ds.pop_AC_het[pop_index],
ac_hom=ds.pop_AC_hom[pop_index],
heteroplasmy_distribution=hl.struct(
bin_edges=ds.hl_hist.bin_edges,
bin_freq=ds.pop_hl_hist[pop_index],
n_smaller=0,
n_larger=0,
),
)
),
key=lambda pop: pop.id,
),
# Haplogroups
hapmax_af_hom=ds.hapmax_AF_hom,
hapmax_af_het=ds.hapmax_AF_het,
faf_hapmax_hom=ds.faf_hapmax_hom,
haplogroup_defining=ds.hap_defining_variant,
haplogroups=[
hl.struct(
id=haplogroup,
an=ds.hap_AN[i],
ac_het=ds.hap_AC_het[i],
ac_hom=ds.hap_AC_hom[i],
faf_hom=ds.hap_faf_hom[i],
heteroplasmy_distribution=ds.hap_hl_hist[i],
)
for i, haplogroup in enumerate(haplogroups)
],
# Other
age_distribution=hl.struct(het=ds.age_hist_het, hom=ds.age_hist_hom),
flags=hl.set([hl.or_missing(ds.common_low_heteroplasmy, "common_low_heteroplasmy")]).filter(hl.is_defined),
mitotip_score=ds.mitotip_score,
mitotip_trna_prediction=ds.mitotip_trna_prediction,
pon_ml_probability_of_pathogenicity=ds.pon_ml_probability_of_pathogenicity,
pon_mt_trna_prediction=ds.pon_mt_trna_prediction,
variant_collapsed=ds.variant_collapsed,
vep=ds.vep,
)
if mnvs_path:
mnvs = hl.import_table(mnvs_path, types={"pos": hl.tint, "ref": hl.tstr, "alt": hl.tstr, "AC_hom_MNV": hl.tint})
mnvs = mnvs.key_by(
locus=hl.locus("chrM", mnvs.pos, reference_genome=ds.locus.dtype.reference_genome),
alleles=[mnvs.ref, mnvs.alt],
)
ds = ds.annotate(ac_hom_mnv=hl.or_else(mnvs[ds.key].AC_hom_MNV, 0))
ds = ds.annotate(flags=hl.if_else(ds.ac_hom_mnv > 0, ds.flags.add("mnv"), ds.flags))
return ds
| null |
5,011 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as domain_cons_utils
from unittest.mock import patch
class DomainConstraintDeleteTestCase(BaseTestGenerator):
""" This class will add new domain constraint under schema node. """
scenarios = utils.generate_scenarios('domain_constraint_delete',
domain_cons_utils.test_cases)
def METHOD_NAME(self):
super().METHOD_NAME()
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
self.domain_name = "domain_%s" % (str(uuid.uuid4())[1:8])
self.domain_con_name = \
"test_domain_con_delete_%s" % (str(uuid.uuid4())[1:8])
self.domain_info = domain_cons_utils.create_domain(self.server,
self.db_name,
self.schema_name,
self.schema_id,
self.domain_name)
self.domain_constraint_id = \
domain_cons_utils.create_domain_constraints(self.server,
self.db_name,
self.schema_name,
self.domain_name,
self.domain_con_name)
def delete_domain_constraint(self):
"""
This function returns the domain constraint delete response
:return: domain constraint delete response
"""
return self.tester.delete(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.schema_id) + '/' +
str(self.domain_id) + '/' +
str(self.domain_constraint_id),
follow_redirects=True)
def runTest(self):
""" This function will add domain constraint under test database. """
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
self.domain_id = self.domain_info[0]
domain_response = domain_cons_utils.verify_domain(
self.server,
self.db_name,
self.schema_id,
self.domain_name)
if not domain_response:
raise Exception("Could not find the domain.")
domain_cons_response = domain_cons_utils.verify_domain_constraint(
self.server, self.db_name,
self.domain_con_name)
if not domain_cons_response:
raise Exception("Could not find domain constraint.")
if self.is_positive_test:
response = self.delete_domain_constraint()
else:
if hasattr(self, "error_deleting_domain_constraints"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.delete_domain_constraint()
if hasattr(self, "wrong_domain_constraint_id"):
self.domain_constraint_id = 99999
response = self.delete_domain_constraint()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
| null |
5,012 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from mindspore import Tensor, context
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
from parallel.utils.utils import ParallelValidator
def setup_function():
context.set_auto_parallel_context(dataset_strategy="full_batch")
POOLED_HEIGHT = 2
POOLED_WIDTH = 2
SPATIAL_SCALE = 0.5
BATCH_SIZE = 32
FEATURES_HEIGHT = 256
FEATURES_WIDTH = 256
CHANNELS = 3
NUM_ROIS = 16
_features = Tensor(np.random.normal(size=[BATCH_SIZE, CHANNELS, FEATURES_HEIGHT, FEATURES_WIDTH]).astype(np.float32))
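# Each ROI row is (batch_index, x1, y1, x2, y2); coordinates are drawn in the pre-scaled image space.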
_rois = Tensor(
np.hstack((np.random.randint(0, BATCH_SIZE, [NUM_ROIS, 1]).astype(np.float32),
np.random.uniform(low=0, high=FEATURES_HEIGHT / SPATIAL_SCALE, size=[NUM_ROIS, 4]).astype(np.float32))))
class Net(Cell):
def __init__(self, pooled_h, pooled_w, spatial_scale, strategy=None):
super(Net, self).__init__()
self.roi_align = P.ROIAlign(pooled_h, pooled_w, spatial_scale).shard(strategy)
def construct(self, features, rois):
output = self.roi_align(features, rois)
return output
def METHOD_NAME(net: Cell, *inputs):
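# Compile the cell under the configured parallel context and return the phase key used by ParallelValidator.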
net.set_train()
phase, _ = _cell_graph_executor.compile(net, *inputs)
context.reset_auto_parallel_context()
return phase
def test_roi_align_auto_parallel():
"""
Feature: test ROIAlign auto parallel
Description: auto parallel
Expectation: compile success
"""
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
net = Net(POOLED_HEIGHT, POOLED_WIDTH, SPATIAL_SCALE)
METHOD_NAME(net, _features, _rois)
def test_roi_align_data_parallel():
"""
Feature: test ROIAlign data parallel
Description: data parallel
Expectation: compile success
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy = ((4, 1, 1, 1), (2, 1))
net = Net(POOLED_HEIGHT, POOLED_WIDTH, SPATIAL_SCALE, strategy)
METHOD_NAME(net, _features, _rois)
def test_roi_align_strategy_error():
"""
Feature: test invalid strategy for ROIAlign
Description: illegal strategy
Expectation: raise RuntimeError
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy = ((2, 1, 2, 2), (1, 1))
net = Net(POOLED_HEIGHT, POOLED_WIDTH, SPATIAL_SCALE, strategy)
with pytest.raises(RuntimeError):
METHOD_NAME(net, _features, _rois)
context.reset_auto_parallel_context()
def test_roi_align_layout():
"""
    Feature: ROIAlignInfo
    Description: validate layout and structure
    Expectation: compile success, no RuntimeError raised
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy = ((4, 1, 1, 1), (2, 1))
net = Net(POOLED_HEIGHT, POOLED_WIDTH, SPATIAL_SCALE, strategy)
phase = METHOD_NAME(net, _features, _rois)
validator = ParallelValidator(net, phase)
# check layout
features_expect_layout = ([8], [-1, -1, -1, -1], [32, 3, 256, 256], 0, True, '')
assert validator.check_parameter_layout('features', features_expect_layout)
# check attrs
roi_expect_attrs = {'pooled_height': POOLED_HEIGHT, 'pooled_width': POOLED_WIDTH, 'spatial_scale': SPATIAL_SCALE}
assert validator.check_node_attrs('ROIAlign-0', roi_expect_attrs)
# check inputs
roi_expect_inputs = ['StridedSlice-0', 'TensorScatterUpdate-0']
assert validator.check_node_inputs('ROIAlign-0', roi_expect_inputs)
# check sub_graph
sub_graph = {
'TensorScatterUpdate-0': ['StridedSlice-1', 'Stack-0', 'Minimum-0'],
'Equal-0': ['Sub-0', 'Minimum-0'],
'ROIAlign-0': ['StridedSlice-0', 'TensorScatterUpdate-0'],
'Mul-0': ['ROIAlign-0', 'ExpandDims-2'],
'AllReduce-0': ['Mul-0']
}
assert validator.check_graph_structure(sub_graph)
| null |
5,013 |
from __future__ import annotations
import asyncio
import platform
import signal
import uuid
from datetime import datetime, timedelta
import pytest
from meltano.core.job.job import (
HEARTBEAT_VALID_MINUTES,
HEARTBEATLESS_JOB_VALID_HOURS,
Job,
State,
)
class TestJob:
def sample_job(self, payload=None):
return Job(
job_name="meltano:sample-elt",
state=State.IDLE,
payload=payload or {},
)
def test_save(self, session):
subject = self.sample_job().save(session)
assert subject.id > 0
def test_load(self, session):
for key in range(0, 10):
session.add(self.sample_job({"key": key}))
subjects = session.query(Job).filter_by(job_name="meltano:sample-elt")
assert len(subjects.all()) == 10
session.rollback()
def test_transit(self, session):
subject = self.sample_job().save(session)
transition = subject.transit(State.RUNNING)
assert transition == (State.IDLE, State.RUNNING)
subject.started_at = datetime.utcnow()
transition = subject.transit(State.SUCCESS)
assert transition == (State.RUNNING, State.SUCCESS)
subject.ended_at = datetime.utcnow()
@pytest.mark.asyncio()
async def test_run(self, session):
subject = self.sample_job().save(session)
# A successful run will mark the subject as SUCCESS and set the `ended_at`
async with subject.run(session):
assert subject.state is State.RUNNING
assert subject.ended_at is None
await asyncio.sleep(1)
original_heartbeat = subject.last_heartbeat_at
assert original_heartbeat is not None
# Heartbeat is recorded every second
await asyncio.sleep(2)
assert subject.last_heartbeat_at > original_heartbeat
# Yield to give heartbeat another chance to be updated
await asyncio.sleep(0)
assert subject.state is State.SUCCESS
assert subject.ended_at is not None
# Allow one additional second of delay:
assert subject.ended_at - subject.last_heartbeat_at < timedelta(seconds=2)
@pytest.mark.asyncio()
async def test_run_failed(self, session):
        # A failed run will mark the subject as FAIL and set the payload['error']
subject = self.sample_job({"original_state": 1}).save(session)
exception = Exception("This is a test.")
with pytest.raises(Exception) as err: # noqa: PT012, PT011
async with subject.run(session):
raise exception
# raise the same exception
        assert err.value is exception
assert subject.state is State.FAIL
assert subject.ended_at is not None
assert subject.payload["original_state"] == 1
assert subject.payload["error"] == "This is a test."
@pytest.mark.asyncio()
async def METHOD_NAME(self, session):
if platform.system() == "Windows":
pytest.xfail(
"Fails on Windows: https://github.com/meltano/meltano/issues/2842",
)
subject = self.sample_job({"original_state": 1}).save(session)
with pytest.raises(KeyboardInterrupt): # noqa: PT012
async with subject.run(session):
signal.raise_signal(signal.SIGINT)
assert subject.state is State.FAIL
assert subject.ended_at is not None
assert subject.payload["original_state"] == 1
assert subject.payload["error"] == "The process was interrupted"
@pytest.mark.asyncio()
async def test_run_terminated(self, session):
if platform.system() == "Windows":
pytest.xfail(
"Fails on Windows: https://github.com/meltano/meltano/issues/2842",
)
subject = self.sample_job({"original_state": 1}).save(session)
with pytest.raises(SystemExit): # noqa: PT012
async with subject.run(session):
signal.raise_signal(signal.SIGTERM)
assert subject.state is State.FAIL
assert subject.ended_at is not None
assert subject.payload["original_state"] == 1
assert subject.payload["error"] == "The process was terminated"
def test_run_id(self, session):
job = Job()
run_id = job.run_id
assert isinstance(run_id, uuid.UUID)
job.save(session)
assert job.run_id == run_id
def test_is_stale(self):
job = Job()
# Idle jobs are not stale
assert not job.is_stale()
# Jobs that were just started are not stale
job.start()
assert not job.is_stale()
# Jobs started more than 25 hours ago without a heartbeat are stale
offset = timedelta(hours=HEARTBEATLESS_JOB_VALID_HOURS + 1)
job.started_at = datetime.utcnow() - offset
assert job.is_stale()
# Jobs with a recent heartbeat are not stale
job._heartbeat()
assert not job.is_stale()
# Jobs without a heartbeat for 5 minutes are stale
offset = timedelta(minutes=HEARTBEAT_VALID_MINUTES + 1)
job.last_heartbeat_at = datetime.utcnow() - offset
assert job.is_stale()
# Completed jobs are not stale
job.success()
assert not job.is_stale()
def test_fail_stale(self):
job = Job()
# Leaves a job that isn't stale alone
assert not job.fail_stale()
assert not job.has_error()
# Fails a stale job without a heartbeat
job.start()
offset = timedelta(hours=HEARTBEATLESS_JOB_VALID_HOURS + 1)
job.started_at = datetime.utcnow() - offset
assert job.fail_stale()
assert job.has_error()
assert "24 hours" in job.payload["error"]
# Doesn't fail a job that's already failed
assert not job.fail_stale()
# Fails a stale job with a heartbeat
job = Job()
job.start()
offset = timedelta(minutes=HEARTBEAT_VALID_MINUTES + 1)
job.last_heartbeat_at = datetime.utcnow() - offset
assert job.fail_stale()
assert job.has_error()
assert "5 minutes" in job.payload["error"]
| null |
5,014 |
import datetime
import re
import sys
from html import unescape
from pathlib import Path
import pytest
import lektor.context
import lektor.metaformat
from lektor.db import Pad
from lektor.environment import Environment
@pytest.fixture
def scratch_project_data(scratch_project_data):
# Add a sub-page to the scratch project
data = {"_model": "page", "title": "Subpage", "body": "Subpage body"}
subpage_lr = scratch_project_data / "content/sub-page/contents.lr"
subpage_lr.parent.mkdir()
subpage_lr.write_text("".join(lektor.metaformat.serialize(data.items())))
testbag_ini = scratch_project_data / "databags/testbag.ini"
testbag_ini.parent.mkdir()
testbag_ini.write_text("foo = bar")
return scratch_project_data
@pytest.fixture
def compile_template(scratch_env):
def compile_template(source, name="tmpl.html"):
Path(scratch_env.root_path, "templates", name).write_text(
source, encoding="utf-8"
)
return scratch_env.jinja_env.get_template(name)
return compile_template
@pytest.fixture
def source_path():
return "/"
@pytest.fixture
def bogus_context(scratch_pad, source_path):
    # Construct a Context that has a source, without going through all
    # the steps necessary to construct an Artifact.
with lektor.context.Context(pad=scratch_pad) as ctx:
if source_path is not None:
ctx.source = scratch_pad.get(source_path)
yield
def test_jinja2_feature_autoescape(compile_template):
tmpl = compile_template("{{ value }}", "tmpl.html")
rendered = tmpl.render(value="<tag>")
assert unescape(rendered) == "<tag>"
assert "<" not in rendered
def test_jinja2_feature_with(compile_template):
tmpl = compile_template("{% with x = 'good' %}{{ x }}{% endwith %}")
assert tmpl.render() == "good"
def test_jinja2_feature_do(compile_template):
tmpl = compile_template(
"{% set x = ['a'] %}{% do x.append('b') %}{{ x|join('-') }}"
)
assert tmpl.render() == "a-b"
@pytest.mark.parametrize("source_path", [None, "/"])
@pytest.mark.usefixtures("bogus_context")
def test_jinja2_markdown_filter(compile_template):
tmpl = compile_template("{{ '**word**' | markdown }}")
assert "<strong>word</strong>" in tmpl.render()
@pytest.mark.usefixtures("bogus_context")
def test_jinja2_markdown_filter_resolve_links(compile_template):
tmpl = compile_template(
"{{ '[subpage](sub-page)' | markdown(resolve_links='always') }}"
)
assert re.search(r"<a.*\bhref=(['\"])sub-page/\1.*>subpage</a>", tmpl.render())
@pytest.mark.parametrize(
"source_path, resolve_links",
[
(None, "if-possible"),
(None, "never"),
("/", "never"),
],
)
@pytest.mark.usefixtures("bogus_context")
def test_jinja2_markdown_filter_noresolve_links(compile_template, resolve_links):
tmpl = compile_template(
f"{{{{ '[subpage](sub-page)' | markdown(resolve_links={resolve_links!r}) }}}}"
)
assert re.search(r"<a.*\bhref=(['\"])sub-page\1.*>subpage</a>", tmpl.render())
@pytest.mark.parametrize("source_path", [None])
@pytest.mark.usefixtures("bogus_context")
def test_jinja2_markdown_filter_resolve_raises_if_no_source_obj(compile_template):
tmpl = compile_template(
"{{ '[subpage](sub-page)' | markdown(resolve_links='always') }}"
)
with pytest.raises(RuntimeError) as exc_info:
tmpl.render()
assert re.search(r"\bsource object\b.*\brequired\b", str(exc_info.value))
def test_no_reference_cycle_in_environment(project):
env = project.make_env(load_plugins=False)
# reference count should be two: one from our `env` variable, and
# another from the argument to sys.getrefcount
assert sys.getrefcount(env) == 2
@pytest.fixture
def render_string(env):
def render_string(s, **kwargs):
template = env.jinja_env.from_string(s)
return template.render(**kwargs)
return render_string
def test_dateformat_filter(render_string):
tmpl = "{{ dt | dateformat('yyyy-MM-dd') }}"
dt = datetime.date(2001, 2, 3)
assert render_string(tmpl, dt=dt) == "2001-02-03"
def test_datetimeformat_filter_not_inlined(pad):
template = pad.env.jinja_env.from_string("{{ 1678749806 | datetimeformat }}")
en_date = template.render()
with lektor.context.Context(pad=pad) as ctx:
ctx.source = pad.get("/", alt="de")
de_date = template.render()
assert en_date != de_date
def test_datetimeformat_filter(render_string):
tmpl = "{{ dt | datetimeformat('yyyy-MM-ddTHH:mm') }}"
dt = datetime.datetime(2001, 2, 3, 4, 5, 6)
assert render_string(tmpl, dt=dt) == "2001-02-03T04:05"
def test_timeformat_filter(render_string):
tmpl = "{{ dt | datetimeformat('HH:mm') }}"
dt = datetime.time(1, 2, 3)
assert render_string(tmpl, dt=dt) == "01:02"
@pytest.fixture(params=["dateformat", "datetimeformat", "timeformat"])
def dates_filter(request: pytest.FixtureRequest) -> str:
return request.param
def test_dates_format_filter_handles_undefined(
env: Environment, dates_filter: str
) -> None:
template = env.jinja_env.from_string("{{ undefined | %s }}" % dates_filter)
assert template.render() == ""
def test_dates_format_filter_raises_type_error_on_bad_arg(
env: Environment, dates_filter: str
) -> None:
template = env.jinja_env.from_string("{{ obj | %s }}" % dates_filter)
with pytest.raises(TypeError, match="unexpected exception"):
template.render(obj=object())
def test_dates_format_filter_raises_type_error_on_bad_format(
env: Environment, dates_filter: str
) -> None:
template = env.jinja_env.from_string("{{ now | %s(42) }}" % dates_filter)
with pytest.raises(TypeError, match="should be a str"):
template.render(now=datetime.datetime.now())
@pytest.mark.parametrize("arg", ["locale", "tzinfo"])
def test_dates_format_filter_raises_type_error_on_bad_kwarg(
env: Environment, dates_filter: str, arg: str
) -> None:
template = env.jinja_env.from_string("{{ now | %s(%s=42) }}" % (dates_filter, arg))
with pytest.raises(TypeError):
template.render(now=datetime.datetime.now())
def METHOD_NAME(
scratch_env: Environment, scratch_pad: Pad
) -> None:
template = scratch_env.jinja_env.from_string("{{ bag('testbag.foo') }}")
assert template.render(site=scratch_pad) == "bar"
def test_bag_gets_site_from_lektor_context(
scratch_env: Environment, scratch_pad: Pad
) -> None:
template = scratch_env.jinja_env.from_string("{{ bag('testbag.foo') }}")
with lektor.context.Context(pad=scratch_pad):
assert template.render() == "bar"
| null |
5,015 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import mantid.simpleapi as mantid
import os
def METHOD_NAME(run_details, processed_spectra, include_q_squared=False):
"""
Splits a processed list containing all focused banks into TOF and
d-spacing groups. It also sets the names of the output workspaces
    to the run number(s) - Result<Unit>_<Bank Number> e.g.
    123-130-ResultTOF_3
    :param run_details: The run details associated with this run
    :param processed_spectra: A list containing workspaces, one entry per focused bank.
    :param include_q_squared: A boolean indicating whether the QSquared unit should also be output
    :return: Workspace groups for dSpacing and TOF (in that order), plus QSquared if requested
"""
d_spacing_output = []
tof_output = []
q_squared_output = []
run_number = str(run_details.output_run_string)
ext = run_details.file_extension if run_details.file_extension else ""
for name_index, ws in enumerate(processed_spectra, start=1):
d_spacing_out_name = run_number + ext + "-ResultD_" + str(name_index)
tof_out_name = run_number + ext + "-ResultTOF_" + str(name_index)
q_squared_out_name = run_number + ext + "-ResultQ_" + str(name_index)
d_spacing_output.append(mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=d_spacing_out_name, Target="dSpacing"))
tof_output.append(mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=tof_out_name, Target="TOF"))
if include_q_squared:
q_squared_output.append(mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=q_squared_out_name, Target="QSquared"))
# Group the outputs
d_spacing_group_name = run_number + ext + "-ResultD"
d_spacing_group = mantid.GroupWorkspaces(InputWorkspaces=d_spacing_output, OutputWorkspace=d_spacing_group_name)
tof_group_name = run_number + ext + "-ResultTOF"
tof_group = mantid.GroupWorkspaces(InputWorkspaces=tof_output, OutputWorkspace=tof_group_name)
if include_q_squared:
q_squared_group_name = run_number + ext + "-ResultQ"
q_squared_tof_group = mantid.GroupWorkspaces(InputWorkspaces=q_squared_output, OutputWorkspace=q_squared_group_name)
return d_spacing_group, tof_group, q_squared_tof_group
return d_spacing_group, tof_group
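# Hypothetical call sketch (workspace names assumed, not from this module): with
# three focused banks the splitter returns one grouped workspace per unit, e.g.
#   d_group, tof_group = METHOD_NAME(run_details, [bank1, bank2, bank3])
#   d_group, tof_group, q_group = METHOD_NAME(run_details, banks, include_q_squared=True)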
def save_focused_data(d_spacing_group, tof_group, output_paths, q_squared=None):
"""
    Saves out focused data into nxs, GSAS and .dat formats. Requires the grouped workspaces
    in TOF, dSpacing and (optionally) QSquared, and the dictionary of output paths generated by abstract_inst.
    :param d_spacing_group: The focused workspace group in dSpacing
    :param tof_group: The focused workspace group in TOF
    :param output_paths: A dictionary containing the full paths to save to
    :param q_squared: The focused workspace group in QSquared, if requested
    :return: None
"""
def ensure_dir_exists(filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
return filename
mantid.SaveGSS(InputWorkspace=tof_group, Filename=ensure_dir_exists(output_paths["gss_filename"]), SplitFiles=False, Append=False)
mantid.SaveNexusProcessed(InputWorkspace=tof_group, Filename=ensure_dir_exists(output_paths["nxs_filename"]), Append=False)
_save_xye(ws_group=d_spacing_group, filename_template=ensure_dir_exists(output_paths["dspacing_xye_filename"]))
_save_xye(ws_group=tof_group, filename_template=ensure_dir_exists(output_paths["tof_xye_filename"]))
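# Hypothetical usage sketch (paths assumed, not from this module): output_paths is
# expected to provide the four keys used above, with the XYE templates carrying a
# "{bankno}" placeholder that _save_xye fills per bank, e.g.
#   output_paths = {
#       "gss_filename": "/out/123.gsas",
#       "nxs_filename": "/out/123.nxs",
#       "dspacing_xye_filename": "/out/123-ResultD-{bankno}.dat",
#       "tof_xye_filename": "/out/123-ResultTOF-{bankno}.dat",
#   }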
def _save_xye(ws_group, filename_template):
"""
Saves XYE data into .dat files. This expects the .dat folder to be created and passed.
It saves the specified group with the specified units into a file whose name contains that
information.
:param ws_group: The workspace group to save out to .dat files
:param filename_template: A string containing a fullpath with a string format template {bankno} to
denote where the bank number should be inserted
"""
for bank_index, ws in enumerate(ws_group):
        bank_index += 1  # Ensure we start at 1 when saving out
mantid.SaveFocusedXYE(
InputWorkspace=ws, Filename=filename_template.format(bankno=bank_index), SplitFiles=False, IncludeHeader=False
)
| null |
5,016 |
# -*- coding: utf-8 -*-
"""Moderate user roles
:copyright: Copyright (c) 2022 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkconfig
from pykern.pkdebug import pkdexc, pkdp, pkdlog
from pykern.pkcollections import PKDict
from pykern import pkjinja
import sirepo.quest
import sirepo.auth_role
import sirepo.feature_config
import sirepo.simulation_db
import sirepo.smtp
import sirepo.uri
import sirepo.uri_router
import sqlalchemy
_STATUS_TO_SUBJECT = None
_cfg = None
_ACTIVE = frozenset(
[
sirepo.auth_role.ModerationStatus.CLARIFY,
sirepo.auth_role.ModerationStatus.PENDING,
]
)
class API(sirepo.quest.API):
@sirepo.quest.Spec(
"require_adm", token="AuthModerationToken", status="AuthModerationStatus"
)
async def api_admModerate(self):
def _send_moderation_status_email(info):
sirepo.smtp.send(
recipient=info.user_name,
subject=_STATUS_TO_SUBJECT[info.status].format(info.app_name),
body=pkjinja.render_resource(
f"auth_role_moderation/{info.status}_email",
PKDict(
app_name=info.app_name,
display_name=info.display_name,
link=self.absolute_uri(
self.uri_for_app_root(
sirepo.auth_role.sim_type(info.role),
),
),
),
),
)
def _set_moderation_status(info):
if info.status == "approve":
self.auth_db.model("UserRole").add_roles(roles=[info.role])
self.auth_db.model("UserRoleInvite").set_status(
role=info.role,
status=info.status,
moderator_uid=info.moderator_uid,
)
req = self.parse_post(type=False)
i = self.auth_db.model("UserRoleInvite").unchecked_search_by(
token=req.req_data.token
)
if not i:
pkdlog(f"No record in UserRoleInvite for token={req.req_data.token}")
raise sirepo.util.UserAlert(
"Could not find the moderation request; "
"refresh your browser to get the latest moderation list.",
)
p = PKDict(
app_name=sirepo.simulation_db.SCHEMA_COMMON.appInfo[
sirepo.auth_role.sim_type(i.role)
].longName,
role=i.role,
status=req.req_data.status,
moderator_uid=self.auth.logged_in_user(),
)
pkdlog("status={} uid={} role={} token={}", p.status, i.uid, i.role, i.token)
with self.auth.logged_in_user_set(uid=i.uid, method=self.auth.METHOD_EMAIL):
p.pkupdate(
display_name=self.auth.user_display_name(i.uid),
user_name=self.auth.logged_in_user_name(),
)
_set_moderation_status(p)
_send_moderation_status_email(p)
return self.reply_ok()
@sirepo.quest.Spec("require_adm")
async def api_admModerateRedirect(self):
def _type():
x = sirepo.feature_config.auth_controlled_sim_types()
res = sorted(sirepo.feature_config.cfg().sim_types - x)
return res[0] if res else sorted(x)[0]
raise sirepo.util.Redirect(
self.absolute_uri(
sirepo.uri.local_route(_type(), route_name="admRoles"),
),
)
@sirepo.quest.Spec("require_adm")
async def api_getModerationRequestRows(self):
return self.reply_json(
PKDict(
rows=self.auth_db.model("UserRoleInvite").get_moderation_request_rows(),
),
)
@sirepo.quest.Spec(
"allow_sim_typeless_require_email_user", reason="AuthModerationReason"
)
async def api_saveModerationReason(self):
def METHOD_NAME(info):
sirepo.smtp.send(
recipient=_cfg.moderator_email,
subject=f"{info.sim_type} Access Request",
body=pkjinja.render_resource(
"auth_role_moderation/moderation_email", info
),
)
req = self.parse_post()
u = self.auth.logged_in_user()
r = sirepo.auth_role.for_sim_type(req.type)
if self.auth_db.model("UserRole").has_role(role=r):
raise sirepo.util.Redirect(sirepo.uri.local_route(req.type))
try:
self.auth_db.model(
"UserRoleInvite",
uid=u,
role=r,
status=sirepo.auth_role.ModerationStatus.PENDING,
token=sirepo.util.random_base62(32),
).save()
except sqlalchemy.exc.IntegrityError as e:
pkdlog(
"Error={} saving UserRoleInvite for uid={} role={} stack={}",
e,
u,
r,
pkdexc(),
)
raise sirepo.util.UserAlert(
"You've already submitted a moderation request.",
)
l = self.absolute_uri(self.uri_for_api("admModerateRedirect"))
if len(req.req_data.get("reason", "").strip()) == 0:
raise sirepo.util.UserAlert("Reason for requesting access not provided")
METHOD_NAME(
PKDict(
display_name=self.auth.user_display_name(u),
email_addr=self.auth.logged_in_user_name(),
link=l,
reason=req.req_data.reason,
role=sirepo.auth_role.for_sim_type(req.type),
sim_type=req.type,
uid=u,
).pkupdate(self.user_agent_headers())
)
return self.reply_ok()
def raise_control_for_user(qcall, uid, role, sim_type):
s = qcall.auth_db.model("UserRoleInvite").get_status(role=role)
if s in _ACTIVE:
raise sirepo.util.SRException("moderationPending", PKDict(sim_type=sim_type))
if s == sirepo.auth_role.ModerationStatus.DENY:
raise sirepo.util.Forbidden(f"uid={uid} role={role} already denied")
assert s is None, f"Unexpected status={s} for uid={uid} and role={role}"
qcall.auth.require_email_user()
raise sirepo.util.SRException("moderationRequest", PKDict(sim_type=sim_type))
def init_apis(*args, **kwargs):
global _cfg, _STATUS_TO_SUBJECT
_cfg = pkconfig.init(
moderator_email=pkconfig.Required(
str, "The email address to send moderation emails to"
),
)
_STATUS_TO_SUBJECT = PKDict(
approve="{} Access Request Approved",
# TODO(robnagler) should we send an email when moderation pending?
# For completeness
pending=None,
clarify="Sirepo {}: Additional Info?",
deny="{} Access Request Denied",
)
x = frozenset(_STATUS_TO_SUBJECT.keys())
if x != sirepo.auth_role.ModerationStatus.VALID_SET:
raise AssertionError(
f"{x} not same as {sirepo.auth_role.ModerationStatus.VALID_SET}"
)
| null |
5,017 |
# -*- coding: utf-8 -*-
"""?
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkio, pkconfig, pkcli
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo import feature_config
from sirepo import sim_data
from sirepo import simulation_db
from sirepo import srdb
from sirepo import srtime
from sirepo import util
from sirepo.template import template_common
import datetime
import glob
import json
import os.path
import re
import shutil
import sirepo.quest
_MILLISECONDS_PER_MONTH = 30 * 24 * 60 * 60 * 1000
_MAXIMUM_SIM_AGE_IN_MONTHS = 6
def audit_proprietary_lib_files(*uid):
"""Add/removes proprietary files based on a user's roles
For example, add the FLASH proprietary files if user has the sim_type_flash role.
Args:
        *uid: UID(s) of the user(s) to audit. If no uids are given, all users will be audited.
"""
with sirepo.quest.start() as qcall:
for u in uid or qcall.auth_db.all_uids():
with qcall.auth.logged_in_user_set(u):
sim_data.audit_proprietary_lib_files(qcall=qcall)
def db_upgrade():
with sirepo.quest.start() as qcall:
qcall.auth_db.create_or_upgrade()
def create_examples():
"""Adds missing app examples to all users"""
with sirepo.quest.start() as qcall:
examples = _get_examples_by_type(qcall)
for t, s in _iterate_sims_by_users(qcall, examples.keys()):
for e in examples[t]:
if e.models.simulation.name not in s[t].keys():
_create_example(qcall, e)
def reset_examples():
with sirepo.quest.start() as qcall:
e = _get_examples_by_type(qcall)
for t, s in _iterate_sims_by_users(qcall, e.keys()):
o = METHOD_NAME(list(s[t].values()), t, e)
_revert(qcall, o, e)
_delete(qcall, o)
# TODO(e-carlin): more than uid (ex email)
def delete_user(uid):
"""Delete a user and all of their data across Sirepo and Jupyter
    This will delete information based on what is configured, so configure
    all services (jupyterhublogin, email, etc.) that may be relevant. Once
    this command runs, all records are removed from the databases, so if you
    forget to configure something you will have to delete it manually.
Does nothing if `uid` does not exist.
Args:
uid (str): user to delete
"""
import sirepo.template
with sirepo.quest.start() as qcall:
if qcall.auth.unchecked_get_user(uid) is None:
return
with qcall.auth.logged_in_user_set(uid):
if sirepo.template.is_sim_type("jupyterhublogin"):
from sirepo.sim_api import jupyterhublogin
jupyterhublogin.delete_user_dir(qcall=qcall)
simulation_db.delete_user(qcall=qcall)
# This needs to be done last so we have access to the records in
# previous steps.
qcall.auth_db.delete_user(uid=uid)
def move_user_sims(uid):
"""Moves non-example sims and lib files into the target user's directory.
Must be run in the source uid directory."""
if not os.path.exists("srw/lib"):
pkcli.command_error("srw/lib does not exist; must run in user dir")
if not os.path.exists("../{}".format(uid)):
pkcli.command_error(f"missing user_dir=../{uid}")
sim_dirs = []
lib_files = []
for path in glob.glob("*/*/sirepo-data.json"):
with open(path) as f:
data = json.loads(f.read())
sim = data["models"]["simulation"]
if "isExample" in sim and sim["isExample"]:
continue
sim_dirs.append(os.path.dirname(path))
for path in glob.glob("*/lib/*"):
lib_files.append(path)
for sim_dir in sim_dirs:
target = "../{}/{}".format(uid, sim_dir)
assert not os.path.exists(target), "target sim already exists: {}".format(
target
)
pkdlog(sim_dir)
shutil.move(sim_dir, target)
for lib_file in lib_files:
target = "../{}/{}".format(uid, lib_file)
if os.path.exists(target):
continue
pkdlog(lib_file)
shutil.move(lib_file, target)
def METHOD_NAME(simulations, sim_type, examples):
ops = PKDict(delete=[], revert=[])
n = set([x.models.simulation.name for x in examples[sim_type]])
for sim in simulations:
if sim.name not in n:
ops.delete.append((sim, sim_type))
elif _example_is_too_old(sim.simulation.lastModified):
ops.revert.append((sim.name, sim_type))
ops.delete.append((sim, sim_type))
return ops
def _create_example(qcall, example):
simulation_db.save_new_example(example, qcall=qcall)
def _delete(qcall, ops):
for s, t in ops.delete:
simulation_db.delete_simulation(t, s.simulationId, qcall=qcall)
def _example_is_too_old(last_modified):
return (
(srtime.utc_now_as_milliseconds() - last_modified) / _MILLISECONDS_PER_MONTH
) > _MAXIMUM_SIM_AGE_IN_MONTHS
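# Editorial worked example (values assumed): with _MILLISECONDS_PER_MONTH equal to
# 2,592,000,000 ms and a 6 month cutoff, a sim last modified about 7 months ago
# (now - last_modified ~= 18,144,000,000 ms) gives 7.0 > 6, so it is considered too old.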
def _get_example_by_name(name, sim_type, examples):
for e in examples[sim_type]:
if e.models.simulation.name == name:
return e
raise AssertionError(f"Failed to find example simulation with name={name}")
def _get_examples_by_type(qcall):
return PKDict(
{t: simulation_db.examples(t) for t in feature_config.cfg().sim_types}
)
def _get_named_example_sims(qcall, all_sim_types):
return PKDict(
{
t: PKDict(
{
x.name: x
for x in simulation_db.iterate_simulation_datafiles(
t,
simulation_db.process_simulation_list,
{"simulation.isExample": True},
qcall=qcall,
)
}
)
for t in all_sim_types
}
)
def _is_src_dir(d):
return re.search(r"/src$", str(d))
def _iterate_sims_by_users(qcall, all_sim_types):
for d in pkio.sorted_glob(simulation_db.user_path_root().join("*")):
if _is_src_dir(d):
continue
with qcall.auth.logged_in_user_set(simulation_db.uid_from_dir_name(d)):
s = _get_named_example_sims(
qcall,
[a for a in all_sim_types if d.join(a).exists()],
)
for t in s.keys():
yield (t, s)
def _revert(qcall, ops, examples):
for n, t in ops.revert:
_create_example(qcall, _get_example_by_name(n, t, examples))
| null |
5,018 |
from os.path import dirname
from os.path import join
import pytest
from lektor.publisher import RsyncPublisher
def METHOD_NAME(env):
server = env.load_config().get_server("production")
assert server.name == "Production"
assert server.name_i18n["de"] == "Produktion"
assert server.target == "rsync://myserver.com/path/to/website"
assert server.extra == {"extra_field": "extra_value"}
def test_rsync_command_credentials(tmpdir, mocker, env):
output_path = tmpdir.mkdir("output")
publisher = RsyncPublisher(env, str(output_path))
target_url = "http://example.com"
credentials = {
"username": "fakeuser",
"password": "fakepass",
}
mock_popen = mocker.patch("lektor.publisher.portable_popen")
with publisher.get_command(target_url, credentials):
assert mock_popen.called
assert mock_popen.call_args[0] == (
[
"rsync",
"-rclzv",
"--exclude=.lektor",
str(output_path) + "/",
"[email protected]:/",
],
)
output_path = join(dirname(__file__), "OUTPUT_PATH")
@pytest.mark.parametrize(
"target_url,called_command",
[
(
"http://example.com",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
str(output_path) + "/",
"example.com:/",
],
),
(
"http://[email protected]",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
str(output_path) + "/",
"[email protected]:/",
],
),
(
"http://example.com?exclude=file",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
"--exclude",
"file",
str(output_path) + "/",
"example.com:/",
],
),
(
"http://example.com?exclude=file_one&exclude=file_two",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
"--exclude",
"file_one",
"--exclude",
"file_two",
str(output_path) + "/",
"example.com:/",
],
),
(
"""http://example.com?exclude='user's "special" file name'""",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
"--exclude",
"'user's \"special\" file name'",
str(output_path) + "/",
"example.com:/",
],
),
(
'http://example.com?exclude="file name"',
[
"rsync",
"-rclzv",
"--exclude=.lektor",
"--exclude",
'"file name"',
str(output_path) + "/",
"example.com:/",
],
),
(
"http://example.com?delete",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
"--delete-after",
str(output_path) + "/",
"example.com:/",
],
),
(
"http://example.com?delete=yes",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
"--delete-after",
str(output_path) + "/",
"example.com:/",
],
),
(
"http://example.com?delete=no",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
str(output_path) + "/",
"example.com:/",
],
),
(
"file:///path/to/directory",
[
"rsync",
"-rclzv",
"--exclude=.lektor",
str(output_path) + "/",
"/path/to/directory/",
],
),
],
)
def test_rsync_publisher(target_url, called_command, tmpdir, mocker, env):
publisher = RsyncPublisher(env, str(output_path))
mock_popen = mocker.patch("lektor.publisher.portable_popen")
with publisher.get_command(target_url, credentials=None):
assert mock_popen.called
assert mock_popen.call_args[0] == (called_command,)
| null |
5,019 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from sans.common.enums import BinningType
from sans.gui_logic.models.SummationSettingsModel import SummationSettingsModel
class SummationSettingsTestCase(unittest.TestCase):
def setUpWithInitialType(self, initial_type):
self.summation_settings = SummationSettingsModel(initial_type)
def setUp(self):
self.setUpWithInitialType(BinningType.CUSTOM)
class SummationSettingsOverlayEventWorkspaceTestCase(unittest.TestCase):
def assertHasOverlayEventWorkspaces(self):
self.assertTrue(self.summation_settings.has_overlay_event_workspaces())
def assertDoesNotHaveOverlayEventWorkspaces(self):
self.assertFalse(self.summation_settings.has_overlay_event_workspaces())
def assertOverlayEventWorkspacesDisabled(self):
self.assertFalse(self.summation_settings.is_overlay_event_workspaces_enabled())
def assertOverlayEventWorkspacesEnabled(self):
self.assertTrue(self.summation_settings.is_overlay_event_workspaces_enabled())
class SummationSettingsBinSettingsTest(SummationSettingsTestCase):
def assertHasBinSettings(self):
self.assertTrue(self.summation_settings.has_bin_settings())
def assertDoesNotHaveBinSettings(self):
self.assertFalse(self.summation_settings.has_bin_settings())
def test_can_set_bin_settings_when_in_custom(self):
bin_settings = "1,24,545,23"
self.summation_settings.bin_settings = bin_settings
self.assertEqual(bin_settings, self.summation_settings.bin_settings)
def test_custom_binning_has_bin_settings(self):
self.setUpWithInitialType(BinningType.CUSTOM)
self.assertHasBinSettings()
def test_save_as_event_data_does_not_have_bin_settings(self):
self.setUpWithInitialType(BinningType.SAVE_AS_EVENT_DATA)
self.assertDoesNotHaveBinSettings()
def test_from_monitors_does_not_have_bin_settings(self):
self.setUpWithInitialType(BinningType.FROM_MONITORS)
self.assertDoesNotHaveBinSettings()
class SummationSettingsAdditionalTimeShiftsTest(SummationSettingsTestCase, SummationSettingsOverlayEventWorkspaceTestCase):
def assertHasAdditionalTimeShifts(self):
self.assertTrue(self.summation_settings.has_additional_time_shifts())
def assertDoesNotHaveAdditionalTimeShifts(self):
self.assertFalse(self.summation_settings.has_additional_time_shifts())
def test_custom_binning_does_not_have_additional_time_shifts(self):
self.setUpWithInitialType(BinningType.CUSTOM)
self.assertDoesNotHaveAdditionalTimeShifts()
def test_save_as_event_data_has_additional_time_shifts_if_overlay_event_workspaces_enabled(self):
self.setUpWithInitialType(BinningType.SAVE_AS_EVENT_DATA)
self.assertDoesNotHaveAdditionalTimeShifts()
self.summation_settings.enable_overlay_event_workspaces()
self.assertHasAdditionalTimeShifts()
self.summation_settings.disable_overlay_event_workspaces()
self.assertDoesNotHaveAdditionalTimeShifts()
def test_from_monitors_does_not_have_additional_time_shifts(self):
self.setUpWithInitialType(BinningType.FROM_MONITORS)
self.assertDoesNotHaveAdditionalTimeShifts()
def test_can_set_additional_time_shifts_when_available(self):
self.setUpWithInitialType(BinningType.SAVE_AS_EVENT_DATA)
self.summation_settings.enable_overlay_event_workspaces()
additional_time_shifts = "1,24,545,23"
self.summation_settings.additional_time_shifts = additional_time_shifts
self.assertEqual(additional_time_shifts, self.summation_settings.additional_time_shifts)
def test_stores_additional_time_shifts_between_mode_switches(self):
bin_settings = "232,2132,123"
additional_time_shifts = "32,252,12"
self.setUpWithInitialType(BinningType.CUSTOM)
self.summation_settings.bin_settings = bin_settings
self.summation_settings.set_histogram_binning_type(BinningType.SAVE_AS_EVENT_DATA)
self.summation_settings.additional_time_shifts = additional_time_shifts
self.summation_settings.set_histogram_binning_type(BinningType.CUSTOM)
self.assertEqual(bin_settings, self.summation_settings.bin_settings)
self.summation_settings.set_histogram_binning_type(BinningType.SAVE_AS_EVENT_DATA)
self.assertEqual(additional_time_shifts, self.summation_settings.additional_time_shifts)
class SummationSettingsOverlayEventWorkspace(SummationSettingsTestCase, SummationSettingsOverlayEventWorkspaceTestCase):
def test_custom_binning_does_not_have_overlay_event_workspaces(self):
self.setUpWithInitialType(BinningType.CUSTOM)
self.assertDoesNotHaveOverlayEventWorkspaces()
def test_save_as_event_data_has_overlay_event_workspaces(self):
self.setUpWithInitialType(BinningType.SAVE_AS_EVENT_DATA)
self.assertHasOverlayEventWorkspaces()
def test_from_monitors_does_not_have_overlay_event_workspaces(self):
self.setUpWithInitialType(BinningType.FROM_MONITORS)
self.assertDoesNotHaveOverlayEventWorkspaces()
def test_switching_to_save_as_event_data_enables_overlay_event_workspaces_option(self):
self.setUpWithInitialType(BinningType.FROM_MONITORS)
self.summation_settings.set_histogram_binning_type(BinningType.SAVE_AS_EVENT_DATA)
self.assertHasOverlayEventWorkspaces()
def test_can_enable_overlay_event_workspaces_when_available(self):
self.setUpWithInitialType(BinningType.SAVE_AS_EVENT_DATA)
self.summation_settings.enable_overlay_event_workspaces()
self.assertOverlayEventWorkspacesEnabled()
def METHOD_NAME(self):
self.setUpWithInitialType(BinningType.SAVE_AS_EVENT_DATA)
self.summation_settings.enable_overlay_event_workspaces()
self.summation_settings.disable_overlay_event_workspaces()
self.assertOverlayEventWorkspacesDisabled()
if __name__ == "__main__":
unittest.main()
| null |
5,020 |
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy
from .abstract_is_admin import AbstractIsAdmin
from .save_interface import SaveInterface
class InheritedAdmin(object):
"""
Stores reference to inherited admin, both to the
:class:`django.contrib.auth.models.User` object, and to the
:class:`.BaseNode` object.
"""
def __init__(self, user, basenode):
#: The :class:`django.contrib.auth.models.User`
self.user = user
#: The BaseNode
self.basenode = basenode
class BaseNode(SaveInterface):
"""
The base class of the Devilry hierarchy. Implements basic functionality
used by the other Node classes. This is an abstract datamodel, so it
is never used directly.
.. attribute:: short_name
A django.db.models.SlugField_ with max 20 characters. Only numbers,
letters, '_' and '-'.
.. attribute:: long_name
A django.db.models.CharField_ with max 100 characters. Gives a longer
description than :attr:`short_name`.
"""
def __str__(self):
return self.get_path()
def get_path(self):
""" Get the unique path to this node.
:return:
            A ``'.'``-separated path string containing the short_name of this
            node and of every parentnode required to make the path unique. For
            everything from Subject and down, this goes up to the subject, and for Node,
            it goes up to a Node with ``parentnode==None``.
"""
return self.parentnode.get_path() + "." + self.short_name
get_path.short_description = gettext_lazy('Path')
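    # Illustrative sketch (node names assumed, not from this module): for a
    # hierarchy with short_names "uio" -> "ifi" -> "inf1100", calling
    # get_path() on the leaf node returns "uio.ifi.inf1100".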
def get_admins(self):
""" Get a string with the shortname of all administrators on this node
separated by comma and a space like: ``"uioadmin, superuser"``.
Note that admins on parentnode(s) is not included.
"""
return ', '.join([u.shortname for u in self.admins.all()])
get_admins.short_description = gettext_lazy('Administrators')
def _get_inherited_admins(self, admins):
for admin in self.admins.all():
admins[admin.id] = InheritedAdmin(user=admin, basenode=self)
if self.parentnode:
self.parentnode._get_inherited_admins(admins)
def get_inherited_admins(self):
"""
Get list of inherited admins.
:return:
List of :class:`.InheritedAdmin` objects. Does
not contain duplicates.
"""
admins = {}
if self.parentnode:
self.parentnode._get_inherited_admins(admins)
return list(admins.values())
def get_all_admins(self):
"""
Get all admins (including inherited) as a list of ``django.contrib.auth.model.User`` objects.
"""
return set(list(self.get_inherited_admins()) + list(self.admins.all()))
def get_all_admin_ids(self):
"""
Get all admins (including inherited) as a set user-ids.
"""
admin_ids = set([inheritedadmin.user.id for inheritedadmin in self.get_inherited_admins()])
for user in self.admins.all():
admin_ids.add(user.id)
return admin_ids
def is_admin(self, user_obj):
""" Check if the given user is admin on this node or any parentnode.
:param user_obj: A User object.
:rtype: bool
"""
        try:
            self.admins.get(pk=user_obj.pk)
            return True
        except get_user_model().DoesNotExist:
            if self.parentnode:
                return self.parentnode.is_admin(user_obj)
            return False
def _can_save_id_none(self, user_obj):
""" Used by all except Node, which overrides. """
return self.parentnode.is_admin(user_obj)
def METHOD_NAME(self, user_obj):
"""
Check if the current user can save the current object. Unlike is_admin,
this method returns true for superusers, and if this is a new object (id=None),
we check if the user is admin on any parent.
"""
if user_obj.is_superuser:
return True
if self.id is None:
return self._can_save_id_none(user_obj)
elif self.is_admin(user_obj):
return True
else:
return False
def is_empty(self):
"""
Check if this node is empty (has no children). Used by
:meth:`.can_delete` to determine if non-super-users are allowed to
delete a node, but may also be useful in other situations.
"""
raise NotImplementedError('is_empty must be implemented in subclasses.')
def can_delete(self, user_obj):
"""
Check if the given user is permitted to delete this object. A user is
permitted to delete an object if the user is superadmin, or if the user
is admin on the parentnode (uses :meth:`.is_admin`). Only superusers
are allowed to delete nodes where :meth:`.is_empty` returns ``False``.
:return: ``True`` if the user is permitted to delete this object.
"""
if self.id is None:
return False
if user_obj.is_superuser:
return True
if self.parentnode is not None and self.is_empty():
return self.parentnode.is_admin(user_obj)
else:
return False
| null |
5,021 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import CreateSampleWorkspace, CreatePeaksWorkspace, CreateWorkspace, SetSampleMaterial, SetUB
from mantid.geometry import CrystalStructure, CSGObject, OrientedLattice
from mantid.api import Sample
from numpy import pi
import copy
class SampleTest(unittest.TestCase):
def test_lattice_accessors(self):
instrument_ws = CreateSampleWorkspace()
peaks = CreatePeaksWorkspace(instrument_ws, 0)
SetUB(peaks, 1, 1, 1, 90, 90, 90)
sample = peaks.sample()
self.assertTrue(sample.hasOrientedLattice())
self.assertTrue(isinstance(sample.getOrientedLattice(), OrientedLattice))
sample.clearOrientedLattice()
self.assertFalse(sample.hasOrientedLattice())
def METHOD_NAME(self):
sample = Sample()
sample.setThickness(12.5)
self.assertEqual(sample.getThickness(), 12.5)
sample.setHeight(10.2)
self.assertEqual(sample.getHeight(), 10.2)
sample.setWidth(5.9)
self.assertEqual(sample.getWidth(), 5.9)
def test_crystal_structure_handling(self):
sample = Sample()
self.assertEqual(sample.hasCrystalStructure(), False)
self.assertRaises(RuntimeError, sample.getCrystalStructure)
cs = CrystalStructure("5.43 5.43 5.43", "F d -3 m", "Si 0 0 0 1.0 0.01")
sample.setCrystalStructure(cs)
self.assertEqual(sample.hasCrystalStructure(), True)
cs_from_sample = sample.getCrystalStructure()
self.assertEqual(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
self.assertEqual(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
self.assertEqual(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
self.assertEqual(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])
sample.clearCrystalStructure()
self.assertEqual(sample.hasCrystalStructure(), False)
self.assertRaises(RuntimeError, sample.getCrystalStructure)
def test_material(self):
ws = CreateWorkspace(DataX=[1], DataY=[1], StoreInADS=False)
sample = ws.sample()
SetSampleMaterial(ws, "Al2 O3", SampleMassDensity=4, StoreInADS=False)
material = sample.getMaterial()
self.assertAlmostEqual(material.numberDensity, 0.1181, places=4)
self.assertAlmostEqual(material.relativeMolecularMass(), 101.961, places=3)
atoms, numatoms = material.chemicalFormula()
self.assertEqual(len(atoms), len(numatoms))
self.assertEqual(len(atoms), 2)
self.assertEqual(numatoms[0], 2)
self.assertEqual(numatoms[1], 3)
xs0 = atoms[0].neutron()
xs1 = atoms[1].neutron()
        # The correct way to calculate the coherent cross section
        # is to average the scattering lengths and then convert to a cross section
b_real = (xs0["coh_scatt_length_real"] * 2 + xs1["coh_scatt_length_real"] * 3) / 5
b_imag = (xs0["coh_scatt_length_img"] * 2 + xs1["coh_scatt_length_img"] * 3) / 5
xs = 0.04 * pi * (b_real * b_real + b_imag * b_imag)
self.assertAlmostEqual(material.cohScatterXSection(), xs, places=4)
def test_get_shape(self):
sample = Sample()
self.assertEqual(type(sample.getShape()), CSGObject)
def test_get_shape_xml(self):
sample = Sample()
shape = sample.getShape()
xml = shape.getShapeXML()
self.assertEqual(type(xml), str)
def do_test_copyable(self, copy_op):
original = Sample()
width = 1.0
height = 2.0
thickness = 3.0
original.setThickness(thickness)
original.setHeight(height)
original.setWidth(width)
# make copy
cp = copy_op(original)
# Check identity different
self.assertNotEqual(id(original), id(cp))
# Simple tests that cp is equal to original
self.assertEqual(original.getHeight(), cp.getHeight())
self.assertEqual(original.getWidth(), cp.getWidth())
self.assertEqual(original.getThickness(), cp.getThickness())
# Check really did succeed and is not tied in any way to original
del original
self.assertTrue(id(cp) > 0)
self.assertEqual(height, cp.getHeight())
self.assertEqual(width, cp.getWidth())
self.assertEqual(thickness, cp.getThickness())
def test_shallow_copyable(self):
self.do_test_copyable(copy.copy)
def test_deep_copyable(self):
self.do_test_copyable(copy.deepcopy)
def test_equals(self):
a = Sample()
b = Sample()
self.assertEqual(a, b)
b.setThickness(10)
self.assertNotEqual(a, b)
if __name__ == "__main__":
unittest.main()
| null |
5,022 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A database of Python protocol buffer generated symbols.
SymbolDatabase is the MessageFactory for messages generated at compile time,
and makes it easy to create new instances of a registered type, given only the
type's protocol buffer symbol name.
Example usage:
db = symbol_database.SymbolDatabase()
# Register symbols of interest, from one or multiple files.
db.RegisterFileDescriptor(my_proto_pb2.DESCRIPTOR)
db.RegisterMessage(my_proto_pb2.MyMessage)
db.RegisterEnumDescriptor(my_proto_pb2.MyEnum.DESCRIPTOR)
# The database can be used as a MessageFactory, to generate types based on
# their name:
types = db.GetMessages(['my_proto.proto'])
my_message_instance = types['MyMessage']()
# The database's underlying descriptor pool can be queried, so it's not
# necessary to know a type's filename to be able to generate it:
filename = db.pool.FindFileContainingSymbol('MyMessage')
my_message_instance = db.GetMessages([filename])['MyMessage']()
# This functionality is also provided directly via a convenience method:
my_message_instance = db.GetSymbol('MyMessage')()
"""
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
class SymbolDatabase(message_factory.MessageFactory):
"""A database of Python generated symbols."""
def RegisterMessage(self, message):
"""Registers the given message type in the local database.
Calls to GetSymbol() and GetMessages() will return messages registered here.
Args:
message: a message.Message, to be registered.
Returns:
The provided message.
"""
desc = message.DESCRIPTOR
self._classes[desc.full_name] = message
self.pool.AddDescriptor(desc)
return message
def RegisterEnumDescriptor(self, enum_descriptor):
"""Registers the given enum descriptor in the local database.
Args:
enum_descriptor: a descriptor.EnumDescriptor.
Returns:
The provided descriptor.
"""
self.pool.AddEnumDescriptor(enum_descriptor)
return enum_descriptor
def METHOD_NAME(self, service_descriptor):
"""Registers the given service descriptor in the local database.
Args:
service_descriptor: a descriptor.ServiceDescriptor.
Returns:
The provided descriptor.
"""
self.pool.AddServiceDescriptor(service_descriptor)
def RegisterFileDescriptor(self, file_descriptor):
"""Registers the given file descriptor in the local database.
Args:
file_descriptor: a descriptor.FileDescriptor.
Returns:
The provided descriptor.
"""
self.pool.AddFileDescriptor(file_descriptor)
def GetSymbol(self, symbol):
"""Tries to find a symbol in the local database.
    Currently, this method only returns message.Message instances; however, it
    may be extended in the future to support other symbol types.
Args:
symbol: A str, a protocol buffer symbol.
Returns:
A Python class corresponding to the symbol.
Raises:
KeyError: if the symbol could not be found.
"""
return self._classes[symbol]
def GetMessages(self, files):
# TODO(amauryfa): Fix the differences with MessageFactory.
"""Gets all registered messages from a specified file.
    Only messages already created and registered will be returned (this is the
    case for imported _pb2 modules). Unlike MessageFactory, this version also
    returns already defined nested messages, but it does not register any
    message extensions.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes.
Raises:
KeyError: if a file could not be found.
"""
def _GetAllMessageNames(desc):
"""Walk a message Descriptor and recursively yields all message names."""
yield desc.full_name
for msg_desc in desc.nested_types:
for full_name in _GetAllMessageNames(msg_desc):
yield full_name
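    # Editorial example (names assumed, not from this module): for a file that
    # defines message Outer with nested message Inner, the walker above yields
    # "pkg.Outer" and "pkg.Outer.Inner", so nested classes are returned below too.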
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for msg_desc in file_desc.message_types_by_name.values():
for full_name in _GetAllMessageNames(msg_desc):
try:
result[full_name] = self._classes[full_name]
except KeyError:
# This descriptor has no registered class, skip it.
pass
return result
_DEFAULT = SymbolDatabase(pool=descriptor_pool.Default())
def Default():
"""Returns the default SymbolDatabase."""
return _DEFAULT
| null |
5,023 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2017 Kevin Redon <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class Decoder(srd.Decoder):
api_version = 3
id = 'eeprom93xx'
name = '93xx EEPROM'
longname = '93xx Microwire EEPROM'
desc = '93xx series Microwire EEPROM protocol.'
license = 'gplv2+'
inputs = ['microwire']
outputs = []
tags = ['IC', 'Memory']
options = (
{'id': 'addresssize', 'desc': 'Address size', 'default': 8, 'idn':'dec_eeprom93xx_opt_addresssize'},
{'id': 'wordsize', 'desc': 'Word size', 'default': 16, 'idn':'dec_eeprom93xx_opt_wordsize'},
)
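    # Editorial note (values assumed, check the device datasheet): the address and
    # word sizes depend on the part and its ORG pin; e.g. a 93C46 in x16
    # organization is typically decoded with addresssize=6 and wordsize=16.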
annotations = (
('si-data', 'SI data'),
('so-data', 'SO data'),
('warning', 'Warning'),
)
annotation_rows = (
('data', 'Data', (0, 1)),
('warnings', 'Warnings', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.frame = []
def METHOD_NAME(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.addresssize = self.options['addresssize']
self.wordsize = self.options['wordsize']
def put_address(self, data):
# Get address (MSb first).
a = 0
for b in range(len(data)):
a += (data[b].si << (len(data) - b - 1))
self.put(data[0].ss, data[-1].es, self.out_ann,
[0, ['Address: 0x%x' % a, 'Addr: 0x%x' % a, '0x%x' % a]])
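    # Editorial note: the loop above assembles address bits MSb first; e.g. si
    # bits [1, 0, 1] (assumed values) give (1 << 2) + (0 << 1) + (1 << 0) = 0x5.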
def put_word(self, si, data):
# Decode word (MSb first).
word = 0
for b in range(len(data)):
d = data[b].si if si else data[b].so
word += (d << (len(data) - b - 1))
idx = 0 if si else 1
self.put(data[0].ss, data[-1].es,
self.out_ann, [idx, ['Data: 0x%x' % word, '0x%x' % word]])
def decode(self, ss, es, data):
if len(data) < (2 + self.addresssize):
self.put(ss, es, self.out_ann, [2, ['Not enough packet bits']])
return
opcode = (data[0].si << 1) + (data[1].si << 0)
if opcode == 2:
# READ instruction.
self.put(data[0].ss, data[1].es,
self.out_ann, [0, ['Read word', 'READ']])
self.put_address(data[2:2 + self.addresssize])
# Get all words.
word_start = 2 + self.addresssize
while len(data) - word_start > 0:
# Check if there are enough bits for a word.
if len(data) - word_start < self.wordsize:
self.put(data[word_start].ss, data[len(data) - 1].es,
self.out_ann, [2, ['Not enough word bits']])
break
self.put_word(False, data[word_start:word_start + self.wordsize])
# Go to next word.
word_start += self.wordsize
elif opcode == 1:
# WRITE instruction.
self.put(data[0].ss, data[1].es,
self.out_ann, [0, ['Write word', 'WRITE']])
self.put_address(data[2:2 + self.addresssize])
# Get word.
if len(data) < 2 + self.addresssize + self.wordsize:
self.put(data[2 + self.addresssize].ss,
data[len(data) - 1].ss,
self.out_ann, [2, ['Not enough word bits']])
else:
self.put_word(True, data[2 + self.addresssize:2 + self.addresssize + self.wordsize])
elif opcode == 3:
# ERASE instruction.
self.put(data[0].ss, data[1].es,
self.out_ann, [0, ['Erase word', 'ERASE']])
self.put_address(data[2:2 + self.addresssize])
elif opcode == 0:
if data[2].si == 1 and data[3].si == 1:
# WEN instruction.
self.put(data[0].ss, data[2 + self.addresssize - 1].es,
self.out_ann, [0, ['Write enable', 'WEN']])
elif data[2].si == 0 and data[3].si == 0:
# WDS instruction.
self.put(data[0].ss, data[2 + self.addresssize - 1].es,
self.out_ann, [0, ['Write disable', 'WDS']])
elif data[2].si == 1 and data[3].si == 0:
# ERAL instruction.
self.put(data[0].ss, data[2 + self.addresssize - 1].es,
self.out_ann, [0, ['Erase all memory',
'Erase all', 'ERAL']])
elif data[2].si == 0 and data[3].si == 1:
# WRAL instruction.
self.put(data[0].ss, data[2 + self.addresssize - 1].es,
self.out_ann, [0, ['Write all memory',
'Write all', 'WRAL']])
# Get word.
if len(data) < 2 + self.addresssize + self.wordsize:
self.put(data[2 + self.addresssize].ss,
data[len(data) - 1].ss,
self.out_ann, [2, ['Not enough word bits']])
else:
self.put_word(True, data[2 + self.addresssize:2 + self.addresssize + self.wordsize])
| null |
5,024 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import sys
import time
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common.event import add_event, WALAEventOperation, initialize_event_logger_vminfo_common_parameters
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol.goal_state import GoalState, GoalStateProperties
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.rdma import setup_rdma_device
from azurelinuxagent.common.utils import textutil
from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_NAME, \
AGENT_VERSION, \
DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, \
PY_VERSION_MICRO
from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler
from azurelinuxagent.daemon.scvmm import get_scvmm_handler
from azurelinuxagent.ga.update import get_update_handler
from azurelinuxagent.pa.provision import get_provision_handler
from azurelinuxagent.pa.rdma import get_rdma_handler
OPENSSL_FIPS_ENVIRONMENT = "OPENSSL_FIPS"
def get_daemon_handler():
return DaemonHandler()
class DaemonHandler(object):
"""
Main thread of daemon. It will invoke other threads to do actual work
"""
def __init__(self):
self.running = True
self.osutil = get_osutil()
def run(self, child_args=None):
#
# The Container ID in telemetry events is retrieved from the goal state. We can fetch the goal state
# only after protocol detection, which is done during provisioning.
#
# Be aware that telemetry events emitted before that will not include the Container ID.
#
logger.info("{0} Version: {1}", AGENT_LONG_NAME, AGENT_VERSION)
logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION)
logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO)
self.METHOD_NAME()
self.initialize_environment()
# If FIPS is enabled, set the OpenSSL environment variable
# Note:
# -- Subprocesses inherit the current environment
if conf.get_fips_enabled():
os.environ[OPENSSL_FIPS_ENVIRONMENT] = '1'
while self.running:
try:
self.daemon(child_args)
except Exception as e: # pylint: disable=W0612
err_msg = textutil.format_exception(e)
add_event(name=AGENT_NAME, is_success=False, message=ustr(err_msg),
op=WALAEventOperation.UnhandledError)
logger.warn("Daemon ended with exception -- Sleep 15 seconds and restart daemon")
time.sleep(15)
def METHOD_NAME(self):
"""Check whether daemon is already running"""
pid = None
pid_file = conf.get_agent_pid_file_path()
if os.path.isfile(pid_file):
pid = fileutil.read_file(pid_file)
if self.osutil.check_pid_alive(pid):
logger.info("Daemon is already running: {0}", pid)
sys.exit(0)
fileutil.write_file(pid_file, ustr(os.getpid()))
def sleep_if_disabled(self):
agent_disabled_file_path = conf.get_disable_agent_file_path()
if os.path.exists(agent_disabled_file_path):
import threading
logger.warn("Disabling the guest agent by sleeping forever; "
"to re-enable, remove {0} and restart"
.format(agent_disabled_file_path))
self.running = False
disable_event = threading.Event()
disable_event.wait()
def initialize_environment(self):
# Create lib dir
if not os.path.isdir(conf.get_lib_dir()):
fileutil.mkdir(conf.get_lib_dir(), mode=0o700)
os.chdir(conf.get_lib_dir())
def _initialize_telemetry(self):
protocol = self.protocol_util.get_protocol()
initialize_event_logger_vminfo_common_parameters(protocol)
def daemon(self, child_args=None):
logger.info("Run daemon")
self.protocol_util = get_protocol_util() # pylint: disable=W0201
self.scvmm_handler = get_scvmm_handler() # pylint: disable=W0201
self.resourcedisk_handler = get_resourcedisk_handler() # pylint: disable=W0201
self.rdma_handler = get_rdma_handler() # pylint: disable=W0201
self.provision_handler = get_provision_handler() # pylint: disable=W0201
self.update_handler = get_update_handler() # pylint: disable=W0201
if conf.get_detect_scvmm_env():
self.scvmm_handler.run()
if conf.get_resourcedisk_format():
self.resourcedisk_handler.run()
# Always redetermine the protocol start (e.g., wireserver vs.
# on-premise) since a VHD can move between environments
self.protocol_util.clear_protocol()
self.provision_handler.run()
# Once we have the protocol, complete initialization of the telemetry fields
# that require the goal state and IMDS
self._initialize_telemetry()
# Enable RDMA, continue in errors
if conf.enable_rdma():
nd_version = self.rdma_handler.get_rdma_version()
self.rdma_handler.install_driver_if_needed()
logger.info("RDMA capabilities are enabled in configuration")
try:
# Ensure the most recent SharedConfig is available
# - Changes to RDMA state may not increment the goal state
# incarnation number. A forced update ensures the most
# current values.
protocol = self.protocol_util.get_protocol()
goal_state = GoalState(protocol, goal_state_properties=GoalStateProperties.SharedConfig)
setup_rdma_device(nd_version, goal_state.shared_conf)
except Exception as e:
logger.error("Error setting up rdma device: %s" % e)
else:
logger.info("RDMA capabilities are not enabled, skipping")
self.sleep_if_disabled()
# Disable output to /dev/console once provisioning has completed
if logger.console_output_enabled():
logger.info("End of log to /dev/console. The agent will now check for updates and then will process extensions.")
logger.disable_console_output()
while self.running:
self.update_handler.run_latest(child_args=child_args)
| null |
5,025 |
#!/usr/bin/python3
# Code shared by translation conversion scripts.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import json
import os
from datetime import datetime
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
location -- where error occurred
msg -- explanation of the error
"""
def __init__(self, location, msg):
Exception.__init__(self, '{0}: {1}'.format(location, msg))
self.location = location
self.msg = msg
def read_json_file(filename):
"""Read a JSON file as UTF-8 into a dictionary, discarding @metadata.
Args:
filename: The filename, which must end ".json".
Returns:
The dictionary.
Raises:
InputError: The filename did not end with ".json" or an error occurred
while opening or reading the file.
"""
if not filename.endswith('.json'):
raise InputError(filename, 'filenames must end with ".json"')
try:
# Read in file.
with codecs.open(filename, 'r', 'utf-8') as infile:
defs = json.load(infile)
if '@metadata' in defs:
del defs['@metadata']
return defs
except ValueError as e:
print('Error reading ' + filename)
raise InputError(filename, str(e))
def _create_qqq_file(output_dir):
"""Creates a qqq.json file with message documentation for translatewiki.net.
The file consists of key-value pairs, where the keys are message ids and
the values are descriptions for the translators of the messages.
What documentation exists for the format can be found at:
http://translatewiki.net/wiki/Translating:Localisation_for_developers#Message_documentation
The file should be closed by _close_qqq_file().
Parameters:
output_dir: The output directory.
Returns:
A pointer to a file to which a left brace and newline have been written.
Raises:
IOError: An error occurred while opening or writing the file.
"""
qqq_file_name = os.path.join(os.curdir, output_dir, 'qqq.json')
qqq_file = codecs.open(qqq_file_name, 'w', 'utf-8')
print('Created file: ' + qqq_file_name)
qqq_file.write('{\n')
return qqq_file
def _close_qqq_file(qqq_file):
"""Closes a qqq.json file created and opened by _create_qqq_file().
This writes the final newlines and right brace.
Args:
qqq_file: A file created by _create_qqq_file().
Raises:
IOError: An error occurred while writing to or closing the file.
"""
qqq_file.write('\n}\n')
qqq_file.close()
def METHOD_NAME(author, lang, output_dir):
"""Creates a <lang>.json file for translatewiki.net.
The file consists of metadata, followed by key-value pairs, where the keys
are message ids and the values are the messages in the language specified
by the corresponding command-line argument. The file should be closed by
_close_lang_file().
Args:
author: Name and email address of contact for translators.
lang: ISO 639-1 source language code.
output_dir: Relative directory for output files.
Returns:
A pointer to a file to which the metadata has been written.
Raises:
IOError: An error occurred while opening or writing the file.
"""
lang_file_name = os.path.join(os.curdir, output_dir, lang + '.json')
lang_file = codecs.open(lang_file_name, 'w', 'utf-8')
print('Created file: ' + lang_file_name)
# string.format doesn't like printing braces, so break up our writes.
lang_file.write('{\n\t"@metadata": {')
lang_file.write("""
\t\t"author": "{0}",
\t\t"lastupdated": "{1}",
\t\t"locale": "{2}",
\t\t"messagedocumentation" : "qqq"
""".format(author, str(datetime.now()), lang))
lang_file.write('\t},\n')
return lang_file
def _close_lang_file(lang_file):
"""Closes a <lang>.json file created with _create_lang_file().
    This also writes the terminating newline and right brace.
Args:
lang_file: A file opened with _create_lang_file().
Raises:
IOError: An error occurred while writing to or closing the file.
"""
lang_file.write('\n}\n')
lang_file.close()
def _create_key_file(output_dir):
"""Creates a keys.json file mapping Closure keys to Blockly keys.
Args:
output_dir: Relative directory for output files.
Raises:
IOError: An error occurred while creating the file.
"""
key_file_name = os.path.join(os.curdir, output_dir, 'keys.json')
key_file = open(key_file_name, 'w')
key_file.write('{\n')
print('Created file: ' + key_file_name)
return key_file
def _close_key_file(key_file):
"""Closes a key file created and opened with _create_key_file().
Args:
key_file: A file created by _create_key_file().
Raises:
IOError: An error occurred while writing to or closing the file.
"""
key_file.write('\n}\n')
key_file.close()
def write_files(author, lang, output_dir, units, write_key_file):
"""Writes the output files for the given units.
There are three possible output files:
* lang_file: JSON file mapping meanings (e.g., Maze.turnLeft) to the
English text. The base name of the language file is specified by the
"lang" command-line argument.
* key_file: JSON file mapping meanings to Soy-generated keys (long hash
codes). This is only output if the parameter write_key_file is True.
* qqq_file: JSON file mapping meanings to descriptions.
Args:
author: Name and email address of contact for translators.
lang: ISO 639-1 source language code.
output_dir: Relative directory for output files.
units: A list of dictionaries with entries for 'meaning', 'source',
      'description', and 'key' (the last only if write_key_file is true),
in the order desired in the output files.
write_key_file: Whether to output a keys.json file.
Raises:
IOError: An error occurs opening, writing to, or closing a file.
KeyError: An expected key is missing from units.
"""
lang_file = METHOD_NAME(author, lang, output_dir)
qqq_file = _create_qqq_file(output_dir)
if write_key_file:
key_file = _create_key_file(output_dir)
first_entry = True
for unit in units:
if not first_entry:
lang_file.write(',\n')
if write_key_file:
key_file.write(',\n')
qqq_file.write(',\n')
lang_file.write(u'\t"{0}": "{1}"'.format(
unit['meaning'],
unit['source'].replace('"', "'")))
if write_key_file:
key_file.write('"{0}": "{1}"'.format(unit['meaning'], unit['key']))
qqq_file.write(u'\t"{0}": "{1}"'.format(
unit['meaning'],
unit['description'].replace('"', "'").replace(
'{lb}', '{').replace('{rb}', '}')))
first_entry = False
_close_lang_file(lang_file)
if write_key_file:
_close_key_file(key_file)
_close_qqq_file(qqq_file)
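# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how write_files() ties the helpers above together. The author
# string, language code and unit contents are made-up placeholder values;
# only functions defined in this file plus the standard library are used.
if __name__ == '__main__':
  import tempfile
  demo_dir = tempfile.mkdtemp()
  demo_units = [{
      'meaning': 'Maze.turnLeft',
      'source': 'turn left',
      'description': 'Command to rotate the player counter-clockwise.',
      'key': 'MSG_TURN_LEFT',
  }]
  write_files('Ellen <ellen@example.com>', 'en', demo_dir, demo_units,
              write_key_file=True)
  print(read_json_file(os.path.join(demo_dir, 'en.json')))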
| null |
5,026 |
import time
from .base import TestDHTBase
from ..mocking.ipv8 import MockIPv8
from ...dht import DHTError
from ...dht.discovery import DHTDiscoveryCommunity
from ...dht.routing import Node, RoutingTable
from ...util import succeed
class TestDHTDiscoveryCommunity(TestDHTBase):
def setUp(self):
super().setUp()
self.initialize(DHTDiscoveryCommunity, 2)
self.pinged = None
self.puncture_to = None
for node in self.nodes:
node.overlay.cancel_pending_task('store_peer')
node.overlay.token_maintenance()
def create_node(self, *args, **kwargs):
return MockIPv8("curve25519", DHTDiscoveryCommunity)
async def test_store_peer(self):
await self.introduce_nodes()
await self.overlay(0).store_peer()
self.assertIn(self.mid(0), self.overlay(1).store)
self.assertIn(self.mid(0), self.overlay(0).store_for_me)
async def test_store_peer_fail(self):
await self.introduce_nodes()
self.overlay(0).routing_tables[self.address(0).__class__] = RoutingTable(self.my_node_id(0))
self.assertFalse(await self.overlay(0).store_peer())
async def test_connect_peer(self):
# Add a third node
node = MockIPv8("curve25519", DHTDiscoveryCommunity)
self.add_node_to_experiment(node)
await self.introduce_nodes()
# Node1 is storing the peer of node0
self.overlay(1).store[self.mid(0)].append(self.my_peer(0))
self.overlay(0).store_for_me[self.mid(0)].append(self.my_peer(1))
org_func = self.overlay(1).create_puncture_request
def create_puncture_request(*args):
self.puncture_to = args[1]
return org_func(*args)
self.overlay(1).create_puncture_request = create_puncture_request
await self.deliver_messages()
nodes = await self.overlay(2).connect_peer(self.mid(0))
self.assertEqual(self.puncture_to, self.address(2))
self.assertIn(self.key_bin(0),
[n.public_key.key_to_bin() for n in nodes])
async def test_connect_peer_fail(self):
await self.introduce_nodes()
self.overlay(0).routing_tables[self.address(0).__class__] = RoutingTable(self.my_node_id(0))
with self.assertRaises(DHTError):
await self.overlay(0).connect_peer(self.mid(1))
async def test_ping_pong(self):
now = time.time() - 1
node0 = Node(self.private_key(0), self.address(0))
node0.last_response = now
node0.last_queries.append(now)
node1 = Node(self.private_key(1), self.address(1))
node1.last_response = now
node1.last_queries.append(now)
key = node1.mid
self.overlay(0).store[key].append(node1)
self.overlay(1).store_for_me[key].append(node0)
await self.overlay(1).ping(node0)
self.assertNotEqual(node0.last_response, now)
self.assertNotEqual(node1.last_query, now)
def METHOD_NAME(self):
self.overlay(0).ping = lambda n: setattr(self, 'pinged', n) or succeed(None)
node1 = Node(self.private_key(1), self.address(1))
node1.last_ping_sent = time.time()
node1.last_queries.append(time.time())
self.overlay(0).store[node1.mid].append(node1)
self.overlay(0).ping_all()
self.assertIn(node1, self.overlay(0).store[node1.mid])
node1.last_queries[-1] -= 100
self.overlay(0).ping_all()
self.assertNotIn(node1, self.overlay(0).store[node1.mid])
self.assertEqual(self.pinged, None)
self.overlay(0).store_for_me[node1.mid].append(node1)
self.overlay(0).ping_all()
self.assertIn(node1.mid, self.overlay(0).store_for_me)
node1.last_ping_sent -= 30
self.overlay(0).ping_all()
self.assertEqual(self.pinged, node1)
self.assertIn(node1, self.overlay(0).store_for_me[node1.mid])
self.pinged = None
node1.failed = 3
self.overlay(0).ping_all()
self.assertEqual(self.pinged, None)
self.assertNotIn(node1, self.overlay(0).store_for_me[node1.mid])
| null |
5,027 |
"""Tests for efficient functions for generating orthogonal polynomials. """
from sympy.core.numbers import Rational as Q
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.polys.polytools import Poly
from sympy.testing.pytest import raises
from sympy.polys.orthopolys import (
jacobi_poly,
gegenbauer_poly,
chebyshevt_poly,
chebyshevu_poly,
hermite_poly,
hermite_prob_poly,
legendre_poly,
laguerre_poly,
spherical_bessel_fn,
)
from sympy.abc import x, a, b
def test_jacobi_poly():
raises(ValueError, lambda: jacobi_poly(-1, a, b, x))
assert jacobi_poly(1, a, b, x, polys=True) == Poly(
(a/2 + b/2 + 1)*x + a/2 - b/2, x, domain='ZZ(a,b)')
assert jacobi_poly(0, a, b, x) == 1
assert jacobi_poly(1, a, b, x) == a/2 - b/2 + x*(a/2 + b/2 + 1)
assert jacobi_poly(2, a, b, x) == (a**2/8 - a*b/4 - a/8 + b**2/8 - b/8 +
x**2*(a**2/8 + a*b/4 + a*Q(7, 8) + b**2/8 +
b*Q(7, 8) + Q(3, 2)) + x*(a**2/4 +
a*Q(3, 4) - b**2/4 - b*Q(3, 4)) - S.Half)
assert jacobi_poly(1, a, b, polys=True) == Poly(
(a/2 + b/2 + 1)*x + a/2 - b/2, x, domain='ZZ(a,b)')
def test_gegenbauer_poly():
raises(ValueError, lambda: gegenbauer_poly(-1, a, x))
assert gegenbauer_poly(
1, a, x, polys=True) == Poly(2*a*x, x, domain='ZZ(a)')
assert gegenbauer_poly(0, a, x) == 1
assert gegenbauer_poly(1, a, x) == 2*a*x
assert gegenbauer_poly(2, a, x) == -a + x**2*(2*a**2 + 2*a)
assert gegenbauer_poly(
3, a, x) == x**3*(4*a**3/3 + 4*a**2 + a*Q(8, 3)) + x*(-2*a**2 - 2*a)
assert gegenbauer_poly(1, S.Half).dummy_eq(x)
assert gegenbauer_poly(1, a, polys=True) == Poly(2*a*x, x, domain='ZZ(a)')
def test_chebyshevt_poly():
raises(ValueError, lambda: chebyshevt_poly(-1, x))
assert chebyshevt_poly(1, x, polys=True) == Poly(x)
assert chebyshevt_poly(0, x) == 1
assert chebyshevt_poly(1, x) == x
assert chebyshevt_poly(2, x) == 2*x**2 - 1
assert chebyshevt_poly(3, x) == 4*x**3 - 3*x
assert chebyshevt_poly(4, x) == 8*x**4 - 8*x**2 + 1
assert chebyshevt_poly(5, x) == 16*x**5 - 20*x**3 + 5*x
assert chebyshevt_poly(6, x) == 32*x**6 - 48*x**4 + 18*x**2 - 1
assert chebyshevt_poly(1).dummy_eq(x)
assert chebyshevt_poly(1, polys=True) == Poly(x)
def test_chebyshevu_poly():
raises(ValueError, lambda: chebyshevu_poly(-1, x))
assert chebyshevu_poly(1, x, polys=True) == Poly(2*x)
assert chebyshevu_poly(0, x) == 1
assert chebyshevu_poly(1, x) == 2*x
assert chebyshevu_poly(2, x) == 4*x**2 - 1
assert chebyshevu_poly(3, x) == 8*x**3 - 4*x
assert chebyshevu_poly(4, x) == 16*x**4 - 12*x**2 + 1
assert chebyshevu_poly(5, x) == 32*x**5 - 32*x**3 + 6*x
assert chebyshevu_poly(6, x) == 64*x**6 - 80*x**4 + 24*x**2 - 1
assert chebyshevu_poly(1).dummy_eq(2*x)
assert chebyshevu_poly(1, polys=True) == Poly(2*x)
def test_hermite_poly():
raises(ValueError, lambda: hermite_poly(-1, x))
assert hermite_poly(1, x, polys=True) == Poly(2*x)
assert hermite_poly(0, x) == 1
assert hermite_poly(1, x) == 2*x
assert hermite_poly(2, x) == 4*x**2 - 2
assert hermite_poly(3, x) == 8*x**3 - 12*x
assert hermite_poly(4, x) == 16*x**4 - 48*x**2 + 12
assert hermite_poly(5, x) == 32*x**5 - 160*x**3 + 120*x
assert hermite_poly(6, x) == 64*x**6 - 480*x**4 + 720*x**2 - 120
assert hermite_poly(1).dummy_eq(2*x)
assert hermite_poly(1, polys=True) == Poly(2*x)
def METHOD_NAME():
raises(ValueError, lambda: hermite_prob_poly(-1, x))
assert hermite_prob_poly(1, x, polys=True) == Poly(x)
assert hermite_prob_poly(0, x) == 1
assert hermite_prob_poly(1, x) == x
assert hermite_prob_poly(2, x) == x**2 - 1
assert hermite_prob_poly(3, x) == x**3 - 3*x
assert hermite_prob_poly(4, x) == x**4 - 6*x**2 + 3
assert hermite_prob_poly(5, x) == x**5 - 10*x**3 + 15*x
assert hermite_prob_poly(6, x) == x**6 - 15*x**4 + 45*x**2 - 15
assert hermite_prob_poly(1).dummy_eq(x)
assert hermite_prob_poly(1, polys=True) == Poly(x)
def test_legendre_poly():
raises(ValueError, lambda: legendre_poly(-1, x))
assert legendre_poly(1, x, polys=True) == Poly(x, domain='QQ')
assert legendre_poly(0, x) == 1
assert legendre_poly(1, x) == x
assert legendre_poly(2, x) == Q(3, 2)*x**2 - Q(1, 2)
assert legendre_poly(3, x) == Q(5, 2)*x**3 - Q(3, 2)*x
assert legendre_poly(4, x) == Q(35, 8)*x**4 - Q(30, 8)*x**2 + Q(3, 8)
assert legendre_poly(5, x) == Q(63, 8)*x**5 - Q(70, 8)*x**3 + Q(15, 8)*x
assert legendre_poly(6, x) == Q(
231, 16)*x**6 - Q(315, 16)*x**4 + Q(105, 16)*x**2 - Q(5, 16)
assert legendre_poly(1).dummy_eq(x)
assert legendre_poly(1, polys=True) == Poly(x)
def test_laguerre_poly():
raises(ValueError, lambda: laguerre_poly(-1, x))
assert laguerre_poly(1, x, polys=True) == Poly(-x + 1, domain='QQ')
assert laguerre_poly(0, x) == 1
assert laguerre_poly(1, x) == -x + 1
assert laguerre_poly(2, x) == Q(1, 2)*x**2 - Q(4, 2)*x + 1
assert laguerre_poly(3, x) == -Q(1, 6)*x**3 + Q(9, 6)*x**2 - Q(18, 6)*x + 1
assert laguerre_poly(4, x) == Q(
1, 24)*x**4 - Q(16, 24)*x**3 + Q(72, 24)*x**2 - Q(96, 24)*x + 1
assert laguerre_poly(5, x) == -Q(1, 120)*x**5 + Q(25, 120)*x**4 - Q(
200, 120)*x**3 + Q(600, 120)*x**2 - Q(600, 120)*x + 1
assert laguerre_poly(6, x) == Q(1, 720)*x**6 - Q(36, 720)*x**5 + Q(450, 720)*x**4 - Q(2400, 720)*x**3 + Q(5400, 720)*x**2 - Q(4320, 720)*x + 1
assert laguerre_poly(0, x, a) == 1
assert laguerre_poly(1, x, a) == -x + a + 1
assert laguerre_poly(2, x, a) == x**2/2 + (-a - 2)*x + a**2/2 + a*Q(3, 2) + 1
assert laguerre_poly(3, x, a) == -x**3/6 + (a/2 + Q(
3)/2)*x**2 + (-a**2/2 - a*Q(5, 2) - 3)*x + a**3/6 + a**2 + a*Q(11, 6) + 1
assert laguerre_poly(1).dummy_eq(-x + 1)
assert laguerre_poly(1, polys=True) == Poly(-x + 1)
def test_spherical_bessel_fn():
x, z = symbols("x z")
assert spherical_bessel_fn(1, z) == 1/z**2
assert spherical_bessel_fn(2, z) == -1/z + 3/z**3
assert spherical_bessel_fn(3, z) == -6/z**2 + 15/z**4
assert spherical_bessel_fn(4, z) == 1/z - 45/z**3 + 105/z**5
| null |
5,028 |
# pylint: disable=no-member,unnecessary-dunder-call
from collections import Counter
_iter_values = 'values'
_range = range
_string_type = str
import collections as _c
class _kView(_c.KeysView):
def __iter__(self):
return self._mapping.iterkeys()
class _vView(_c.ValuesView):
def __iter__(self):
return self._mapping.itervalues()
class _iView(_c.ItemsView):
def __iter__(self):
return self._mapping.iteritems()
class VDFDict(dict):
def __init__(self, data=None):
"""
This is a dictionary that supports duplicate keys and preserves insert order
``data`` can be a ``dict``, or a sequence of key-value tuples. (e.g. ``[('key', 'value'),..]``)
The only supported type for key is str.
        Getting/setting duplicates is done with tuples ``(index, key)``, where index is the
        duplicate index for the specified key (e.g. ``(0, 'key')``, ``(1, 'key')``, ...).
        When ``key`` is a plain ``str`` instead of a tuple, set creates a new duplicate and
        get looks up ``(0, key)``.
"""
super().__init__()
self.__omap = []
self.__kcount = Counter()
if data is not None:
if not isinstance(data, (list, dict)):
raise ValueError("Expected data to be list of pairs or dict, got %s" % type(data))
self.METHOD_NAME(data)
def __repr__(self):
out = "%s(" % self.__class__.__name__
out += "%s)" % repr(list(self.iteritems()))
return out
def __len__(self):
return len(self.__omap)
def _verify_key_tuple(self, key):
if len(key) != 2:
raise ValueError("Expected key tuple length to be 2, got %d" % len(key))
if not isinstance(key[0], int):
raise TypeError("Key index should be an int")
if not isinstance(key[1], _string_type):
raise TypeError("Key value should be a str")
def _normalize_key(self, key):
if isinstance(key, _string_type):
key = (0, key)
elif isinstance(key, tuple):
self._verify_key_tuple(key)
else:
raise TypeError("Expected key to be a str or tuple, got %s" % type(key))
return key
def __setitem__(self, key, value):
if isinstance(key, _string_type):
key = (self.__kcount[key], key)
self.__omap.append(key)
elif isinstance(key, tuple):
self._verify_key_tuple(key)
if key not in self:
raise KeyError("%s doesn't exist" % repr(key))
else:
raise TypeError("Expected either a str or tuple for key")
super().__setitem__(key, value)
self.__kcount[key[1]] += 1
def __getitem__(self, key):
return super().__getitem__(self._normalize_key(key))
def __delitem__(self, key):
key = self._normalize_key(key)
result = super().__delitem__(key)
start_idx = self.__omap.index(key)
del self.__omap[start_idx]
dup_idx, skey = key
self.__kcount[skey] -= 1
tail_count = self.__kcount[skey] - dup_idx
if tail_count > 0:
for idx in _range(start_idx, len(self.__omap)):
if self.__omap[idx][1] == skey:
oldkey = self.__omap[idx]
newkey = (dup_idx, skey)
super().__setitem__(newkey, self[oldkey])
super().__delitem__(oldkey)
self.__omap[idx] = newkey
dup_idx += 1
tail_count -= 1
if tail_count == 0:
break
if self.__kcount[skey] == 0:
del self.__kcount[skey]
return result
def __iter__(self):
return iter(self.iterkeys())
def __contains__(self, key):
return super().__contains__(self._normalize_key(key))
def __eq__(self, other):
if isinstance(other, VDFDict):
return list(self.items()) == list(other.items())
return False
def __ne__(self, other):
return not self.__eq__(other)
def clear(self):
super().clear()
self.__kcount.clear()
self.__omap = []
def get(self, key, *args):
return super().get(self._normalize_key(key), *args)
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return self.__getitem__(key)
def pop(self, key):
key = self._normalize_key(key)
value = self.__getitem__(key)
self.__delitem__(key)
return value
def popitem(self):
if not self.__omap:
raise KeyError("VDFDict is empty")
key = self.__omap[-1]
return key[1], self.pop(key)
def METHOD_NAME(self, data=None, **kwargs):
if isinstance(data, dict):
data = data.items()
elif not isinstance(data, list):
raise TypeError("Expected data to be a list or dict, got %s" % type(data))
for key, value in data:
self.__setitem__(key, value)
def iterkeys(self):
return (key[1] for key in self.__omap)
def keys(self):
return _kView(self)
def itervalues(self):
return (self[key] for key in self.__omap)
def values(self):
return _vView(self)
def iteritems(self):
return ((key[1], self[key]) for key in self.__omap)
def items(self):
return _iView(self)
def get_all_for(self, key):
""" Returns all values of the given key """
if not isinstance(key, _string_type):
raise TypeError("Key needs to be a string.")
return [self[(idx, key)] for idx in _range(self.__kcount[key])]
def remove_all_for(self, key):
""" Removes all items with the given key """
if not isinstance(key, _string_type):
            raise TypeError("Key needs to be a string.")
for idx in _range(self.__kcount[key]):
super().__delitem__((idx, key))
self.__omap = list(filter(lambda x: x[1] != key, self.__omap))
del self.__kcount[key]
def has_duplicates(self):
"""
Returns ``True`` if the dict contains keys with duplicates.
        Recurses through all keys whose value is a ``VDFDict``.
"""
for n in getattr(self.__kcount, _iter_values)():
if n != 1:
return True
def dict_recurse(obj):
for v in getattr(obj, _iter_values)():
if isinstance(v, VDFDict) and v.has_duplicates():
return True
if isinstance(v, dict):
return dict_recurse(v)
return False
return dict_recurse(self)
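# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the duplicate-key behaviour described in VDFDict.__init__:
# plain string keys read/append duplicates, while (index, key) tuples
# address a specific one. Only the API defined above is exercised.
if __name__ == "__main__":
    d = VDFDict([("key", "a"), ("key", "b"), ("other", "c")])
    assert d["key"] == "a"            # a plain str key reads (0, "key")
    assert d[(1, "key")] == "b"       # a tuple addresses a specific duplicate
    d["key"] = "z"                    # setting via a str key appends a new duplicate
    assert d.get_all_for("key") == ["a", "b", "z"]
    assert d.has_duplicates()
    d.remove_all_for("key")
    assert list(d.keys()) == ["other"]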
| null |
5,029 |
# kornia.geometry.plane module inspired by Eigen::geometry::Hyperplane
# https://gitlab.com/libeigen/eigen/-/blob/master/Eigen/src/Geometry/Hyperplane.h
from typing import Optional
from kornia.core import Module, Tensor, stack, where
from kornia.core.check import KORNIA_CHECK, KORNIA_CHECK_SHAPE, KORNIA_CHECK_TYPE
from kornia.core.tensor_wrapper import unwrap, wrap # type: ignore[attr-defined]
from kornia.geometry.linalg import batched_dot_product
from kornia.geometry.vector import Scalar, Vector3
from kornia.utils.helpers import _torch_svd_cast
__all__ = ["Hyperplane", "fit_plane"]
def normalized(v: Tensor, eps: float = 1e-6) -> Tensor:
return v / batched_dot_product(v, v).add(eps).sqrt()
class Hyperplane(Module):
def __init__(self, n: Vector3, d: Scalar) -> None:
super().__init__()
KORNIA_CHECK_TYPE(n, Vector3)
KORNIA_CHECK_TYPE(d, Scalar)
# TODO: fix checkers
# KORNIA_CHECK_SHAPE(n, ["B", "*"])
# KORNIA_CHECK_SHAPE(d, ["B"])
self._n = n
self._d = d
def __str__(self) -> str:
return f"Normal: {self.normal}\nOffset: {self.offset}"
def __repr__(self) -> str:
return str(self)
@property
def normal(self) -> Vector3:
return self._n
@property
def offset(self) -> Scalar:
return self._d
def abs_distance(self, p: Vector3) -> Scalar:
return Scalar(self.signed_distance(p).abs())
# https://gitlab.com/libeigen/eigen/-/blob/master/Eigen/src/Geometry/Hyperplane.h#L145
# TODO: tests
def signed_distance(self, p: Vector3) -> Scalar:
KORNIA_CHECK(isinstance(p, (Vector3, Tensor)))
return self.normal.dot(p) + self.offset
# https://gitlab.com/libeigen/eigen/-/blob/master/Eigen/src/Geometry/Hyperplane.h#L154
# TODO: tests
def projection(self, p: Vector3) -> Vector3:
dist = self.signed_distance(p)
if len(dist.shape) != len(self.normal):
# non batched plane project a batch of points
dist = dist[..., None] # Nx1
# TODO: TypeError: bad operand type for unary -: 'Scalar'
return p - dist.data * self.normal
# TODO: make that Vector can subtract Scalar
# return p - self.signed_distance(p) * self.normal
@classmethod
def from_vector(self, n: Vector3, e: Vector3) -> "Hyperplane":
normal: Vector3 = n
offset = -normal.dot(e)
return Hyperplane(normal, Scalar(offset))
@classmethod
def through(cls, p0: Tensor, p1: Tensor, p2: Optional[Tensor] = None) -> "Hyperplane":
# 2d case
if p2 is None:
# TODO: improve tests
KORNIA_CHECK_SHAPE(p0, ["*", "2"])
KORNIA_CHECK(p0.shape == p1.shape)
# TODO: implement `.unitOrthonormal`
normal2d = normalized(p1 - p0)
offset2d = -batched_dot_product(p0, normal2d)
return Hyperplane(wrap(normal2d, Vector3), wrap(offset2d, Scalar))
# 3d case
KORNIA_CHECK_SHAPE(p0, ["*", "3"])
KORNIA_CHECK(p0.shape == p1.shape)
KORNIA_CHECK(p1.shape == p2.shape)
v0, v1 = (p2 - p0), (p1 - p0)
normal = v0.cross(v1)
norm = normal.norm(-1)
# https://gitlab.com/libeigen/eigen/-/blob/master/Eigen/src/Geometry/Hyperplane.h#L108
def compute_normal_svd(v0: Tensor, v1: Tensor) -> 'Vector3':
# NOTE: for reason TensorWrapper does not stack well
m = stack((unwrap(v0), unwrap(v1)), -2) # Bx2x3
_, _, V = _torch_svd_cast(m) # kornia solution lies in the last row
return wrap(V[..., :, -1], Vector3) # Bx3
normal_mask = norm <= v0.norm(-1) * v1.norm(-1) * 1e-6
normal = where(normal_mask, compute_normal_svd(v0, v1).data, normal / (norm + 1e-6))
offset = -batched_dot_product(p0, normal)
return Hyperplane(wrap(normal, Vector3), wrap(offset, Scalar))
# TODO: factor to avoid duplicated from line.py
# https://github.com/strasdat/Sophus/blob/23.04-beta/cpp/sophus/geometry/fit_plane.h
def METHOD_NAME(points: Vector3) -> Hyperplane:
"""Fit a plane from a set of points using SVD.
Args:
points: tensor containing a batch of sets of n-dimensional points. The expected
shape of the tensor is :math:`(N, D)`.
Return:
The computed hyperplane object.
"""
# TODO: fix to support more type check here
# KORNIA_CHECK_SHAPE(points, ["N", "D"])
if points.shape[-1] != 3:
raise TypeError("vector must be (*, 3)")
mean = points.mean(-2, True)
points_centered = points - mean
# NOTE: not optimal for 2d points, but for now works for other dimensions
_, _, V = _torch_svd_cast(points_centered)
    # the right singular vector for the smallest singular value gives the plane normal
direction = V[..., :, -1] # BxD
origin = mean[..., 0, :] # BxD
return Hyperplane.from_vector(Vector3(direction), Vector3(origin))
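# --- Hedged usage sketch (not part of the original module) ---
# Rough illustration of the plane-fitting function above (masked as
# METHOD_NAME here; __all__ suggests it corresponds to fit_plane). It assumes
# torch is importable and that Vector3 accepts a plain (N, 3) tensor, as in
# the function's own return statement. The sketch is not executed on import.
def _fit_plane_example() -> None:
    import torch
    points = torch.rand(100, 3)
    points[:, 2] = 0.0  # samples lying in the z = 0 plane
    plane = METHOD_NAME(Vector3(points))
    # The recovered normal should be close to (0, 0, +/-1) and the offset ~ 0.
    print(plane.normal, plane.offset)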
| null |
5,030 |
import os
import tempfile
from sympy.core.symbol import (Symbol, symbols)
from sympy.codegen.ast import (
Assignment, Print, Declaration, FunctionDefinition, Return, real,
FunctionCall, Variable, Element, integer
)
from sympy.codegen.fnodes import (
allocatable, ArrayConstructor, isign, dsign, cmplx, kind, literal_dp,
Program, Module, use, Subroutine, dimension, assumed_extent, ImpliedDoLoop,
intent_out, size, Do, SubroutineCall, sum_, array, bind_C
)
from sympy.codegen.futils import render_as_module
from sympy.core.expr import unchanged
from sympy.external import import_module
from sympy.printing.codeprinter import fcode
from sympy.utilities._compilation import has_fortran, compile_run_strings, compile_link_import_strings
from sympy.utilities._compilation.util import may_xfail
from sympy.testing.pytest import skip, XFAIL
cython = import_module('cython')
np = import_module('numpy')
def test_size():
x = Symbol('x', real=True)
sx = size(x)
assert fcode(sx, source_format='free') == 'size(x)'
@may_xfail
def test_size_assumed_shape():
if not has_fortran():
skip("No fortran compiler found.")
a = Symbol('a', real=True)
body = [Return((sum_(a**2)/size(a))**.5)]
arr = array(a, dim=[':'], intent='in')
fd = FunctionDefinition(real, 'rms', [arr], body)
render_as_module([fd], 'mod_rms')
(stdout, stderr), info = compile_run_strings([
('rms.f90', render_as_module([fd], 'mod_rms')),
('main.f90', (
'program myprog\n'
'use mod_rms, only: rms\n'
'real*8, dimension(4), parameter :: x = [4, 2, 2, 2]\n'
'print *, dsqrt(7d0) - rms(x)\n'
'end program\n'
))
], clean=True)
assert '0.00000' in stdout
assert stderr == ''
assert info['exit_status'] == os.EX_OK
@XFAIL # https://github.com/sympy/sympy/issues/20265
@may_xfail
def test_ImpliedDoLoop():
if not has_fortran():
skip("No fortran compiler found.")
a, i = symbols('a i', integer=True)
idl = ImpliedDoLoop(i**3, i, -3, 3, 2)
ac = ArrayConstructor([-28, idl, 28])
a = array(a, dim=[':'], attrs=[allocatable])
prog = Program('idlprog', [
a.as_Declaration(),
Assignment(a, ac),
Print([a])
])
fsrc = fcode(prog, standard=2003, source_format='free')
(stdout, stderr), info = compile_run_strings([('main.f90', fsrc)], clean=True)
for numstr in '-28 -27 -1 1 27 28'.split():
assert numstr in stdout
assert stderr == ''
assert info['exit_status'] == os.EX_OK
@may_xfail
def test_Program():
x = Symbol('x', real=True)
vx = Variable.deduced(x, 42)
decl = Declaration(vx)
prnt = Print([x, x+1])
prog = Program('foo', [decl, prnt])
if not has_fortran():
skip("No fortran compiler found.")
(stdout, stderr), info = compile_run_strings([('main.f90', fcode(prog, standard=90))], clean=True)
assert '42' in stdout
assert '43' in stdout
assert stderr == ''
assert info['exit_status'] == os.EX_OK
@may_xfail
def test_Module():
x = Symbol('x', real=True)
v_x = Variable.deduced(x)
sq = FunctionDefinition(real, 'sqr', [v_x], [Return(x**2)])
mod_sq = Module('mod_sq', [], [sq])
sq_call = FunctionCall('sqr', [42.])
prg_sq = Program('foobar', [
use('mod_sq', only=['sqr']),
Print(['"Square of 42 = "', sq_call])
])
if not has_fortran():
skip("No fortran compiler found.")
(stdout, stderr), info = compile_run_strings([
('mod_sq.f90', fcode(mod_sq, standard=90)),
('main.f90', fcode(prg_sq, standard=90))
], clean=True)
assert '42' in stdout
assert str(42**2) in stdout
assert stderr == ''
@XFAIL # https://github.com/sympy/sympy/issues/20265
@may_xfail
def test_Subroutine():
# Code to generate the subroutine in the example from
# http://www.fortran90.org/src/best-practices.html#arrays
r = Symbol('r', real=True)
i = Symbol('i', integer=True)
v_r = Variable.deduced(r, attrs=(dimension(assumed_extent), intent_out))
v_i = Variable.deduced(i)
v_n = Variable('n', integer)
do_loop = Do([
Assignment(Element(r, [i]), literal_dp(1)/i**2)
], i, 1, v_n)
sub = Subroutine("f", [v_r], [
Declaration(v_n),
Declaration(v_i),
Assignment(v_n, size(r)),
do_loop
])
x = Symbol('x', real=True)
v_x3 = Variable.deduced(x, attrs=[dimension(3)])
mod = Module('mymod', definitions=[sub])
prog = Program('foo', [
use(mod, only=[sub]),
Declaration(v_x3),
SubroutineCall(sub, [v_x3]),
Print([sum_(v_x3), v_x3])
])
if not has_fortran():
skip("No fortran compiler found.")
(stdout, stderr), info = compile_run_strings([
('a.f90', fcode(mod, standard=90)),
('b.f90', fcode(prog, standard=90))
], clean=True)
ref = [1.0/i**2 for i in range(1, 4)]
assert str(sum(ref))[:-3] in stdout
for _ in ref:
assert str(_)[:-3] in stdout
assert stderr == ''
def test_isign():
x = Symbol('x', integer=True)
assert unchanged(isign, 1, x)
assert fcode(isign(1, x), standard=95, source_format='free') == 'isign(1, x)'
def test_dsign():
x = Symbol('x')
assert unchanged(dsign, 1, x)
assert fcode(dsign(literal_dp(1), x), standard=95, source_format='free') == 'dsign(1d0, x)'
def METHOD_NAME():
x = Symbol('x')
assert unchanged(cmplx, 1, x)
def test_kind():
x = Symbol('x')
assert unchanged(kind, x)
def test_literal_dp():
assert fcode(literal_dp(0), source_format='free') == '0d0'
@may_xfail
def test_bind_C():
if not has_fortran():
skip("No fortran compiler found.")
if not cython:
skip("Cython not found.")
if not np:
skip("NumPy not found.")
a = Symbol('a', real=True)
s = Symbol('s', integer=True)
body = [Return((sum_(a**2)/s)**.5)]
arr = array(a, dim=[s], intent='in')
fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')])
f_mod = render_as_module([fd], 'mod_rms')
with tempfile.TemporaryDirectory() as folder:
mod, info = compile_link_import_strings([
('rms.f90', f_mod),
('_rms.pyx', (
"#cython: language_level={}\n".format("3") +
"cdef extern double rms(double*, int*)\n"
"def py_rms(double[::1] x):\n"
" cdef int s = x.size\n"
" return rms(&x[0], &s)\n"))
], build_dir=folder)
assert abs(mod.py_rms(np.array([2., 4., 2., 2.])) - 7**0.5) < 1e-14
| null |
5,031 |
"""
Main execution script for ``PypeIt`` reduction pipelines.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
from pypeit.scripts import scriptbase
class RunPypeIt(scriptbase.ScriptBase):
# TODO: Combining classmethod and property works in python 3.9 and later
# only: https://docs.python.org/3.9/library/functions.html#classmethod
# Order matters. In python 3.9, it would be:
#
# @classmethod
# @property
#
# Because we're not requiring python 3.9 yet, we have to leave this as a
# classmethod only:
@classmethod
def name(cls):
"""
Return the name of the executable.
"""
return 'run_pypeit'
@classmethod
def METHOD_NAME(cls):
"""
Print pypeit usage description.
"""
import textwrap
import pypeit
from pypeit.spectrographs import available_spectrographs
spclist = ', '.join(available_spectrographs)
spcl = textwrap.wrap(spclist, width=70)
descs = '## '
descs += '\x1B[1;37;42m' + 'PypeIt : '
descs += 'The Python Spectroscopic Data Reduction Pipeline v{0:s}'.format(pypeit.__version__) \
+ '\x1B[' + '0m' + '\n'
descs += '## '
descs += '\n## Available spectrographs include:'
for ispcl in spcl:
descs += '\n## ' + ispcl
return descs
@classmethod
def get_parser(cls, width=None):
import argparse
parser = super().get_parser(description=cls.METHOD_NAME(),
width=width, formatter=argparse.RawDescriptionHelpFormatter)
parser.add_argument('pypeit_file', type=str,
help='PypeIt reduction file (must have .pypeit extension)')
parser.add_argument('-v', '--verbosity', type=int, default=2,
help='Verbosity level between 0 [none] and 2 [all]')
parser.add_argument('-r', '--redux_path', default=None,
help='Path to directory for the reduction. Only advised for testing')
parser.add_argument('-m', '--do_not_reuse_calibs', dest='reuse_calibs', default=True,
action='store_false',
help='Do not load previously generated calibrations, even ones made '
'during the run.')
parser.add_argument('-s', '--show', default=False, action='store_true',
help='Show reduction steps via plots (which will block further '
'execution until clicked on) and outputs to ginga. Requires '
'remote control ginga session via '
'"ginga --modules=RC,SlitWavelength &"')
# TODO: JFH Should the default now be true with the new definition.
parser.add_argument('-o', '--overwrite', default=False, action='store_true',
help='Overwrite any existing files/directories')
parser.add_argument('-c', '--calib_only', default=False, action='store_true',
help='Only run on calibrations')
return parser
@staticmethod
def main(args):
import os
from IPython import embed
from pypeit import pypeit
from pypeit import msgs
# Load options from command line
splitnm = os.path.splitext(args.pypeit_file)
if splitnm[1] != '.pypeit':
msgs.error('Input file must have a .pypeit extension!')
logname = splitnm[0] + ".log"
# Instantiate the main pipeline reduction object
pypeIt = pypeit.PypeIt(args.pypeit_file, verbosity=args.verbosity,
reuse_calibs=args.reuse_calibs, overwrite=args.overwrite,
redux_path=args.redux_path, calib_only=args.calib_only,
logname=logname, show=args.show)
if args.calib_only:
calib_dict = pypeIt.calib_all()
else:
pypeIt.reduce_all()
msgs.info('Data reduction complete')
# QA HTML
msgs.info('Generating QA HTML')
pypeIt.build_qa()
msgs.close()
return 0
| null |
5,032 |
import unittest
import os
import numpy as npy
from pathlib import Path
from skrf.io.touchstone import Touchstone
class TouchstoneTestCase(unittest.TestCase):
"""
TouchstoneTestCase tests the IO of Touchstone files
"""
def setUp(self):
"""
Sets up the test directory
"""
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/'
def METHOD_NAME(self):
"""
This test reads data from simple_touchstone.s2p and compares with known
true values.
"""
filename = os.path.join(self.test_dir, 'simple_touchstone.s2p')
touch = Touchstone(filename)
f, s = touch.get_sparameter_arrays()
z0 = complex(touch.resistance)
f_true = npy.array([1.00000000e+09, 1.10000000e+09])
s_true = npy.array([[[1.+2.j, 5.+6.j], [3.+4.j, 7.+8.j]],
[[9.+10.j, 13.+14.j], [11.+12.j, 15.+16.j]]])
z0_true = 50+50j
self.assertTrue((f == f_true).all())
self.assertTrue((s == s_true).all())
self.assertTrue(z0 == z0_true)
def test_read_with_special_encoding(self):
"""
Read Touchstone files with various file encoding
"""
filename_utf8_sig = os.path.join(self.test_dir, 'test_encoding_UTF-8-SIG.s2p')
filename_latin1 = os.path.join(self.test_dir, 'test_encoding_ISO-8859-1.s2p')
filename_unknown = os.path.join(self.test_dir, 'test_encoding_unknown.s2p')
# most common situation: try and error guessing the encoding
Touchstone(filename_utf8_sig)
Touchstone(filename_latin1)
Touchstone(filename_unknown)
# specify the encoding
Touchstone(filename_latin1, encoding='ISO-8859-1')
Touchstone(filename_utf8_sig, encoding='utf_8_sig')
def test_read_from_fid(self):
"""
        This tests reading Touchstone data from a file object rather than from
        a string file path.
"""
with open(os.path.join(self.test_dir, 'simple_touchstone.s2p')) as fid:
touch = Touchstone(fid)
f, s = touch.get_sparameter_arrays()
z0 = complex(touch.resistance)
f_true = npy.array([1.00000000e+09, 1.10000000e+09])
s_true = npy.array([[[1.+2.j, 5.+6.j], [3.+4.j, 7.+8.j]],
[[9.+10.j, 13.+14.j], [11.+12.j, 15.+16.j]]])
z0_true = 50+50j
self.assertTrue((f == f_true).all())
self.assertTrue((s == s_true).all())
self.assertTrue(z0 == z0_true)
def test_get_sparameter_data(self):
"""
This tests the get_sparameter_data function.
"""
with open(os.path.join(self.test_dir, 'simple_touchstone.s2p')) as fid:
touch = Touchstone(fid)
expected_keys = ["frequency", "S11R", "S11I", "S12R", "S12I",
"S21R", "S21I", "S22R", "S22I", ]
unexpected_keys = ['S11DB', 'S11M', ]
# get dict data structure
sp_ri = touch.get_sparameter_data(format="ri")
# Get dict data in db to check ri -> db/angle conversion
sp_db = touch.get_sparameter_data(format="db")
# test data structure
for ek in expected_keys:
self.assertTrue(ek in sp_ri)
for uk in unexpected_keys:
self.assertFalse(uk in sp_ri)
# test data contents
expected_sp_ri = {
'frequency': npy.array([1.0e+09, 1.1e+09]),
'S11R': npy.array([1., 9.]),
'S11I': npy.array([ 2., 10.]),
'S21R': npy.array([ 3., 11.]),
'S21I': npy.array([ 4., 12.]),
'S12R': npy.array([ 5., 13.]),
'S12I': npy.array([ 6., 14.]),
'S22R': npy.array([ 7., 15.]),
'S22I': npy.array([ 8., 16.]),
}
S11 = npy.array([1., 9.]) + 1j*npy.array([ 2., 10.])
S21 = npy.array([ 3., 11.]) + 1j*npy.array([ 4., 12.])
S12 = npy.array([ 5., 13.]) + 1j*npy.array([ 6., 14.])
S22 = npy.array([ 7., 15.]) + 1j*npy.array([ 8., 16.])
expected_sp_db = {
'frequency': npy.array([1.0e+09, 1.1e+09]),
'S11DB': 20*npy.log10(npy.abs(S11)),
'S11A': npy.angle(S11, deg=True),
'S21DB': 20*npy.log10(npy.abs(S21)),
'S21A': npy.angle(S21, deg=True),
'S12DB': 20*npy.log10(npy.abs(S12)),
'S12A': npy.angle(S12, deg=True),
'S22DB': 20*npy.log10(npy.abs(S22)),
'S22A': npy.angle(S22, deg=True),
}
for k in sp_ri:
self.assertTrue(k in expected_sp_ri)
self.assertTrue( (expected_sp_ri[k] == sp_ri[k]).all(),
msg='Field %s does not match. Expected "%s", got "%s"'%(
k, str(expected_sp_ri[k]), str(sp_ri[k])) )
for k in sp_db:
self.assertTrue(k in expected_sp_db)
self.assertTrue( (expected_sp_db[k] == sp_db[k]).all(),
msg='Field %s does not match. Expected "%s", got "%s"'%(
k, str(expected_sp_db[k]), str(sp_db[k])) )
for k, v in zip(touch.get_sparameter_names(), touch.sparameters.T):
if k[0] != 'S':
# frequency doesn't match because of Hz vs GHz.
continue
self.assertTrue(npy.all(expected_sp_ri[k] == v))
def test_HFSS_touchstone_files(self):
"""
HFSS can export additional information in the Touchstone file
        such as gamma and z0 for each port. However, the way they are stored
        depends on the HFSS version.
        In versions before 2020 R2, data were stored as follows:
! Gamma ! re1 im2 re2 im2 re3 im3 re4 im4
! re5 im5 re6 im6 re7 im7 re8 im8
! re9 im9 [etc]
! Port Impedancere1 im2 re2 im2 re3 im3 re4 im4
! re5 im5 re6 im6 re7 im7 re8 im8
! re9 im9 [etc]
[NB: there is an extra ! before re1 for Gamma]
[NB: re1 value is stuck to the 'e' of Impedance]
        Since version 2020 R2, the data are stored in a single line:
! Gamma re1 im2 re2 im2 re3 im3 re4 im4 re5 im5 re6 im6 re7 im7 re8 im8 [etc]
! Port Impedance re1 im2 re2 im2 re3 im3 re4 im4 re5 im5 re6 im6 re7 im7 re8 im8 [etc]
        [NB: re1 value is no longer stuck to the 'e' of Impedance]
        This test checks that the shapes of gamma and z0 match the rank of the Network
        for Touchstone files with various numbers of ports obtained from different HFSS versions
"""
HFSS_RELEASES= ['HFSS_2019R2', 'HFSS_2020R2']
p = Path('.')
for hfss_release in HFSS_RELEASES:
for sNp_file in p.glob(hfss_release+'/*.s*'):
touchst = Touchstone(sNp_file.as_posix())
gamma, z0 = touchst.get_gamma_z0()
assert(gamma.shape[-1] == touchst.rank)
assert(z0.shape[-1] == touchst.rank)
suite = unittest.TestLoader().loadTestsFromTestCase(TouchstoneTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
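# --- Hedged usage sketch (not part of the original test module) ---
# Minimal illustration of the Touchstone API exercised by the tests above.
# The file name is a placeholder and must point at an existing two-port .s2p
# file; the sketch is not executed when the module is imported.
def _touchstone_example(filename='example_network.s2p'):
    touch = Touchstone(filename)
    f, s = touch.get_sparameter_arrays()  # f: (N,), s: (N, 2, 2) complex
    sp_db = touch.get_sparameter_data(format='db')
    print(touch.resistance, f.shape, s.shape, sorted(sp_db.keys()))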
| null |
5,033 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantidqtinterfaces.Muon.GUI.Common.home_tab.home_tab_presenter import HomeTabSubWidget
class InstrumentWidgetPresenter(HomeTabSubWidget):
def __init__(self, view, model):
self._view = view
self._model = model
self._view.on_time_zero_checkState_changed(self.handle_loaded_time_zero_checkState_change)
self._view.on_time_zero_changed(self.handle_user_changes_time_zero)
self._view.on_first_good_data_checkState_changed(self.handle_loaded_first_good_data_checkState_change)
self._view.on_first_good_data_changed(self.handle_user_changes_first_good_data)
self._view.on_last_good_data_checkState_changed(self.handle_loaded_last_good_data_checkState_change)
self._view.on_last_good_data_changed(self.METHOD_NAME)
self._view.on_fixed_rebin_edit_changed(self.handle_fixed_rebin_changed)
self._view.on_variable_rebin_edit_changed(self.handle_variable_rebin_changed)
self._view.on_rebin_type_changed(self.handle_rebin_type_changed)
self._view.on_instrument_changed(self.handle_instrument_changed)
self._view.on_double_pulse_time_changed(self.handle_double_pulse_time_changed)
self._view.on_double_pulse_checkState_changed(self.handle_double_pulse_enabled)
self.handle_loaded_time_zero_checkState_change()
self.handle_loaded_first_good_data_checkState_change()
self.handle_loaded_last_good_data_checkState_change()
def show(self):
self._view.show()
def update_view_from_model(self):
if self._view.first_good_data_state():
first_good_data = self._model.get_file_first_good_data()
self._view.set_first_good_data(first_good_data)
else:
first_good_data = self._model.get_user_first_good_data()
self._view.set_first_good_data(first_good_data)
if self._view.last_good_data_state():
last_good_data = self._model.get_file_last_good_data()
else:
last_good_data = self._model.get_last_good_data()
self._view.set_last_good_data(last_good_data)
if self._view.time_zero_state():
time_zero = self._model.get_file_time_zero()
self._view.set_time_zero(time_zero)
else:
time_zero = self._model.get_user_time_zero()
self._view.set_time_zero(time_zero)
self._view.set_instrument(self._model._data.instrument)
def clear_view(self):
self._view.set_time_zero(0.0)
self._view.set_first_good_data(0.0)
self._view.set_last_good_data(0.0)
self._view.set_combo_boxes_to_default()
self._view.set_checkboxes_to_defualt()
# ------------------------------------------------------------------------------------------------------------------
# Time Zero
# ------------------------------------------------------------------------------------------------------------------
def handle_user_changes_time_zero(self):
time_zero = self._view.get_time_zero()
self._model.set_user_time_zero(time_zero)
def handle_loaded_time_zero_checkState_change(self):
if self._view.time_zero_state():
self._model.set_time_zero_from_file(True)
time_zero = self._model.get_file_time_zero()
self._view.set_time_zero(time_zero)
else:
self._model.set_time_zero_from_file(False)
time_zero = self._model.get_user_time_zero()
self._view.set_time_zero(time_zero)
# ------------------------------------------------------------------------------------------------------------------
# First Good Data
# ------------------------------------------------------------------------------------------------------------------
def handle_user_changes_first_good_data(self):
first_good_data = self._view.get_first_good_data()
self._model.set_user_first_good_data(first_good_data)
def handle_loaded_first_good_data_checkState_change(self):
if self._view.first_good_data_state():
self._model.set_first_good_data_source(True)
first_good_data = self._model.get_file_first_good_data()
self._view.set_first_good_data(first_good_data)
else:
self._model.set_first_good_data_source(False)
first_good_data = self._model.get_user_first_good_data()
self._view.set_first_good_data(first_good_data)
def METHOD_NAME(self):
last_good_data = self._view.get_last_good_data()
self._model.set_user_last_good_data(last_good_data)
def handle_loaded_last_good_data_checkState_change(self):
if self._view.last_good_data_state():
self._model.set_last_good_data_source(True)
last_good_data = self._model.get_file_last_good_data()
self._view.set_last_good_data(last_good_data)
else:
self._model.set_last_good_data_source(False)
last_good_data = self._model.get_last_good_data()
self._view.set_last_good_data(last_good_data)
# ------------------------------------------------------------------------------------------------------------------
# Rebin
# ------------------------------------------------------------------------------------------------------------------
def handle_fixed_rebin_changed(self):
fixed_bin_size = self._view.get_fixed_bin_text()
self._model.add_fixed_binning(fixed_bin_size)
def handle_variable_rebin_changed(self):
variable_bin_size = self._view.get_variable_bin_text()
valid, message = self._model.validate_variable_rebin_string(variable_bin_size)
if not valid:
self._view.rebin_variable_edit.setText(self._model.get_variable_binning())
self._view.warning_popup(message)
else:
self._model.add_variable_binning(variable_bin_size)
def handle_rebin_type_changed(self):
rebin_type = self._view.rebin_selector.currentText()
self._model.update_binning_type(rebin_type)
# ------------------------------------------------------------------------------------------------------------------
# Instrument
# ------------------------------------------------------------------------------------------------------------------
def handle_instrument_changed(self):
"""User changes the selected instrument."""
instrument = self._view.get_instrument()
if instrument != self._model._data.instrument:
self._model._data.instrument = instrument
self._view.set_instrument(instrument, block=True)
def handle_double_pulse_time_changed(self):
double_pulse_time = self._view.get_double_pulse_time()
self._model.set_double_pulse_time(double_pulse_time)
def handle_double_pulse_enabled(self):
pulseType = self._view.double_pulse_state()
enabled = pulseType == "Double Pulse"
self._view.double_pulse_edit_enabled(enabled)
self._model.set_double_pulse_enabled(enabled)
| null |
5,034 |
"""
Program for launching emulators.
"""
import subprocess
import time
from abc import abstractmethod, ABCMeta
from core.pcr_config import emulator_console, emulator_id, emulator_address, auto_emulator_address, wait_for_launch_time
class LauncherBase(metaclass=ABCMeta):
@abstractmethod
def id_to_serial(self, id: int) -> str:
"""
        Given an emulator index id, return its corresponding port serial.
        :param id: emulator index
        :return: the port serial string
"""
pass
@abstractmethod
def launch(self, id: int, block: bool) -> None:
"""
        Launch the emulator with the given id; block controls whether to wait until it has started.
"""
pass
@abstractmethod
def quit(self, id: int) -> None:
"""
        Shut down the emulator with the given id.
"""
pass
@abstractmethod
def is_running(self, id: int) -> bool:
"""
        Return whether the emulator with the given id is currently running.
"""
pass
def restart(self, id: int, block=False):
"""
        Restart the emulator.
"""
self.quit(id)
time.sleep(3)
self.launch(id, block)
def METHOD_NAME(self, id: int, timeout=wait_for_launch_time, adb_restart_fun=None):
last_time = time.time()
cnt = 0
while not self.is_running(id):
time.sleep(1)
if time.time() - last_time > timeout:
return False
cnt += 1
if cnt % 10 == 0 and adb_restart_fun is not None:
adb_restart_fun()
return True
def wait_for_all(self, adb_restart_fun=None):
for i in emulator_id:
cnt = 0
while not self.is_running(i):
time.sleep(1)
cnt += 1
if cnt % 10 == 0 and adb_restart_fun is not None:
adb_restart_fun()
def start_all(self, adb_restart_fun=None):
for i in emulator_id:
self.launch(i, False)
self.wait_for_all(adb_restart_fun=adb_restart_fun)
def quit_all(self):
for i in emulator_id:
self.quit(i)
class DnPlayer:
def __init__(self, info: list):
        # index, title, top-level window handle, bound window handle, whether Android has booted, process PID, VBox process PID
self.index = int(info[0])
self.name = info[1]
self.top_win_handler = int(info[2])
self.bind_win_handler = int(info[3])
self.is_in_android = True if int(info[4]) == 1 else False
self.pid = int(info[5])
self.vbox_pid = int(info[6])
def is_running(self) -> bool:
return self.is_in_android
def __str__(self):
index = self.index
name = self.name
r = str(self.is_in_android)
twh = self.top_win_handler
bwh = self.bind_win_handler
pid = self.pid
vpid = self.vbox_pid
return "\nindex:%d name:%s top:%08X bind:%08X running:%s pid:%d vbox_pid:%d\n" % (
index, name, twh, bwh, r, pid, vpid)
def __repr__(self):
index = self.index
name = self.name
r = str(self.is_in_android)
twh = self.top_win_handler
bwh = self.bind_win_handler
pid = self.pid
vpid = self.vbox_pid
return "\nindex:%d name:%s top:%08X bind:%08X running:%s pid:%d vbox_pid:%d\n" % (
index, name, twh, bwh, r, pid, vpid)
class LDLauncher(LauncherBase):
def __init__(self):
self.console_str = emulator_console
def id_to_serial(self, id: int) -> str:
if auto_emulator_address:
return f"emulator-{5554 + id * 2}"
else:
return emulator_address[id]
def launch(self, id: int, block: bool = False) -> None:
cmd = f"{self.console_str} globalsetting --audio 0 --fastplay 1 --cleanmode 1"
subprocess.check_call(cmd)
cmd = f"{self.console_str} launch --index {id}"
subprocess.check_call(cmd)
cmd = f"{self.console_str} downcpu --index {id} --rate 50"
subprocess.check_call(cmd)
if len(emulator_id) == 0:
block = False
if block:
last_time = time.time()
while not self.is_running(id) and time.time() - last_time < wait_for_launch_time:
time.sleep(1)
def quit(self, id: int) -> None:
cmd = f"{self.console_str} quit --index {id}"
subprocess.check_call(cmd)
time.sleep(3)
def get_list(self):
        # Get the list of emulators
cmd = f"{self.console_str} list2"
text = subprocess.check_output(cmd).decode("gbk")
info = text.split('\n')
result = list()
for line in info:
if len(line) > 1:
dnplayer = line.split(',')
result.append(DnPlayer(dnplayer))
return result
def is_running(self, id: int) -> bool:
try:
all = self.get_list()
except Exception as e:
print("WARNING: ", e)
return False
if id >= len(all):
return False
else:
return all[id].is_running()
class BSLauncher(LauncherBase):
def __init__(self):
self.console_str = emulator_console
def id_to_serial(self, id: int) -> str:
if auto_emulator_address:
return f"emulator-{5554 + id * 10}"
else:
return emulator_address[id]
def launch(self, id: int, block: bool = False) -> None:
cmd = f"{self.console_str} launch --index {id}"
subprocess.check_call(cmd)
if len(emulator_id) == 0:
block = False
if block:
last_time = time.time()
while not self.is_running(id) and time.time() - last_time < wait_for_launch_time:
time.sleep(1)
def quit(self, id: int) -> None:
cmd = f"{self.console_str} quit --index {id}"
subprocess.check_call(cmd)
time.sleep(3)
def get_list(self):
        # Get the list of emulators
cmd = f"{self.console_str} list"
text = subprocess.check_output(cmd).decode("gbk")
info_list = text.split('\n')
device_list = []
for i in info_list:
if len(i)>0:
device_list.append(i) # Careful: Empty Lines.
return device_list
def get_running_list(self):
        # Get the list of running emulators
cmd = f"{self.console_str} runninglist"
text = subprocess.check_output(cmd).decode("gbk")
info_list = text.split('\n')
device_list = []
for i in info_list:
if len(i)>0:
device_list.append(i) # Careful: Empty Lines.
return device_list
def is_running(self, id: int) -> bool:
running_ids = {}
try:
all = self.get_list()
run = self.get_running_list()
for ind,device_name in enumerate(all):
running_ids[ind]=device_name in run
if id >= len(all):
return False
else:
return running_ids[id]
except Exception as e:
print("WARNING: ", e)
return False
EMULATOR_DICT = {
"雷电": LDLauncher,
"蓝叠": BSLauncher,
"雷神": LDLauncher,
}
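# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended call sequence for a launcher. It requires a working
# emulator console configured via core.pcr_config (emulator_console,
# emulator_id, ...), so it is illustrative only and not executed on import.
def _launcher_example() -> None:
    launcher = EMULATOR_DICT["雷电"]()  # LDLauncher
    launcher.launch(0, block=True)      # start emulator 0 and wait for it
    serial = launcher.id_to_serial(0)   # e.g. "emulator-5554" with auto_emulator_address
    print(serial, launcher.is_running(0))
    launcher.quit(0)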
| null |
5,035 |
"""Test case to start and stop games."""
from unittest.mock import MagicMock
from mpf.tests.MpfTestCase import MpfTestCase
class MpfGameTestCase(MpfTestCase):
"""Test case for starting and running games.
This is based on ``MpfTestCase`` but adds methods and assertions related
to running games (rather than just testing MPF components or devices).
"""
def __init__(self, methodName):
"""Patch minimal config needed to start a game into the machine config.
This method adds a switch called ``s_start`` with a tag called
``start``.
"""
super().__init__(methodName)
self.machine_config_patches['switches'] = dict()
self.machine_config_patches['switches']['s_start'] = {"number": "", "tags": "start"}
def start_two_player_game(self, start_switch=None):
"""Start two player game."""
self.start_game(start_switch=start_switch)
self.add_player()
def fill_troughs(self):
"""Fill all ball devices tagged with ``trough`` with balls."""
for trough in self.machine.ball_devices.items_tagged("trough"):
for switch in trough.ball_count_handler.counter.config['ball_switches']:
self.hit_switch_and_run(switch.name, 0)
self.advance_time_and_run()
def start_game(self, num_balls_known=None, start_switch=None):
"""Start a game.
This method checks to make sure a game is not running,
then hits and releases the ``s_start`` switch, and
finally checks to make sure a game actually started
properly.
For example:
.. code::
self.start_game()
"""
if num_balls_known is not None:
self.assertNumBallsKnown(num_balls_known)
if start_switch is None:
start_switch = "s_start"
# game start should work
self.assertGameIsNotRunning()
self.hit_and_release_switch(start_switch)
self.advance_time_and_run()
self.assertGameIsRunning()
self.assertEqual(1, self.machine.game.num_players)
self.assertPlayerNumber(1)
def add_player(self):
"""Add a player to the current game.
This method hits and releases a switch called ``s_start``
and then verifies that the player count actually increased
by 1.
You can call this method multiple times to add multiple
players. For example, to start a game and then add 2 additional
players (for 3 players total), you would use:
.. code::
self.start_game()
self.add_player()
self.add_player()
"""
prev_players = self.machine.game.num_players
self.hit_and_release_switch("s_start")
self.advance_time_and_run(1)
self.assertEqual(prev_players + 1, self.machine.game.num_players)
def assertBallNumber(self, number):
"""Asserts that the current ball is a certain ball numebr.
Args:
number: The number to check.
Raises:
Assertion error if there is no game in progress or if
the current ball is not the ball number passed.
The following code will check to make sure the game is on
Ball 1:
.. code::
self.assertBallNumber(1)
"""
self.assertGameIsRunning()
self.assertEqual(number, self.machine.game.player.ball)
def assertBallsInPlay(self, balls):
"""Asserts that a certain number of balls are in play.
Note that the number of balls in play is not necessarily the same as
the number of balls on the playfield. (For example, a ball could
be held in a ball device, or the machine could be in the process
        of adding a ball to the playfield.)
Args:
balls: The number of balls you want to assert are in
play.
To assert that there are 3 balls in play (perhaps during a multiball),
you would use:
.. code::
self.assertBallsInPlay(3)
"""
self.assertEqual(balls, self.machine.game.balls_in_play)
def drain_all_balls(self):
"""Drain all balls in play."""
drain = self.machine.ball_devices.items_tagged("drain")[0]
for _ in range(self.machine.game.balls_in_play):
self.machine.default_platform.add_ball_to_device(drain)
def METHOD_NAME(self):
"""Drain a single ball.
If more than 1 ball is in play, this method will need to
be called once for each ball in order to end the current
ball.
If you want to drain all balls use `drain_all_balls`.
"""
drain = self.machine.ball_devices.items_tagged("drain")[0]
self.machine.default_platform.add_ball_to_device(drain)
def assertPlayerNumber(self, number):
"""Asserts that the current player is a certain player number.
Args:
            number: The player number you want to assert is the current
player.
For example, to assert that the current player is Player 2, you
would use:
.. code::
self.assertPlayerNumber(2)
"""
self.assertEqual(number, self.machine.game.player.index + 1)
def assertPlayerCount(self, count):
"""Asserts that count players exist.
Args:
count: The expected number of players.
        For example, to assert that two players are in the game:
.. code::
self.assertPlayerCount(2)
"""
self.assertEqual(count, len(self.machine.game.player_list))
def stop_game(self, stop_time=1):
"""Stop the current game.
        This method asserts that a game is running, then calls
the game mode's ``end_game()`` method, then asserts that
the game has successfully stopped.
Example:
.. code::
self.stop_game()
"""
self.assertGameIsRunning()
self.machine.game.end_game()
self.advance_time_and_run(stop_time)
self.assertGameIsNotRunning()
def assertGameIsRunning(self):
"""Assert a game is running.
Example:
.. code::
self.assertGameIsRunning()
"""
self.assertIsNotNone(self.machine.game, "Expected a running game but no game is active.")
def assertGameIsNotRunning(self):
"""Assert a game is not running.
Example:
.. code::
self.assertGameIsNotRunning()
"""
self.assertIsNone(self.machine.game, "Expected game to have ended but game is active.")
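# Hypothetical usage sketch (not part of MPF): a test built on the helpers
# above; it assumes a machine config with a trough and a single drain device.
class ExampleGameFlowTest(MpfGameTestCase):
    def test_two_player_game(self):
        self.fill_troughs()
        self.start_two_player_game()
        self.assertPlayerCount(2)
        self.assertBallNumber(1)
        self.drain_all_balls()        # player 1 loses ball 1
        self.advance_time_and_run()
        self.assertPlayerNumber(2)
        self.stop_game()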
| null |
5,036 |
import os
import json
import pathlib
import eth_abi
import pytest
from solana.keypair import Keypair
from eth_keys import keys as eth_keys
from solana.publickey import PublicKey
from solana.rpc.commitment import Confirmed
from .solana_utils import EvmLoader, OperatorAccount, create_treasury_pool_address, make_new_user, get_solana_balance, \
deposit_neon, solana_client
from .utils.contract import deploy_contract
from .utils.storage import create_holder
from .utils.types import TreasuryPool, Caller, Contract
def pytest_addoption(parser):
parser.addoption(
"--operator-keys", action="store", default="~/.config/solana/id.json,~/.config/solana/id2.json",
help="Path to 2 comma separated operator keypairs"
)
def pytest_configure(config):
if "RUST_LOG" in os.environ:
pytest.CONTRACTS_PATH = pathlib.Path("/opt/solidity")
else:
pytest.CONTRACTS_PATH = pathlib.Path(__file__).parent / "contracts"
@pytest.fixture(scope="session")
def evm_loader(request) -> EvmLoader:
wallet = OperatorAccount(
pathlib.Path(request.config.getoption("--operator-keys").split(',')[0]).expanduser().as_posix())
loader = EvmLoader(wallet)
return loader
@pytest.fixture(scope="session")
def METHOD_NAME(request, evm_loader) -> Keypair:
"""
Initialized solana keypair with balance. Get private key from cli or ~/.config/solana/id.json
"""
with open(pathlib.Path(request.config.getoption("--operator-keys").split(',')[0]).expanduser(), "r") as key:
secret_key = json.load(key)[:32]
account = Keypair.from_secret_key(secret_key)
caller_ether = eth_keys.PrivateKey(account.secret_key[:32]).public_key.to_canonical_address()
caller, caller_nonce = evm_loader.ether2program(caller_ether)
if get_solana_balance(PublicKey(caller)) == 0:
evm_loader.check_account(account.public_key)
evm_loader.check_account(PublicKey(caller))
evm_loader.create_ether_account(caller_ether)
return account
@pytest.fixture(scope="session")
def second_operator_keypair(request, evm_loader) -> Keypair:
"""
Initialized solana keypair with balance. Get private key from cli or ~/.config/solana/id.json
"""
with open(pathlib.Path(request.config.getoption("--operator-keys").split(",")[1]).expanduser(), "r") as key:
secret_key = json.load(key)[:32]
account = Keypair.from_secret_key(secret_key)
caller_ether = eth_keys.PrivateKey(account.secret_key[:32]).public_key.to_canonical_address()
caller, caller_nonce = evm_loader.ether2program(caller_ether)
if get_solana_balance(PublicKey(caller)) == 0:
evm_loader.create_ether_account(caller_ether)
return account
@pytest.fixture(scope="session")
def treasury_pool(evm_loader) -> TreasuryPool:
index = 2
address = create_treasury_pool_address(index)
index_buf = index.to_bytes(4, 'little')
return TreasuryPool(index, address, index_buf)
@pytest.fixture(scope="function")
def user_account(evm_loader) -> Caller:
return make_new_user(evm_loader)
@pytest.fixture(scope="session")
def session_user(evm_loader) -> Caller:
return make_new_user(evm_loader)
@pytest.fixture(scope="session")
def second_session_user(evm_loader) -> Caller:
return make_new_user(evm_loader)
@pytest.fixture(scope="session")
def sender_with_tokens(evm_loader, METHOD_NAME) -> Caller:
user = make_new_user(evm_loader)
deposit_neon(evm_loader, METHOD_NAME, user.eth_address, 100000)
return user
@pytest.fixture(scope="session")
def holder_acc(METHOD_NAME) -> PublicKey:
return create_holder(METHOD_NAME)
@pytest.fixture(scope="function")
def new_holder_acc(METHOD_NAME) -> PublicKey:
return create_holder(METHOD_NAME)
@pytest.fixture(scope="function")
def rw_lock_contract(evm_loader: EvmLoader, METHOD_NAME: Keypair, session_user: Caller,
treasury_pool) -> Contract:
return deploy_contract(METHOD_NAME, session_user, "rw_lock.binary", evm_loader, treasury_pool)
@pytest.fixture(scope="function")
def rw_lock_caller(evm_loader: EvmLoader, METHOD_NAME: Keypair,
session_user: Caller, treasury_pool: TreasuryPool, rw_lock_contract: Contract) -> Contract:
constructor_args = eth_abi.encode(['address'], [rw_lock_contract.eth_address.hex()])
return deploy_contract(METHOD_NAME, session_user, "rw_lock_caller.binary", evm_loader,
treasury_pool, encoded_args=constructor_args)
@pytest.fixture(scope="function")
def string_setter_contract(evm_loader: EvmLoader, METHOD_NAME: Keypair, session_user: Caller,
treasury_pool) -> Contract:
return deploy_contract(METHOD_NAME, session_user, "string_setter.binary", evm_loader, treasury_pool)
| null |
5,037 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import ITableWorkspace
from mantidqtinterfaces.Muon.GUI.Common.ADSHandler.ADS_calls import check_if_workspace_exist, retrieve_ws
from mantidqtinterfaces.Muon.GUI.Common.contexts.corrections_context import CorrectionsContext, DEAD_TIME_FROM_FILE, DEAD_TIME_FROM_ADS
from mantidqtinterfaces.Muon.GUI.Common.contexts.muon_data_context import MuonDataContext
from mantidqtinterfaces.Muon.GUI.Common.corrections_tab_widget.corrections_model import CorrectionsModel
DEAD_TIME_TABLE_KEY = "dead-time"
class DeadTimeCorrectionsModel:
"""
The DeadTimeCorrectionsModel calculates Dead Time corrections.
"""
def __init__(self, corrections_model: CorrectionsModel, data_context: MuonDataContext, corrections_context: CorrectionsContext):
"""Initialize the DeadTimeCorrectionsModel with empty data."""
self._corrections_model = corrections_model
self._data_context = data_context
self._corrections_context = corrections_context
def dead_times_average(self) -> float:
"""Returns the average dead time for the currently selected run."""
dead_times = self._current_dead_times()
return sum(dead_times) / len(dead_times) if len(dead_times) > 0 else 0.0
def dead_times_range(self) -> tuple:
"""Returns the minimum and maximum dead time for the currently selected run."""
dead_times = self._current_dead_times()
return min(dead_times, default=0.0), max(dead_times, default=0.0)
def _current_dead_times(self) -> list:
"""Returns a list of dead times for the currently displayed run and dead time mode."""
table_name = self._corrections_context.current_dead_time_table_name_for_run(
self._data_context.instrument, self._corrections_model.current_runs()
)
table = retrieve_ws(table_name) if table_name else None
return table.toDict()[DEAD_TIME_TABLE_KEY] if table is not None else []
def METHOD_NAME(self) -> None:
"""Sets the dead time source to be 'FromFile'."""
self._corrections_context.dead_time_source = DEAD_TIME_FROM_FILE
self._corrections_context.dead_time_table_name_from_ads = None
def set_dead_time_source_to_from_ads(self, table_name: str) -> None:
"""Sets the dead time source to be 'FromADS'."""
if table_name == "None":
self.set_dead_time_source_to_none()
else:
self._corrections_context.dead_time_source = DEAD_TIME_FROM_ADS
self._corrections_context.dead_time_table_name_from_ads = table_name
def set_dead_time_source_to_none(self) -> None:
"""Sets the dead time source to be 'None'."""
self._corrections_context.dead_time_source = None
self._corrections_context.dead_time_table_name_from_ads = None
def is_dead_time_source_from_data_file(self) -> bool:
"""Returns true if the dead time should be retrieved from a data file."""
return self._corrections_context.dead_time_source == DEAD_TIME_FROM_FILE
def is_dead_time_source_from_workspace(self) -> bool:
"""Returns true if the dead time should be retrieved from a workspace."""
return self._corrections_context.dead_time_source == DEAD_TIME_FROM_ADS
def is_dead_time_source_from_none(self) -> bool:
"""Returns true if the dead time should not be retrieved from any."""
return self._corrections_context.dead_time_source is None
def validate_selected_dead_time_workspace(self, table_name: str) -> str:
"""Validates the selected dead time workspace. Returns a string containing an error message if its invalid."""
if check_if_workspace_exist(table_name):
table = retrieve_ws(table_name)
return self._validate_dead_time_table(table)
else:
return f"Workspace '{table_name}' does not exist in the ADS."
def _validate_dead_time_table(self, table: ITableWorkspace) -> str:
"""Validates that the dead time table provided has the expected format."""
if not isinstance(table, ITableWorkspace):
return "The dead time table selected is not a Table Workspace."
column_names = table.getColumnNames()
if len(column_names) != 2:
return f"Expected 2 columns, found {str(max(0, len(column_names)))} columns."
if column_names[0] != "spectrum" or column_names[1] != DEAD_TIME_TABLE_KEY:
return f"Columns have incorrect names. Column 1 should be 'spectrum' and column 2 should be " f"'{DEAD_TIME_TABLE_KEY}'."
number_of_rows = table.rowCount()
number_of_histograms = self._data_context.current_workspace.getNumberHistograms()
if number_of_rows != number_of_histograms:
return (
f"The number of histograms ({number_of_histograms}) does not match the number of rows "
f"({number_of_rows}) in dead time table."
)
return ""
| null |
5,038 |
#!/usr/bin/env python3
# Copyright (c) 2015-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
from test_framework.p2p import P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import PocketcoinTestFramework
from test_framework.util import (
assert_equal,
)
DERSIG_HEIGHT = 1251
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def METHOD_NAME(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
class BIP66Test(PocketcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'[email protected]',
'-par=1', # Use only one script thread to get the exact log msg for testing
]]
self.setup_clean_chain = True
self.rpc_timeout = 240
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_dersig_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip66'],
{
"active": is_active,
"height": DERSIG_HEIGHT,
"type": "buried",
},
)
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
self.test_dersig_info(is_active=False)
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
METHOD_NAME(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.test_dersig_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
peer.send_and_ping(msg_block(block))
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
METHOD_NAME(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'.format(block.vtx[-1].hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.test_dersig_info(is_active=True) # Not active as of current tip, but next block must obey rules
peer.send_and_ping(msg_block(block))
self.test_dersig_info(is_active=True) # Active as of current tip
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
| null |
5,039 |
import os
import re
import json
try:
from ansible.module_utils.compat.version import LooseVersion
except ImportError:
from distutils.version import LooseVersion
from .conftest import run_playbook, get_ansible_version
def run_playbook_callback(tmpdir, report_type):
extra_env = {}
ansible_version = get_ansible_version()
if LooseVersion(ansible_version) < LooseVersion('2.11'):
extra_env['ANSIBLE_CALLBACK_WHITELIST'] = "theforeman.foreman.foreman"
extra_env['ANSIBLE_COMMAND_WARNINGS'] = "0"
else:
extra_env['ANSIBLE_CALLBACKS_ENABLED'] = "theforeman.foreman.foreman"
# No connection is actually performed during the test
extra_env['FOREMAN_REPORT_TYPE'] = report_type
extra_env['FOREMAN_URL'] = "http://localhost"
if report_type == "proxy":
extra_env['FOREMAN_PROXY_URL'] = "http://localhost"
extra_env['FOREMAN_SSL_CERT'] = "/dev/zero"
extra_env['FOREMAN_SSL_KEY'] = "/dev/zero"
extra_env['FOREMAN_DIR_STORE'] = tmpdir.strpath
playbook = os.path.join('..', 'callback', 'three_hosts')
inventory = os.path.join(os.getcwd(), 'tests', 'callback', 'three_hosts')
return run_playbook(playbook, inventory=inventory, extra_env=extra_env)
def drop_incompatible_items(d):
"""
    Recursively drop report items that vary between invocations
and versions and cannot be reasonably fixed
"""
dd = {}
for k, v in d.items():
if k in ['msg', 'start', 'end', 'delta', 'uuid', 'timeout', '_ansible_no_log', 'warn', 'connection',
'extended_allitems', 'loop_control', 'expand_argument_vars', 'retries']:
continue
if isinstance(v, dict):
dd[k] = drop_incompatible_items(v)
elif isinstance(v, (list, set, tuple)):
dd[k] = type(v)(drop_incompatible_items(vv) if isinstance(vv, dict) else vv
for vv in v)
else:
dd[k] = v
return dd
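# Illustrative only (invented data): the helper above strips volatile keys
# recursively, so reports differing only in timing/uuid fields compare equal:
#   drop_incompatible_items({"task": {"uuid": "abc", "action": "ping"}, "delta": "0:00:01"})
#   -> {"task": {"action": "ping"}}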
def run_callback(tmpdir, report_type, vcrmode):
run = run_playbook_callback(tmpdir, report_type)
assert run.rc == 0
assert len(tmpdir.listdir()) > 0, "Directory with results is empty"
for real_file in tmpdir.listdir(sort=True):
contents = real_file.read()
contents = re.sub(r"\d+-\d+-\d+ \d+:\d+:\d+\+\d+:\d+", "2000-01-01 12:00:00+00:00", contents)
contents = re.sub(r"\d+-\d+-\d+[ T]\d+:\d+:\d+\.\d+", "2000-01-01 12:00:00.0000", contents)
contents = re.sub(r"\d+:\d+:\d+\.\d+", "12:00:00.0000", contents)
if report_type == "foreman":
# drop_incompatible_items cannot be used for the legacy format
contents = re.sub(r", \\\"msg\\\": \\\"\\\"", "", contents)
contents = re.sub(r"\\\"_ansible_no_log\\\": [^,]+, ", "", contents)
contents = re.sub(r", \\\"warn\\\": false", "", contents)
contents = re.sub(r", \\\"expand_argument_vars\\\": true", "", contents)
real_contents = json.loads(contents)
if report_type == "foreman":
real_contents['config_report']['metrics']['time']['total'] = 1
else:
real_contents['metrics']['time']['total'] = 1
real_contents = drop_incompatible_items(real_contents)
fixture_name = real_file.basename
fixture = os.path.join(os.getcwd(), 'tests', 'fixtures', 'callback', 'dir_store', report_type, fixture_name)
if vcrmode == "record":
print("Writing: ", str(fixture))
with open(fixture, 'w') as f:
json.dump(real_contents, f, indent=2, sort_keys=True)
else:
with open(fixture, 'r') as f:
expected_contents = json.load(f)
expected_contents = drop_incompatible_items(expected_contents)
real_contents = drop_incompatible_items(real_contents)
assert expected_contents == real_contents, "Fixture {fixture_name} differs, run with -vvvv to see the diff".format(fixture_name=fixture_name)
def test_callback_foreman(tmpdir, vcrmode):
run_callback(tmpdir, "foreman", vcrmode)
def METHOD_NAME(tmpdir, vcrmode):
run_callback(tmpdir, "proxy", vcrmode)
| null |
5,040 |
##############################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##############################################################################
"""Multi-factor Authentication implementation for Time-based One-Time Password
(TOTP) applications"""
import base64
from io import BytesIO
from typing import Union
from flask import url_for, session, flash
from flask_babel import gettext as _
from flask_login import current_user
import pyotp
import qrcode
import config
from pgadmin.model import UserMFA
from .registry import BaseMFAuth
from .utils import ValidationException, fetch_auth_option, mfa_add
from pgadmin.utils.constants import MessageType
_TOTP_AUTH_METHOD = "authenticator"
_TOTP_AUTHENTICATOR = _("Authenticator App")
_OTP_PLACEHOLDER = _("Enter code")
class TOTPAuthenticator(BaseMFAuth):
"""
    Authentication class for TOTP-based authentication.
Base Class: BaseMFAuth
"""
@classmethod
def __create_topt_for_currentuser(cls) -> pyotp.TOTP:
"""
        Create the TOTP object using the secret stored for the current user in
        the configuration database.
        Assumption: the configuration database has not been modified manually
        to remove the secret for the current user.
        Raises:
            ValidationException: Raised when the user is not registered for
                this authentication method.
Returns:
pyotp.TOTP: TOTP object for the current user (if registered)
"""
options, found = fetch_auth_option(_TOTP_AUTH_METHOD)
if found is False:
raise ValidationException(_(
"User has not registered the Time-based One-Time Password "
"(TOTP) Authenticator for authentication."
))
if options is None or options == '':
raise ValidationException(_(
"User does not have valid HASH to generate the OTP."
))
return pyotp.TOTP(options)
@property
def name(self) -> str:
"""
        Name of the authentication method for internal presentation.
Returns:
str: Short name for this authentication method
"""
return _TOTP_AUTH_METHOD
@property
def label(self) -> str:
"""
Label for the UI for this authentication method.
Returns:
str: User presentable string for this auth method
"""
return _(_TOTP_AUTHENTICATOR)
@property
def icon(self) -> str:
"""
Property for the icon url string for this auth method, to be used on
the authentication or registration page.
Returns:
str: url for the icon representation for this auth method
"""
return url_for("mfa.static", filename="images/totp_lock.svg")
def validate(self, **kwargs):
"""
Validate the code sent using the HTTP request.
Raises:
            ValidationException: Raised when the code is not valid
"""
code = kwargs.get('code', None)
totp = TOTPAuthenticator.__create_topt_for_currentuser()
if totp.verify(code) is False:
raise ValidationException("Invalid Code")
def METHOD_NAME(self) -> dict:
"""
Generate the portion of the view to render on the authentication page
Returns:
str: Authentication view as a string
"""
return dict(
auth_description=_(
"Enter the code shown in your authenticator application for "
"TOTP (Time-based One-Time Password)"
),
otp_placeholder=_OTP_PLACEHOLDER,
)
def _registration_view(self) -> dict:
"""
Internal function to generate a view for the registration page.
View will contain the QRCode image for the TOTP based authenticator
applications to scan.
Returns:
str: Registration view with QRcode for TOTP based applications
"""
option = session.pop('mfa_authenticator_opt', None)
if option is None:
option = pyotp.random_base32()
session['mfa_authenticator_opt'] = option
totp = pyotp.TOTP(option)
uri = totp.provisioning_uri(
current_user.username, issuer_name=getattr(
config, "APP_NAME", "pgAdmin 4"
)
)
img = qrcode.make(uri)
buffered = BytesIO()
img.save(buffered, format="JPEG")
img_base64 = base64.b64encode(buffered.getvalue())
return dict(
auth_title=_(_TOTP_AUTHENTICATOR),
auth_method=_TOTP_AUTH_METHOD,
image=img_base64.decode("utf-8"),
qrcode_alt_text=_("TOTP Authenticator QRCode"),
auth_description=_(
"Scan the QR code and the enter the code from the "
"TOTP Authenticator application"
), otp_placeholder=_OTP_PLACEHOLDER
)
def registration_view(self, form_data) -> Union[str, None]:
"""
Returns the registration view for this authentication method.
It is also responsible for validating the code during the registration.
Args:
form_data (dict): Form data as a dictionary sent from the
registration page for rendering or validation of
the code.
Returns:
            str: Registration view for the 'authenticator' method if this is
                not a validation request or the code sent is not a valid TOTP
                code; otherwise None.
"""
if 'VALIDATE' not in form_data:
return self._registration_view()
code = form_data.get('code', None)
authenticator_opt = session.get('mfa_authenticator_opt', None)
if authenticator_opt is None or \
pyotp.TOTP(authenticator_opt).verify(code) is False:
flash(_("Failed to validate the code"), MessageType.ERROR)
return self._registration_view()
mfa_add(_TOTP_AUTH_METHOD, authenticator_opt)
flash(_(
"TOTP Authenticator registered successfully for authentication."
), MessageType.SUCCESS)
session.pop('mfa_authenticator_opt', None)
return None
| null |
5,041 |
#!/usr/bin/env python
import unittest
from cogent3.core.moltype import CodonAlphabet
from cogent3.evolve.predicate import MotifChange, parse
class FakeModel(object):
def __init__(self, alphabet):
self.alphabet = alphabet
self.moltype = alphabet.moltype
def get_alphabet(self):
return self.alphabet
class TestPredicates(unittest.TestCase):
def setUp(self):
self.alphabet = CodonAlphabet()
self.model = FakeModel(self.alphabet)
def _makeMotifChange(self, *args, **kw):
pred = MotifChange(*args, **kw)
return pred.interpret(self.model)
def test_parse(self):
"""correctly construction"""
ag = MotifChange("A", "G")
got = parse(str(ag))
self.assertEqual(str(got), "A/G")
ts = MotifChange("A", "G") | MotifChange("C", "T")
got = parse(str(ts))
self.assertEqual(str(got), "(A/G | C/T)")
a_g = MotifChange("A", "G", forward_only=True)
t_c = MotifChange("T", "C", forward_only=True)
sym = a_g | t_c
got = parse(str(sym))
self.assertEqual(str(got), "(A>G | T>C)")
def assertMatch(self, pred, seq1, seq2):
assert pred(seq1, seq2), (pred.__doc__, (seq1, seq2))
def METHOD_NAME(self, pred, seq1, seq2):
assert not pred(seq1, seq2), ("not " + pred.__doc__, (seq1, seq2))
def test_indels(self):
indel = self._makeMotifChange("---", "NNN")
self.assertMatch(indel, "---", "AAA")
def test_impossible_change(self):
self.assertRaises(Exception, self._makeMotifChange, "----", "NNNN")
def test_isfromcpg(self):
isFromCpG = self._makeMotifChange("CG", forward_only=True)
self.assertMatch(isFromCpG, "CG", "CA")
self.assertMatch(isFromCpG, "CG", "TG")
self.assertMatch(isFromCpG, "ACG", "ATG")
self.assertMatch(isFromCpG, "CGT", "CTT")
self.METHOD_NAME(isFromCpG, "CTT", "CGT")
self.METHOD_NAME(isFromCpG, "C", "G")
def test_isfromtocpg(self):
isFromToCpG = self._makeMotifChange("CG")
self.assertMatch(isFromToCpG, "CG", "CA")
self.assertMatch(isFromToCpG, "CG", "TG")
self.assertMatch(isFromToCpG, "ACG", "ATG")
self.assertMatch(isFromToCpG, "CGT", "CTT")
self.assertMatch(isFromToCpG, "CTT", "CGT")
def test_isFromToCpA_C_only(self):
isFromToCpA_C_only = self._makeMotifChange("CA", diff_at=0)
self.assertMatch(isFromToCpA_C_only, "CA", "TA")
self.assertMatch(isFromToCpA_C_only, "TCA", "TTA")
self.assertMatch(isFromToCpA_C_only, "TAA", "CAA")
self.METHOD_NAME(isFromToCpA_C_only, "TCA", "TCT")
def test_isFromCpA_C_only(self):
isFromCpA_C_only = self._makeMotifChange("CA", forward_only=True, diff_at=0)
self.assertMatch(isFromCpA_C_only, "CA", "TA")
self.assertMatch(isFromCpA_C_only, "TCA", "TTA")
self.METHOD_NAME(isFromCpA_C_only, "TAA", "CAA")
def test_isCpT_T_only(self):
isCpT_T_only = self._makeMotifChange("CT", diff_at=1)
self.assertMatch(isCpT_T_only, "CT", "CA")
self.assertMatch(isCpT_T_only, "TCA", "TCT")
self.METHOD_NAME(isCpT_T_only, "TTA", "TCA")
self.METHOD_NAME(isCpT_T_only, "TA", "CT")
def test_isCCC(self):
isCCC = self._makeMotifChange("CCC")
self.METHOD_NAME(isCCC, "CC", "CT")
def test_isC(self):
isC = self._makeMotifChange("C")
self.assertMatch(isC, "C", "T")
self.METHOD_NAME(isC, "CA", "CT")
self.assertMatch(isC, "CA", "CC")
self.assertMatch(isC, "CAT", "GAT")
self.assertMatch(isC, "CAT", "CCT")
self.assertMatch(isC, "CAT", "CAC")
self.METHOD_NAME(isC, "CAT", "CAA")
self.METHOD_NAME(isC, "C", "C")
def test_isCtoT(self):
isCtoT = self._makeMotifChange("C", "T")
self.assertMatch(isCtoT, "C", "T")
self.assertMatch(isCtoT, "T", "C")
self.METHOD_NAME(isCtoT, "T", "A")
isCtoT = self._makeMotifChange("C", "T", forward_only=True)
self.assertMatch(isCtoT, "C", "T")
self.METHOD_NAME(isCtoT, "T", "C")
def test_isCGtoCA(self):
isCG_CA = self._makeMotifChange("CG", "CA")
self.assertMatch(isCG_CA, "CG", "CA")
self.assertMatch(isCG_CA, "CA", "CG")
self.assertMatch(isCG_CA, "CAT", "CGT")
self.assertMatch(isCG_CA, "CGT", "CAT")
self.assertMatch(isCG_CA, "TCA", "TCG")
self.METHOD_NAME(isCG_CA, "TCT", "TCG")
self.assertMatch(isCG_CA, "CGTT", "CATT")
self.assertMatch(isCG_CA, "TCGT", "TCAT")
self.assertMatch(isCG_CA, "TTCG", "TTCA")
self.assertMatch(isCG_CA, "CATT", "CGTT")
self.assertMatch(isCG_CA, "TCAT", "TCGT")
self.assertMatch(isCG_CA, "TTCA", "TTCG")
isCG_CA = self._makeMotifChange("CG", "CA", forward_only=True)
self.assertMatch(isCG_CA, "CGTT", "CATT")
self.assertMatch(isCG_CA, "TCGT", "TCAT")
self.assertMatch(isCG_CA, "TTCG", "TTCA")
self.METHOD_NAME(isCG_CA, "CATT", "CGTT")
self.METHOD_NAME(isCG_CA, "TCAT", "TCGT")
self.METHOD_NAME(isCG_CA, "TTCA", "TTCG")
isCG = self._makeMotifChange("CG", diff_at=1)
self.assertMatch(isCG, "CGTT", "CATT")
self.assertMatch(isCG, "TCGT", "TCAT")
self.assertMatch(isCG, "TTCG", "TTCA")
self.METHOD_NAME(isCG, "CGTT", "TGTT")
self.METHOD_NAME(isCG, "TCGT", "TAGT")
self.METHOD_NAME(isCG, "TTCG", "--GG")
def test_wildcards(self):
isCG_CN = self._makeMotifChange("CG", "CN")
self.assertMatch(isCG_CN, "CG", "CA")
self.METHOD_NAME(isCG_CN, "CG", "CG")
self.METHOD_NAME(isCG_CN, "CG", "C-")
if __name__ == "__main__":
unittest.main()
| null |
5,042 |
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.logic.boolalg import And
from sympy.core.symbol import Str
from sympy.unify.core import Compound, Variable
from sympy.unify.usympy import (deconstruct, construct, unify, is_associative,
is_commutative)
from sympy.abc import x, y, z, n
def METHOD_NAME():
expr = Basic(S(1), S(2), S(3))
expected = Compound(Basic, (1, 2, 3))
assert deconstruct(expr) == expected
assert deconstruct(1) == 1
assert deconstruct(x) == x
assert deconstruct(x, variables=(x,)) == Variable(x)
assert deconstruct(Add(1, x, evaluate=False)) == Compound(Add, (1, x))
assert deconstruct(Add(1, x, evaluate=False), variables=(x,)) == \
Compound(Add, (1, Variable(x)))
def test_construct():
expr = Compound(Basic, (S(1), S(2), S(3)))
expected = Basic(S(1), S(2), S(3))
assert construct(expr) == expected
def test_nested():
expr = Basic(S(1), Basic(S(2)), S(3))
cmpd = Compound(Basic, (S(1), Compound(Basic, Tuple(2)), S(3)))
assert deconstruct(expr) == cmpd
assert construct(cmpd) == expr
def test_unify():
expr = Basic(S(1), S(2), S(3))
a, b, c = map(Symbol, 'abc')
pattern = Basic(a, b, c)
assert list(unify(expr, pattern, {}, (a, b, c))) == [{a: 1, b: 2, c: 3}]
assert list(unify(expr, pattern, variables=(a, b, c))) == \
[{a: 1, b: 2, c: 3}]
def test_unify_variables():
assert list(unify(Basic(S(1), S(2)), Basic(S(1), x), {}, variables=(x,))) == [{x: 2}]
def test_s_input():
expr = Basic(S(1), S(2))
a, b = map(Symbol, 'ab')
pattern = Basic(a, b)
assert list(unify(expr, pattern, {}, (a, b))) == [{a: 1, b: 2}]
assert list(unify(expr, pattern, {a: 5}, (a, b))) == []
def iterdicteq(a, b):
a = tuple(a)
b = tuple(b)
return len(a) == len(b) and all(x in b for x in a)
def test_unify_commutative():
expr = Add(1, 2, 3, evaluate=False)
a, b, c = map(Symbol, 'abc')
pattern = Add(a, b, c, evaluate=False)
result = tuple(unify(expr, pattern, {}, (a, b, c)))
expected = ({a: 1, b: 2, c: 3},
{a: 1, b: 3, c: 2},
{a: 2, b: 1, c: 3},
{a: 2, b: 3, c: 1},
{a: 3, b: 1, c: 2},
{a: 3, b: 2, c: 1})
assert iterdicteq(result, expected)
def test_unify_iter():
expr = Add(1, 2, 3, evaluate=False)
a, b, c = map(Symbol, 'abc')
pattern = Add(a, c, evaluate=False)
assert is_associative(deconstruct(pattern))
assert is_commutative(deconstruct(pattern))
result = list(unify(expr, pattern, {}, (a, c)))
expected = [{a: 1, c: Add(2, 3, evaluate=False)},
{a: 1, c: Add(3, 2, evaluate=False)},
{a: 2, c: Add(1, 3, evaluate=False)},
{a: 2, c: Add(3, 1, evaluate=False)},
{a: 3, c: Add(1, 2, evaluate=False)},
{a: 3, c: Add(2, 1, evaluate=False)},
{a: Add(1, 2, evaluate=False), c: 3},
{a: Add(2, 1, evaluate=False), c: 3},
{a: Add(1, 3, evaluate=False), c: 2},
{a: Add(3, 1, evaluate=False), c: 2},
{a: Add(2, 3, evaluate=False), c: 1},
{a: Add(3, 2, evaluate=False), c: 1}]
assert iterdicteq(result, expected)
def test_hard_match():
from sympy.functions.elementary.trigonometric import (cos, sin)
expr = sin(x) + cos(x)**2
p, q = map(Symbol, 'pq')
pattern = sin(p) + cos(p)**2
assert list(unify(expr, pattern, {}, (p, q))) == [{p: x}]
def test_matrix():
from sympy.matrices.expressions.matexpr import MatrixSymbol
X = MatrixSymbol('X', n, n)
Y = MatrixSymbol('Y', 2, 2)
Z = MatrixSymbol('Z', 2, 3)
assert list(unify(X, Y, {}, variables=[n, Str('X')])) == [{Str('X'): Str('Y'), n: 2}]
assert list(unify(X, Z, {}, variables=[n, Str('X')])) == []
def test_non_frankenAdds():
# the is_commutative property used to fail because of Basic.__new__
# This caused is_commutative and str calls to fail
expr = x+y*2
rebuilt = construct(deconstruct(expr))
# Ensure that we can run these commands without causing an error
str(rebuilt)
rebuilt.is_commutative
def test_FiniteSet_commutivity():
from sympy.sets.sets import FiniteSet
a, b, c, x, y = symbols('a,b,c,x,y')
s = FiniteSet(a, b, c)
t = FiniteSet(x, y)
variables = (x, y)
assert {x: FiniteSet(a, c), y: b} in tuple(unify(s, t, variables=variables))
def test_FiniteSet_complex():
from sympy.sets.sets import FiniteSet
a, b, c, x, y, z = symbols('a,b,c,x,y,z')
expr = FiniteSet(Basic(S(1), x), y, Basic(x, z))
pattern = FiniteSet(a, Basic(x, b))
variables = a, b
expected = ({b: 1, a: FiniteSet(y, Basic(x, z))},
{b: z, a: FiniteSet(y, Basic(S(1), x))})
assert iterdicteq(unify(expr, pattern, variables=variables), expected)
def test_and():
variables = x, y
expected = ({x: z > 0, y: n < 3},)
assert iterdicteq(unify((z>0) & (n<3), And(x, y), variables=variables),
expected)
def test_Union():
from sympy.sets.sets import Interval
assert list(unify(Interval(0, 1) + Interval(10, 11),
Interval(0, 1) + Interval(12, 13),
variables=(Interval(12, 13),)))
def test_is_commutative():
assert is_commutative(deconstruct(x+y))
assert is_commutative(deconstruct(x*y))
assert not is_commutative(deconstruct(x**y))
def test_commutative_in_commutative():
from sympy.abc import a,b,c,d
from sympy.functions.elementary.trigonometric import (cos, sin)
eq = sin(3)*sin(4)*sin(5) + 4*cos(3)*cos(4)
pat = a*cos(b)*cos(c) + d*sin(b)*sin(c)
assert next(unify(eq, pat, variables=(a,b,c,d)))
| null |
5,043 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ServicesCommunicationsGatewayServiceLocation',
]
@pulumi.output_type
class ServicesCommunicationsGatewayServiceLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "operatorAddresses":
suggest = "operator_addresses"
elif key == "allowedMediaSourceAddressPrefixes":
suggest = "allowed_media_source_address_prefixes"
elif key == "allowedSignalingSourceAddressPrefixes":
suggest = "allowed_signaling_source_address_prefixes"
elif key == "esrpAddresses":
suggest = "esrp_addresses"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServicesCommunicationsGatewayServiceLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServicesCommunicationsGatewayServiceLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServicesCommunicationsGatewayServiceLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
location: str,
operator_addresses: Sequence[str],
allowed_media_source_address_prefixes: Optional[Sequence[str]] = None,
METHOD_NAME: Optional[Sequence[str]] = None,
esrp_addresses: Optional[Sequence[str]] = None):
"""
:param str location: Specifies the region in which the resources needed for Teams Calling will be deployed.
:param Sequence[str] operator_addresses: IP address to use to contact the operator network from this region.
:param Sequence[str] allowed_media_source_address_prefixes: Specifies the allowed source IP address or CIDR ranges for media.
:param Sequence[str] allowed_signaling_source_address_prefixes: Specifies the allowed source IP address or CIDR ranges for signaling.
:param Sequence[str] esrp_addresses: IP address to use to contact the ESRP from this region.
!> **NOTE:** The `esrp_addresses` must be specified for each `service_location` when the`e911_type` is set to `DirectToEsrp`. The `esrp_addresses` must not be specified for each `service_location` when the`e911_type` is set to `Standard`.
"""
pulumi.set(__self__, "location", location)
pulumi.set(__self__, "operator_addresses", operator_addresses)
if allowed_media_source_address_prefixes is not None:
pulumi.set(__self__, "allowed_media_source_address_prefixes", allowed_media_source_address_prefixes)
if METHOD_NAME is not None:
pulumi.set(__self__, "allowed_signaling_source_address_prefixes", METHOD_NAME)
if esrp_addresses is not None:
pulumi.set(__self__, "esrp_addresses", esrp_addresses)
@property
@pulumi.getter
def location(self) -> str:
"""
Specifies the region in which the resources needed for Teams Calling will be deployed.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="operatorAddresses")
def operator_addresses(self) -> Sequence[str]:
"""
IP address to use to contact the operator network from this region.
"""
return pulumi.get(self, "operator_addresses")
@property
@pulumi.getter(name="allowedMediaSourceAddressPrefixes")
def allowed_media_source_address_prefixes(self) -> Optional[Sequence[str]]:
"""
Specifies the allowed source IP address or CIDR ranges for media.
"""
return pulumi.get(self, "allowed_media_source_address_prefixes")
@property
@pulumi.getter(name="allowedSignalingSourceAddressPrefixes")
def METHOD_NAME(self) -> Optional[Sequence[str]]:
"""
Specifies the allowed source IP address or CIDR ranges for signaling.
"""
return pulumi.get(self, "allowed_signaling_source_address_prefixes")
@property
@pulumi.getter(name="esrpAddresses")
def esrp_addresses(self) -> Optional[Sequence[str]]:
"""
IP address to use to contact the ESRP from this region.
!> **NOTE:** The `esrp_addresses` must be specified for each `service_location` when the`e911_type` is set to `DirectToEsrp`. The `esrp_addresses` must not be specified for each `service_location` when the`e911_type` is set to `Standard`.
"""
return pulumi.get(self, "esrp_addresses")
| null |
5,044 |
from __future__ import annotations
from dials.util import tabulate
class Array:
"""
A class to represent an array
"""
def __init__(self):
"""
Initialise the array
"""
self.name = ""
self.title = ""
self.data = None
def as_dict(self):
"""
Return as a dictionary
:return: The dictionary
"""
return {
"title": self.title,
"shape": self.data.all(),
"data": list(self.data),
}
def as_str(self, prefix=""):
"""
Return as a string
:return: The string
"""
return ""
class Table:
"""
A class to represent a table
"""
def __init__(self):
"""
Initialize the table
"""
self.name = ""
self.title = ""
self.cols = []
self.rows = []
def as_dict(self):
"""
Return as a dictionary
:return: The dictionary
"""
cols = {col[0]: col[1] for col in self.cols}
rows = [{self.cols[j][0]: r for j, r in enumerate(row)} for row in self.rows]
# Create the output
return {
"title": self.title,
"cols": cols,
"rows": rows,
}
def as_str(self, prefix=""):
"""
Return the table as a string
:return: The string
"""
rows = [[col[1] for col in self.cols]]
for i, row in enumerate(self.rows):
rows.append([str(x) for x in row])
text = [prefix + self.title, tabulate(rows, headers="firstrow"), ""]
return "\n".join(text)
class Report:
"""
A class to represent the report
"""
def __init__(self):
"""
Initialize the tables
"""
self.tables = []
self.arrays = []
def add_array(self, array):
"""
Add an array to the report
:param array: The array to add
"""
self.arrays.append(array)
def METHOD_NAME(self, table):
"""
Add a table to the report
:param table: The table to add
"""
self.tables.append(table)
def combine(self, other):
"""
Combine two reports
:param other: The other report
"""
self.tables.extend(other.tables)
self.arrays.extend(other.arrays)
def as_dict(self):
"""
Return the report as a dictionary
:return: The dictionary
"""
return {
"tables": {table.name: table.as_dict() for table in self.tables},
"arrays": {array.name: array.as_dict() for array in self.arrays},
}
def as_str(self, prefix=""):
"""
Return the report as a string
:return: The string
"""
return "\n".join([table.as_str(prefix) for table in self.tables])
def as_json(self):
"""
Save the report as a json file
:return: The json string
"""
import json
return json.dumps(self.as_dict(), indent=2)
def as_xml(self):
"""
Save the report as an xml file
:return: The XML string
"""
from xml.dom import minidom
# Get the XML implementation
impl = minidom.getDOMImplementation()
# Create the XML document
doc = impl.createDocument(None, None, None)
# Create the document root
root = doc.createElement("DIALS")
# Function to process objects
def process(root, obj):
if isinstance(obj, dict):
for key, value in obj.items():
root.appendChild(process(doc.createElement(key), value))
elif isinstance(obj, list) or isinstance(obj, tuple):
for i, value in enumerate(obj):
root.appendChild(process(doc.createElement("%d" % i), value))
else:
root.appendChild(doc.createTextNode(str(obj)))
return root
# Process the dictionary and append
doc.appendChild(process(root, self.as_dict()))
# Return the XML document
return doc.toprettyxml(indent=" ")
def as_file(self, filename):
"""
        Export as a file (either json or xml, depending on the extension)
:param filename: The filename
"""
from os.path import splitext
ext = splitext(filename)[1]
with open(filename, "w") as outfile:
if ext == ".json":
outfile.write(self.as_json())
elif ext == ".xml":
outfile.write(self.as_xml())
else:
raise RuntimeError("Filename must be *.xml or *.json")
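# Minimal usage sketch (not part of the module, values invented): build a
# one-table report and export it as JSON via the helpers defined above.
if __name__ == "__main__":
    example = Table()
    example.name, example.title = "example", "Example table"
    example.cols = [("n", "N"), ("value", "Value")]
    example.rows = [[1, 0.25], [2, 0.5]]
    report = Report()
    report.tables.append(example)  # or use the add-table helper defined above
    report.as_file("report.json")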
| null |
5,045 |
"""Module to interact with the registry.
Functions aren't a 1:1 mapping to the OCI API specs, but an abstracted
version of what we actually need.
"""
from typing import List, Optional
import requests
from kubernetes import stream
from _orchest.internals import config as _config
from app import errors, utils
from app.connections import k8s_core_api
from config import CONFIG_CLASS
logger = utils.get_logger()
_VERIFY = CONFIG_CLASS.REGISTRY_TLS_CERT_BUNDLE
def get_list_of_repositories() -> List[str]:
"""Gets all repositories in the registry.
At the moment this means environments and the user configured
jupyter image.
"""
repos = []
batch_size = 50
next = f"/v2/_catalog?n={batch_size}"
while next is not None:
resp = requests.get(f"{CONFIG_CLASS.REGISTRY_ADDRESS}{next}", verify=_VERIFY)
repos.extend(resp.json().get("repositories", []))
next = resp.links.get("next", {}).get("url")
return repos
def get_tags_of_repository(repository: str) -> List[str]:
"""Gets all the tags of a repository."""
tags = []
batch_size = 50
next = f"/v2/{repository}/tags/list?n={batch_size}"
while next is not None:
resp = requests.get(f"{CONFIG_CLASS.REGISTRY_ADDRESS}{next}", verify=_VERIFY)
# The "tags" entry can be missing if the repository has just
# been created, None if there are no tags, or a list of strings.
tags_batch = resp.json().get("tags", [])
tags_batch = tags_batch if tags_batch is not None else []
tags.extend(tags_batch)
next = resp.links.get("next", {}).get("url")
return tags
def get_manifest(repository: str, tag: str) -> List[str]:
"""Gets the manifest of a tag.
Can be used, for example, to calculate total size of an image.
"""
r = requests.get(
f"{CONFIG_CLASS.REGISTRY_ADDRESS}/v2/{repository}/manifests/{tag}",
verify=_VERIFY,
headers={"Accept": "application/vnd.docker.distribution.manifest.v2+json"},
)
return r.json()
def get_manifest_digest(repository: str, tag: str) -> Optional[str]:
"""Gets the digest of a manifest.
Can be used, for example, in other registry API endpoints.
"""
# From the docs: When deleting a manifest from a registry version
# 2.3 or later, the following header must be used when HEAD or
# GET-ing the manifest to obtain the correct digest to delete.
resp = requests.head(
f"{CONFIG_CLASS.REGISTRY_ADDRESS}/v2/{repository}/manifests/{tag}",
verify=_VERIFY,
headers={"Accept": "application/vnd.docker.distribution.manifest.v2+json"},
)
digest = resp.headers.get("Docker-Content-Digest")
return digest
def delete_image_by_digest(
repository: str, digest: str, run_garbage_collection: bool
) -> None:
"""Delete an image by digest.
All tags pointing to this digest are deleted. This function should
be used when deleting a given tagged image, because doing that
directly it's not possible, see:
https://github.com/distribution/distribution/issues/1566. Note that
different tags could be backed by the same digest, meaning that
such deletion could end up deleting multiple tags.
"""
resp = requests.delete(
f"{CONFIG_CLASS.REGISTRY_ADDRESS}/v2/{repository}/manifests/{digest}",
verify=_VERIFY,
)
if resp.status_code not in [200, 202, 404]:
raise errors.ImageRegistryDeletionError(resp)
if run_garbage_collection:
METHOD_NAME()
def METHOD_NAME(repositories: Optional[List[str]] = None) -> None:
"""Runs the registry garbage collection process.
Docs: https://docs.docker.com/registry/garbage-collection/
    This is necessary because deleting an image through the API is akin
    to deleting a reference: the actual image layers on the FS will
    still be there, and a GC run is necessary to (potentially) delete
    them.
    We rely on the registry pointing to a non-existent REDIS instance
    for its cache to avoid the following issue:
    https://github.com/distribution/distribution/issues/1803, which
    makes it possible to run GC without having to restart the registry
    to clean the cache.
    This function should be called only when it is certain that no
    images are being pushed to the registry, to avoid race conditions.
    This is currently accomplished by calling it from a celery task that
    is part of the "builds" queue.
"""
if repositories is None:
repositories = []
pods = k8s_core_api.list_namespaced_pod(
_config.ORCHEST_NAMESPACE, label_selector="app=docker-registry"
)
for pod in pods.items:
logger.info(f"Running garbage collection in pod: {pod.metadata.name}.")
resp = stream.stream(
k8s_core_api.connect_get_namespaced_pod_exec,
pod.metadata.name,
_config.ORCHEST_NAMESPACE,
command=[
"./bin/registry",
"garbage-collect",
"/etc/docker/registry/config.yml",
"--delete-untagged",
],
stderr=True,
stdin=False,
stdout=True,
tty=False,
)
logger.info(str(resp))
# Sadly running the docker-registry GC is not enough.
for repo in repositories:
logger.info(
f"Deleting repo {repo} from pod {pod.metadata.name} file system."
)
resp = stream.stream(
k8s_core_api.connect_get_namespaced_pod_exec,
pod.metadata.name,
_config.ORCHEST_NAMESPACE,
command=[
"rm",
"-rf",
f"/var/lib/registry/docker/registry/v2/repositories/{repo}",
],
stderr=True,
stdin=False,
stdout=True,
tty=False,
)
logger.info(str(resp))
| null |
5,046 |
from gettext import gettext as _
from gi.repository import Gtk, Pango
from lutris.database.games import get_games
from lutris.game import Game
from lutris.gui.dialogs import ModalDialog, QuestionDialog
from lutris.util.jobs import AsyncCall
from lutris.util.log import logger
from lutris.util.strings import gtk_safe, human_size
from lutris.util.system import get_disk_size, is_removeable, path_exists, reverse_expanduser
class UninstallGameDialog(ModalDialog):
def __init__(self, game_id, parent=None):
super().__init__(parent=parent, border_width=10)
self.set_size_request(640, 128)
self.game = Game(game_id)
self.delete_files = False
self.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
self.delete_button = self.add_default_button(_("Uninstall"), Gtk.ResponseType.OK,
css_class="destructive-action")
self.connect("response", self.on_response)
container = Gtk.VBox(visible=True)
self.get_content_area().add(container)
title_label = Gtk.Label(visible=True)
title_label.set_line_wrap(True)
title_label.set_alignment(0, 0.5)
title_label.set_line_wrap_mode(Pango.WrapMode.WORD_CHAR)
title_label.set_markup(_("<span font_desc='14'><b>Uninstall %s</b></span>") % gtk_safe(self.game.name))
container.pack_start(title_label, False, False, 4)
self.folder_label = Gtk.Label(visible=True)
self.folder_label.set_alignment(0, 0.5)
if not self.game.directory:
self.folder_label.set_markup(_("No file will be deleted"))
elif len(get_games(filters={"directory": self.game.directory})) > 1:
self.folder_label.set_markup(
_("The folder %s is used by other games and will be kept.") % self.game.directory)
elif self.game.config and is_removeable(self.game.directory, self.game.config.system_config):
self.delete_button.set_sensitive(False)
self.folder_label.set_markup(_("<i>Calculating size…</i>"))
AsyncCall(get_disk_size, self.METHOD_NAME, self.game.directory)
elif not path_exists(self.game.directory):
self.folder_label.set_markup(
_("%s does not exist.") % reverse_expanduser(self.game.directory)
)
else:
self.folder_label.set_markup(
_("Content of %s are protected and will not be deleted.") % reverse_expanduser(self.game.directory)
)
container.pack_start(self.folder_label, False, False, 4)
self.confirm_delete_button = Gtk.CheckButton()
self.confirm_delete_button.set_active(True)
container.pack_start(self.confirm_delete_button, False, False, 4)
def METHOD_NAME(self, folder_size, error):
if error:
logger.error(error)
return
self.delete_files = True
self.delete_button.set_sensitive(True)
self.folder_label.hide()
self.confirm_delete_button.show()
self.confirm_delete_button.set_label(
_("Delete %s (%s)") % (
reverse_expanduser(self.game.directory),
human_size(folder_size)
)
)
def on_response(self, _widget, response):
if response == Gtk.ResponseType.OK:
self.delete_button.set_sensitive(False)
if not self.confirm_delete_button.get_active():
self.delete_files = False
if self.delete_files and not hasattr(self.game.runner, "no_game_remove_warning"):
dlg = QuestionDialog(
{
"parent": self,
"question": _(
"Please confirm.\nEverything under <b>%s</b>\n"
"will be deleted."
) % gtk_safe(self.game.directory),
"title": _("Permanently delete files?"),
}
)
if dlg.result != Gtk.ResponseType.YES:
self.delete_button.set_sensitive(True)
self.stop_emission_by_name("response")
return
if self.delete_files:
self.folder_label.set_markup(_("Uninstalling game and deleting files..."))
else:
self.folder_label.set_markup(_("Uninstalling game..."))
self.game.remove(self.delete_files)
self.destroy()
class RemoveGameDialog(ModalDialog):
def __init__(self, game_id, parent=None):
super().__init__(parent=parent, border_width=10)
self.set_size_request(640, 128)
self.game = Game(game_id)
self.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
self.remove_button = self.add_default_button(_("Remove"), Gtk.ResponseType.OK,
css_class="destructive-action")
self.connect("response", self.on_response)
container = Gtk.VBox(visible=True)
self.get_content_area().add(container)
title_label = Gtk.Label(visible=True)
title_label.set_line_wrap(True)
title_label.set_alignment(0, 0.5)
title_label.set_line_wrap_mode(Pango.WrapMode.WORD_CHAR)
title_label.set_markup(_("<span font_desc='14'><b>Remove %s</b></span>") % gtk_safe(self.game.name))
container.pack_start(title_label, False, False, 4)
self.delete_label = Gtk.Label(visible=True)
self.delete_label.set_alignment(0, 0.5)
self.delete_label.set_markup(
_("Completely remove %s from the library?\nAll play time will be lost.") % self.game)
container.pack_start(self.delete_label, False, False, 4)
def on_response(self, _widget, response):
if response == Gtk.ResponseType.OK:
self.remove_button.set_sensitive(False)
self.game.delete()
self.destroy()
| null |
5,047 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtWidgets, QtCore
from mantidqtinterfaces.Muon.GUI.Common.plot_widget.quick_edit.axis_changer.axis_changer_presenter import AxisChangerWidget
class QuickEditView(QtWidgets.QWidget):
error_signal = QtCore.Signal(object)
def __init__(self, subcontext, parent=None, default_msg="All"):
super(QuickEditView, self).__init__(parent)
self._default_selector_msg = default_msg
button_layout = QtWidgets.QHBoxLayout()
self.plot_selector = QtWidgets.QComboBox()
self.plot_selector.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
self.plot_selector.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
self.plot_selector.setMinimumContentsLength(12)
self.plot_selector.setEditable(True)
self.plot_selector.completer().setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
self.plot_selector.view().setMinimumWidth(100)
self.plot_selector.completer().setFilterMode(QtCore.Qt.MatchContains)
self.plot_selector.addItem(self._default_selector_msg)
self.plot_selector.setEditable(False)
self.x_axis_changer = AxisChangerWidget("X", self)
self.autoscale = None
self.autoscale = QtWidgets.QCheckBox("Autoscale y")
self.autoscale.setToolTip("While pan or zoom are enabled autoscale is disabled")
self.y_axis_changer = AxisChangerWidget("Y", self)
self.errors = QtWidgets.QCheckBox("Errors")
self.errors.stateChanged.connect(self._emit_errors)
button_layout.addWidget(self.plot_selector)
button_layout.addWidget(self.x_axis_changer.view)
button_layout.addWidget(self.autoscale)
button_layout.addWidget(self.y_axis_changer.view)
button_layout.addWidget(self.errors)
self.setLayout(button_layout)
@property
def METHOD_NAME(self):
return self._default_selector_msg
""" plot selection """
def disable_plot_selection(self):
self.plot_selector.setEnabled(False)
def add_subplot(self, name):
self.plot_selector.blockSignals(True)
self.plot_selector.addItem(name)
self.plot_selector.adjustSize()
self.plot_selector.blockSignals(False)
def rm_subplot(self, index):
self.plot_selector.removeItem(index)
self.plot_selector.adjustSize()
def current_selection(self):
return self.plot_selector.currentText()
def find_subplot(self, name):
return self.plot_selector.findText(name)
def set_selection(self, index: int):
self.plot_selector.setCurrentIndex(index)
def get_selection_index(self) -> int:
return self.plot_selector.currentIndex()
def plot_at_index(self, index):
return self.plot_selector.itemText(index)
def number_of_plots(self):
return self.plot_selector.count()
def clear_subplots(self):
self.plot_selector.blockSignals(True)
self.plot_selector.clear()
self.plot_selector.addItem(self._default_selector_msg)
self.plot_selector.blockSignals(False)
def connect_plot_selection(self, slot):
self.plot_selector.currentIndexChanged.connect(slot)
""" x axis selection """
def connect_x_range_changed(self, slot):
self.x_axis_changer.on_range_changed(slot)
def set_plot_x_range(self, limits):
self.x_axis_changer.set_limits(limits)
def get_x_bounds(self):
return self.x_axis_changer.get_limits()
""" y axis selection """
def connect_y_range_changed(self, slot):
self.y_axis_changer.on_range_changed(slot)
def set_plot_y_range(self, limits):
self.y_axis_changer.set_limits(limits)
def get_y_bounds(self):
return self.y_axis_changer.get_limits()
def disable_yaxis_changer(self):
self.y_axis_changer.view.setEnabled(False)
def enable_yaxis_changer(self):
self.y_axis_changer.view.setEnabled(True)
""" auto scale selection """
def connect_autoscale_changed(self, slot):
self.autoscale.clicked.connect(slot)
@property
def autoscale_state(self):
return self.autoscale.checkState()
def disable_autoscale(self):
self.autoscale.setEnabled(False)
def enable_autoscale(self):
self.autoscale.setEnabled(True)
def set_autoscale(self, state: bool):
self.autoscale.setChecked(state)
def uncheck_autoscale(self):
self.autoscale.setChecked(False)
""" errors selection """
# need our own signal that sends a bool
def _emit_errors(self):
state = self.get_errors()
self.error_signal.emit(state)
def connect_errors_changed(self, slot):
self.error_signal.connect(slot)
def set_errors(self, state):
self.errors.setChecked(state)
def get_errors(self):
return self.errors.isChecked()
| null |
5,048 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVolumeGroupSapHanaResult',
'AwaitableGetVolumeGroupSapHanaResult',
'get_volume_group_sap_hana',
'get_volume_group_sap_hana_output',
]
@pulumi.output_type
class GetVolumeGroupSapHanaResult:
"""
A collection of values returned by getVolumeGroupSapHana.
"""
def __init__(__self__, METHOD_NAME=None, application_identifier=None, group_description=None, id=None, location=None, name=None, resource_group_name=None, volumes=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'account_name' to be a str")
pulumi.set(__self__, "account_name", METHOD_NAME)
if application_identifier and not isinstance(application_identifier, str):
raise TypeError("Expected argument 'application_identifier' to be a str")
pulumi.set(__self__, "application_identifier", application_identifier)
if group_description and not isinstance(group_description, str):
raise TypeError("Expected argument 'group_description' to be a str")
pulumi.set(__self__, "group_description", group_description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if volumes and not isinstance(volumes, list):
raise TypeError("Expected argument 'volumes' to be a list")
pulumi.set(__self__, "volumes", volumes)
@property
@pulumi.getter(name="accountName")
def METHOD_NAME(self) -> str:
return pulumi.get(self, "account_name")
@property
@pulumi.getter(name="applicationIdentifier")
def application_identifier(self) -> str:
"""
The application identifier.
"""
return pulumi.get(self, "application_identifier")
@property
@pulumi.getter(name="groupDescription")
def group_description(self) -> str:
"""
Volume group description.
"""
return pulumi.get(self, "group_description")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region where the Application Volume Group exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of this volume.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def volumes(self) -> Sequence['outputs.GetVolumeGroupSapHanaVolumeResult']:
"""
A `volume` block as defined below.
"""
return pulumi.get(self, "volumes")
class AwaitableGetVolumeGroupSapHanaResult(GetVolumeGroupSapHanaResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVolumeGroupSapHanaResult(
METHOD_NAME=self.METHOD_NAME,
application_identifier=self.application_identifier,
group_description=self.group_description,
id=self.id,
location=self.location,
name=self.name,
resource_group_name=self.resource_group_name,
volumes=self.volumes)
def get_volume_group_sap_hana(METHOD_NAME: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeGroupSapHanaResult:
"""
Use this data source to access information about an existing Application Volume Group for SAP HANA application.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.netapp.get_volume_group_sap_hana(name="existing application volume group name",
resource_group_name="resource group name where the account and volume group belong to",
account_name="existing account where the application volume group belong to")
pulumi.export("id", example.id)
```
    :param str account_name: Name of the account that the application volume group belongs to.
:param str name: The name of this Application Volume Group for SAP HANA application.
:param str resource_group_name: The name of the Resource Group where the Application Volume Group exists.
"""
__args__ = dict()
__args__['accountName'] = METHOD_NAME
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:netapp/getVolumeGroupSapHana:getVolumeGroupSapHana', __args__, opts=opts, typ=GetVolumeGroupSapHanaResult).value
return AwaitableGetVolumeGroupSapHanaResult(
METHOD_NAME=pulumi.get(__ret__, 'account_name'),
application_identifier=pulumi.get(__ret__, 'application_identifier'),
group_description=pulumi.get(__ret__, 'group_description'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
volumes=pulumi.get(__ret__, 'volumes'))
@_utilities.lift_output_func(get_volume_group_sap_hana)
def get_volume_group_sap_hana_output(METHOD_NAME: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVolumeGroupSapHanaResult]:
"""
Use this data source to access information about an existing Application Volume Group for SAP HANA application.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.netapp.get_volume_group_sap_hana(name="existing application volume group name",
resource_group_name="resource group name where the account and volume group belong to",
account_name="existing account where the application volume group belong to")
pulumi.export("id", example.id)
```
    :param str account_name: Name of the account that the application volume group belongs to.
:param str name: The name of this Application Volume Group for SAP HANA application.
:param str resource_group_name: The name of the Resource Group where the Application Volume Group exists.
"""
...
| null |
5,049 |
"""
In this example, nmpc (Nonlinear model predictive control) is applied on a simple 2-dofs arm model. The goal is to
perform a rotation of the arm in a quasi-cyclic manner. The sliding window across iterations is advanced for a full
cycle at a time while optimizing three cycles at a time (the main difference between cyclic and multi-cyclic is that
the latter optimizes several cycles at a time, giving the solver the knowledge that 'something' is coming after)
"""
import platform
import numpy as np
from bioptim import (
BiorbdModel,
MultiCyclicNonlinearModelPredictiveControl,
Dynamics,
DynamicsFcn,
Objective,
ObjectiveFcn,
ConstraintList,
ConstraintFcn,
BoundsList,
InitialGuessList,
Solver,
Node,
Axis,
Solution,
)
class MyCyclicNMPC(MultiCyclicNonlinearModelPredictiveControl):
def advance_window_bounds_states(self, sol, n_cycles_simultaneous=None):
        # Reimplementation of the advance_window method so that the rotation of the wheel restarts at -pi
super(MyCyclicNMPC, self).advance_window_bounds_states(sol)
self.nlp[0].x_bounds["q"].min[0, :] = -2 * np.pi * n_cycles_simultaneous
self.nlp[0].x_bounds["q"].max[0, :] = 0
return True
def advance_window_initial_guess_states(self, sol, n_cycles_simultaneous=None):
        # Reimplementation of the advance_window method so that the rotation of the wheel restarts at -pi
super(MyCyclicNMPC, self).advance_window_initial_guess_states(sol)
self.nlp[0].x_init["q"].init[0, :] = sol.states["q"][0, :] # Keep the previously found value for the wheel
return True
def prepare_nmpc(
model_path,
cycle_len,
cycle_duration,
n_cycles_simultaneous,
n_cycles_to_advance,
max_torque,
assume_phase_dynamics: bool = True,
expand_dynamics: bool = True,
):
model = BiorbdModel(model_path)
dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN, expand=expand_dynamics)
x_bounds = BoundsList()
x_bounds["q"] = model.bounds_from_ranges("q")
x_bounds["q"].min[0, :] = -2 * np.pi * n_cycles_simultaneous # Allow the wheel to spin as much as needed
x_bounds["q"].max[0, :] = 0
x_bounds["qdot"] = model.bounds_from_ranges("qdot")
u_bounds = BoundsList()
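    # Torque bounds are supplied as a (min, max) pair, one entry per generalized coordinate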
u_bounds["tau"] = [-max_torque] * model.nb_q, [max_torque] * model.nb_q
new_objectives = Objective(ObjectiveFcn.Lagrange.MINIMIZE_STATE, key="q")
# Rotate the wheel and force the marker of the hand to follow the marker on the wheel
wheel_target = np.linspace(-2 * np.pi * n_cycles_simultaneous, 0, cycle_len * n_cycles_simultaneous + 1)[
np.newaxis, :
]
constraints = ConstraintList()
constraints.add(ConstraintFcn.TRACK_STATE, key="q", index=0, node=Node.ALL, target=wheel_target)
constraints.add(
ConstraintFcn.SUPERIMPOSE_MARKERS,
node=Node.ALL,
first_marker="wheel",
second_marker="COM_hand",
axes=[Axis.X, Axis.Y],
)
return MyCyclicNMPC(
model,
dynamics,
cycle_len=cycle_len,
cycle_duration=cycle_duration,
n_cycles_simultaneous=n_cycles_simultaneous,
n_cycles_to_advance=n_cycles_to_advance,
objective_functions=new_objectives,
constraints=constraints,
x_bounds=x_bounds,
u_bounds=u_bounds,
assume_phase_dynamics=assume_phase_dynamics,
)
def main():
model_path = "models/arm2.bioMod"
torque_max = 50
cycle_duration = 1
cycle_len = 20
n_cycles_to_advance = 1
n_cycles_simultaneous = 3
n_cycles = 4
nmpc = prepare_nmpc(
model_path,
cycle_len=cycle_len,
cycle_duration=cycle_duration,
n_cycles_to_advance=n_cycles_to_advance,
n_cycles_simultaneous=n_cycles_simultaneous,
max_torque=torque_max,
)
def METHOD_NAME(_nmpc: MultiCyclicNonlinearModelPredictiveControl, cycle_idx: int, _sol: Solution):
        return cycle_idx < n_cycles  # True if there are still some cycles to perform
# Solve the program
sol = nmpc.solve(
METHOD_NAME,
solver=Solver.IPOPT(show_online_optim=platform.system() == "Linux"),
n_cycles_simultaneous=n_cycles_simultaneous,
)
sol.print_cost()
sol.graphs()
sol.animate(n_frames=100)
if __name__ == "__main__":
main()
| null |
5,050 |
# SPDX-License-Identifier: MIT
"""
Testing strategies for Hypothesis-based tests.
"""
import keyword
import string
from collections import OrderedDict
from hypothesis import strategies as st
import attr
from .utils import make_class
optional_bool = st.one_of(st.none(), st.booleans())
def gen_attr_names():
"""
Generate names for attributes, 'a'...'z', then 'aa'...'zz'.
~702 different attribute names should be enough in practice.
Some short strings (such as 'as') are keywords, so we skip them.
"""
lc = string.ascii_lowercase
yield from lc
for outer in lc:
for inner in lc:
res = outer + inner
if keyword.iskeyword(res):
continue
            yield res
def maybe_underscore_prefix(source):
"""
A generator to sometimes prepend an underscore.
"""
to_underscore = False
for val in source:
yield val if not to_underscore else "_" + val
to_underscore = not to_underscore
@st.composite
def _create_hyp_nested_strategy(draw, simple_class_strategy):
"""
Create a recursive attrs class.
Given a strategy for building (simpler) classes, create and return
a strategy for building classes that have as an attribute: either just
the simpler class, a list of simpler classes, a tuple of simpler classes,
an ordered dict or a dict mapping the string "cls" to a simpler class.
"""
cls = draw(simple_class_strategy)
factories = [
cls,
lambda: [cls()],
lambda: (cls(),),
lambda: {"cls": cls()},
lambda: OrderedDict([("cls", cls())]),
]
factory = draw(st.sampled_from(factories))
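    # attr.Factory defers the call, so every generated instance gets a freshly
    # built nested value instead of sharing a single mutable default.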
attrs = [*draw(list_of_attrs), attr.ib(default=attr.Factory(factory))]
return make_class("HypClass", dict(zip(gen_attr_names(), attrs)))
bare_attrs = st.builds(attr.ib, default=st.none())
int_attrs = st.integers().map(lambda i: attr.ib(default=i))
str_attrs = st.text().map(lambda s: attr.ib(default=s))
float_attrs = st.floats().map(lambda f: attr.ib(default=f))
dict_attrs = st.dictionaries(keys=st.text(), values=st.integers()).map(
lambda d: attr.ib(default=d)
)
simple_attrs_without_metadata = (
bare_attrs | int_attrs | str_attrs | float_attrs | dict_attrs
)
@st.composite
def simple_attrs_with_metadata(draw):
"""
Create a simple attribute with arbitrary metadata.
"""
c_attr = draw(simple_attrs)
keys = st.booleans() | st.binary() | st.integers() | st.text()
vals = st.booleans() | st.binary() | st.integers() | st.text()
metadata = draw(
st.dictionaries(keys=keys, values=vals, min_size=1, max_size=3)
)
return attr.ib(
default=c_attr._default,
validator=c_attr._validator,
repr=c_attr.repr,
eq=c_attr.eq,
order=c_attr.order,
hash=c_attr.hash,
METHOD_NAME=c_attr.METHOD_NAME,
metadata=metadata,
type=None,
converter=c_attr.converter,
)
simple_attrs = simple_attrs_without_metadata | simple_attrs_with_metadata()
# Python functions support up to 255 arguments.
list_of_attrs = st.lists(simple_attrs, max_size=3)
@st.composite
def simple_classes(
draw, slots=None, frozen=None, weakref_slot=None, private_attrs=None
):
"""
A strategy that generates classes with default non-attr attributes.
For example, this strategy might generate a class such as:
@attr.s(slots=True, frozen=True, weakref_slot=True)
class HypClass:
a = attr.ib(default=1)
_b = attr.ib(default=None)
c = attr.ib(default='text')
_d = attr.ib(default=1.0)
c = attr.ib(default={'t': 1})
By default, all combinations of slots, frozen, and weakref_slot classes
will be generated. If `slots=True` is passed in, only slotted classes will
be generated, and if `slots=False` is passed in, no slotted classes will be
generated. The same applies to `frozen` and `weakref_slot`.
By default, some attributes will be private (i.e. prefixed with an
underscore). If `private_attrs=True` is passed in, all attributes will be
private, and if `private_attrs=False`, no attributes will be private.
"""
attrs = draw(list_of_attrs)
frozen_flag = draw(st.booleans())
slots_flag = draw(st.booleans())
weakref_flag = draw(st.booleans())
if private_attrs is None:
attr_names = maybe_underscore_prefix(gen_attr_names())
elif private_attrs is True:
attr_names = ("_" + n for n in gen_attr_names())
elif private_attrs is False:
attr_names = gen_attr_names()
cls_dict = dict(zip(attr_names, attrs))
pre_init_flag = draw(st.booleans())
post_init_flag = draw(st.booleans())
init_flag = draw(st.booleans())
if pre_init_flag:
def pre_init(self):
pass
cls_dict["__attrs_pre_init__"] = pre_init
if post_init_flag:
def post_init(self):
pass
cls_dict["__attrs_post_init__"] = post_init
if not init_flag:
def METHOD_NAME(self, *args, **kwargs):
self.__attrs_init__(*args, **kwargs)
cls_dict["__init__"] = METHOD_NAME
return make_class(
"HypClass",
cls_dict,
slots=slots_flag if slots is None else slots,
frozen=frozen_flag if frozen is None else frozen,
weakref_slot=weakref_flag if weakref_slot is None else weakref_slot,
METHOD_NAME=init_flag,
)
# st.recursive works by taking a base strategy (in this case, simple_classes)
# and a special function. This function receives a strategy, and returns
# another strategy (building on top of the base strategy).
nested_classes = st.recursive(
simple_classes(), _create_hyp_nested_strategy, max_leaves=3
)
| null |
5,051 |
import unittest
from parameterized.parameterized import parameterized
from conan.tools.build.flags import architecture_flag, build_type_flags
from conans.test.utils.mocks import MockSettings
class CompilerFlagsTest(unittest.TestCase):
@parameterized.expand([("gcc", "x86", None, "-m32"),
("clang", "x86", None, "-m32"),
("sun-cc", "x86", None, "-m32"),
("gcc", "x86_64", None, "-m64"),
("clang", "x86_64", None, "-m64"),
("sun-cc", "x86_64", None, "-m64"),
("sun-cc", "sparc", None, "-m32"),
("sun-cc", "sparcv9", None, "-m64"),
("gcc", "armv7", None, ""),
("clang", "armv7", None, ""),
("sun-cc", "armv7", None, ""),
("gcc", "s390", None, "-m31"),
("clang", "s390", None, "-m31"),
("sun-cc", "s390", None, "-m31"),
("gcc", "s390x", None, "-m64"),
("clang", "s390x", None, "-m64"),
("sun-cc", "s390x", None, "-m64"),
("msvc", "x86", None, ""),
("msvc", "x86_64", None, ""),
("gcc", "ppc32", "AIX", "-maix32"),
("gcc", "ppc64", "AIX", "-maix64"),
])
def test_arch_flag(self, compiler, arch, the_os, flag):
settings = MockSettings({"compiler": compiler,
"arch": arch,
"os": the_os})
self.assertEqual(architecture_flag(settings), flag)
def METHOD_NAME(self):
settings = MockSettings({"compiler": "apple-clang",
"arch": "x86_64",
"os": "Macos",
"os.subsystem": "catalyst",
"os.subsystem.ios_version": "13.1"})
self.assertEqual(architecture_flag(settings), "--target=x86_64-apple-ios13.1-macabi")
settings = MockSettings({"compiler": "apple-clang",
"arch": "armv8",
"os": "Macos",
"os.subsystem": "catalyst",
"os.subsystem.ios_version": "13.1"})
self.assertEqual(architecture_flag(settings), "--target=arm64-apple-ios13.1-macabi")
@parameterized.expand([("Linux", "x86", "-m32"),
("Linux", "x86_64", "-m64"),
("Windows", "x86", "/Qm32"),
("Windows", "x86_64", "/Qm64"),
])
def test_arch_flag_intel(self, os_, arch, flag):
settings = MockSettings({"compiler": "intel-cc",
"os": os_,
"arch": arch})
self.assertEqual(architecture_flag(settings), flag)
@parameterized.expand([("e2k-v2", "-march=elbrus-v2"),
("e2k-v3", "-march=elbrus-v3"),
("e2k-v4", "-march=elbrus-v4"),
("e2k-v5", "-march=elbrus-v5"),
("e2k-v6", "-march=elbrus-v6"),
("e2k-v7", "-march=elbrus-v7"),
])
def test_arch_flag_mcst_lcc(self, arch, flag):
settings = MockSettings({"compiler": "mcst-lcc",
"arch": arch})
self.assertEqual(architecture_flag(settings), flag)
@parameterized.expand([("msvc", "Debug", None, "-Zi -Ob0 -Od"),
("msvc", "Release", None, "-O2 -Ob2"),
("msvc", "RelWithDebInfo", None, "-Zi -O2 -Ob1"),
("msvc", "MinSizeRel", None, "-O1 -Ob1"),
("msvc", "Debug", "v140_clang_c2", "-gline-tables-only -fno-inline -O0"),
("msvc", "Release", "v140_clang_c2", "-O2"),
("msvc", "RelWithDebInfo", "v140_clang_c2", "-gline-tables-only -O2 -fno-inline"),
("msvc", "MinSizeRel", "v140_clang_c2", ""),
("gcc", "Debug", None, "-g"),
("gcc", "Release", None, "-O3"),
("gcc", "RelWithDebInfo", None, "-O2 -g"),
("gcc", "MinSizeRel", None, "-Os"),
("clang", "Debug", None, "-g"),
("clang", "Release", None, "-O3"),
("clang", "RelWithDebInfo", None, "-O2 -g"),
("clang", "MinSizeRel", None, "-Os"),
("apple-clang", "Debug", None, "-g"),
("apple-clang", "Release", None, "-O3"),
("apple-clang", "RelWithDebInfo", None, "-O2 -g"),
("apple-clang", "MinSizeRel", None, "-Os"),
("sun-cc", "Debug", None, "-g"),
("sun-cc", "Release", None, "-xO3"),
("sun-cc", "RelWithDebInfo", None, "-xO2 -g"),
("sun-cc", "MinSizeRel", None, "-xO2 -xspace"),
])
def test_build_type_flags(self, compiler, build_type, vs_toolset, flags):
settings = MockSettings({"compiler": compiler,
"build_type": build_type,
"compiler.toolset": vs_toolset})
self.assertEqual(' '.join(build_type_flags(settings)),
flags)
| null |
5,052 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtCore, QtWidgets
from matplotlib.figure import Figure
from mantidqt.utils.qt import load_ui
from mantidqt.MPLwidgets import FigureCanvasQTAgg as FigureCanvas
Ui_sample_transmission, _ = load_ui(__file__, "SampleTransmission.ui")
class SampleTransmissionCalculatorView(QtWidgets.QWidget, Ui_sample_transmission):
def __init__(self, parent=None):
super(SampleTransmissionCalculatorView, self).__init__(parent)
self.setupUi(self)
fig = Figure()
fig.set_layout_engine(layout="tight")
self.axes = fig.add_subplot(111)
self.plot_frame = FigureCanvas(fig)
self.axes.set_xlabel("Wavelength (Å)")
self.axes.set_ylabel("Transmission (%)")
self.output_layout.replaceWidget(self.placeholder_widget, self.plot_frame)
self.assistant_process = QtCore.QProcess(self)
self.validation_label.setStyleSheet("QLabel { color : red; }")
self.histogram_err.setStyleSheet("QLabel { color : red; }")
self.chemical_formula_err.setStyleSheet("QLabel { color : red; }")
self.density_err.setStyleSheet("QLabel { color : red; }")
self.thickness_err.setStyleSheet("QLabel { color : red; }")
self.multiple_line_edit.setToolTip(
"A comma separated list of first bin boundary, width, last bin boundary. \n"
"Optionally this can be followed by a comma and more widths and last boundary pairs. \n"
"E.g. 0,100,20000: from 0 rebin in constant size bins of 100 up to 20,000. \n"
"Or 0,100,10000,200,20000: from 0 rebin in steps of 100 to 10,000 then steps of 200 to 20,000."
)
def get_input_dict(self):
input_dict = {
"binning_type": self.binning_type_combo_box.currentIndex(),
"single_low": self.single_low_spin_box.value(),
"single_width": self.single_width_spin_box.value(),
"single_high": self.single_high_spin_box.value(),
"multiple_bin": self.multiple_line_edit.text(),
"chemical_formula": self.chemical_formula_line_edit.text(),
"density_type": self.density_combo_box.currentText(),
"density": self.density_spin_box.value(),
"thickness": self.thickness_spin_box.value(),
}
return input_dict
def set_output_table(self, output_dict, statistics):
self.results_tree.clear()
scattering_item = QtWidgets.QTreeWidgetItem()
scattering_item.setText(0, "Scattering")
scattering_item.setText(1, str(statistics))
self.results_tree.addTopLevelItem(scattering_item)
transmission_item = QtWidgets.QTreeWidgetItem()
transmission_item.setText(0, "Transmission")
self.results_tree.addTopLevelItem(transmission_item)
transmission_item.setExpanded(True)
for key in output_dict:
item = QtWidgets.QTreeWidgetItem()
item.setText(0, key)
item.setText(1, str(output_dict[key]))
transmission_item.addChild(item)
def plot(self, x, y):
self.axes.cla()
self.axes.plot(x, y)
self.axes.set_xlabel("Wavelength (Å)")
self.axes.set_ylabel("Transmission (%)")
self.plot_frame.figure.tight_layout()
self.plot_frame.draw()
def set_validation_label(self, warning_text=""):
if warning_text == "":
self.validation_label.setStyleSheet("QLabel { color : red; }")
else:
self.validation_label.setStyleSheet("QLabel { color : red; border: 1px solid red }")
self.validation_label.setText(warning_text)
def METHOD_NAME(self):
self.histogram_err.setText("")
self.chemical_formula_err.setText("")
self.density_err.setText("")
self.thickness_err.setText("")
def set_error_indicator(self, error_key):
getattr(self, error_key + "_err").setText("*")
| null |
5,053 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantidqtinterfaces.Muon.GUI.MuonAnalysis.plot_widget.muon_analysis_plot_widget import MuonAnalysisPlotWidget, FIT, RAW
from mantidqtinterfaces.Muon.GUI.Common.plot_widget.main_plot_widget_view import MainPlotWidgetView
from mantidqtinterfaces.Muon.GUI.Common.plot_widget.plotting_canvas.plotting_canvas_widget import PlottingCanvasWidget
from mantidqtinterfaces.Muon.GUI.Common.plot_widget.base_pane.base_pane_view import BasePaneView
from mantidqtinterfaces.Muon.GUI.FrequencyDomainAnalysis.plot_widget.plot_freq_fit_pane_model import PlotFreqFitPaneModel
from mantidqtinterfaces.Muon.GUI.FrequencyDomainAnalysis.plot_widget.plot_freq_fit_pane_presenter import PlotFreqFitPanePresenter
from mantidqtinterfaces.Muon.GUI.FrequencyDomainAnalysis.plot_widget.dual_plot_maxent_pane.dual_plot_maxent_pane_presenter import (
DualPlotMaxentPanePresenter,
)
from mantidqtinterfaces.Muon.GUI.FrequencyDomainAnalysis.plot_widget.dual_plot_maxent_pane.dual_plot_maxent_pane_model import (
DualPlotMaxentPaneModel,
)
from mantidqtinterfaces.Muon.GUI.FrequencyDomainAnalysis.plot_widget.dual_plot_maxent_pane.dual_plot_maxent_pane_view import (
DualPlotMaxentPaneView,
)
from mantidqtinterfaces.Muon.GUI.Common.plot_widget.raw_pane.raw_pane_model import RawPaneModel
from mantidqtinterfaces.Muon.GUI.Common.plot_widget.quick_edit.quick_edit_widget import DualQuickEditWidget
# These are just used internally for ID purposes
# the rest of the code uses the names from the models
MAXENT = "maxent dual"
class FrequencyAnalysisPlotWidget(MuonAnalysisPlotWidget):
def __init__(self, context=None, parent=None):
super().__init__(context, parent)
def _create_default_panes(self):
self.view = MainPlotWidgetView(self._parent)
# The plotting canvas widgets
self.plotting_canvas_widgets = {}
# The UI view
self._views = {}
# create default panes
self.create_data_pane()
self.create_fit_pane()
self.create_maxent_pane()
def METHOD_NAME(self):
old_plot_mode = self._current_plot_mode
self._current_plot_mode = self.presenter.get_plot_mode
if old_plot_mode == self._current_plot_mode:
return
if self._current_plot_mode == self.mode_name(RAW):
# plot the raw data
self.modes[RAW].handle_data_updated()
self.presenter.hide(old_plot_mode)
self.presenter.show(self._current_plot_mode)
def update_freq_units_add_subscriber(self, subscriber):
self.fit_mode.update_freq_units.add_subscriber(subscriber)
""" Fit (and transform) pane """
def create_fit_pane(self):
self.fit_model = PlotFreqFitPaneModel(self._context)
name = self.fit_model.name
self.plotting_canvas_widgets[name] = PlottingCanvasWidget(
self._parent, context=self._context.plot_panes_context[name], plot_model=self.fit_model
)
self._views[name] = BasePaneView(self._parent)
self._views[name].add_canvas_widget(self.plotting_canvas_widgets[name].widget)
self.modes[FIT] = PlotFreqFitPanePresenter(
self._views[name], self.fit_model, self._context, self._context.fitting_context, self.plotting_canvas_widgets[name].presenter
)
self._panes.append(self.modes[FIT])
@property
def fit_index(self):
return self.view.get_index(self.mode_name(FIT))
@property
def fit_mode(self):
return self.modes[FIT]
""" Maxent dual pane """
def create_maxent_pane(self):
self.dual_model = DualPlotMaxentPaneModel(self._context, self.data_model, RawPaneModel(self._context))
name = self.dual_model.name
dual_quick_edit = DualQuickEditWidget(self._context.plot_panes_context[name], self._parent)
self.plotting_canvas_widgets[name] = PlottingCanvasWidget(
self._parent, context=self._context.plot_panes_context[name], plot_model=self.dual_model, figure_options=dual_quick_edit
)
self._views[name] = DualPlotMaxentPaneView(self._parent)
self._views[name].add_canvas_widget(self.plotting_canvas_widgets[name].widget)
self.modes[MAXENT] = DualPlotMaxentPanePresenter(
self._views[name], self.dual_model, self._context, self.plotting_canvas_widgets[name].presenter
)
self._panes.append(self.modes[MAXENT])
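        # Cross-wire the two panes: frequency-unit changes from the maxent pane
        # update the fit pane, and fit-pane updates refresh the maxent x label.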
self.maxent_mode.update_freq_units.add_subscriber(self.fit_mode.update_fit_pane_observer)
self.fit_mode.update_maxent_plot.add_subscriber(self.maxent_mode.update_x_label_observer)
@property
def maxent_index(self):
return self.view.get_index(self.mode_name(MAXENT))
@property
def maxent_mode(self):
return self.modes[MAXENT]
| null |
5,054 |
import datetime
import arrow
import discord
from discord.ext import commands, tasks
from pydis_core.utils.channel import get_or_fetch_channel
from bot import constants
from bot.bot import Bot
from bot.constants import Channels, Guild, Roles, STAFF_PARTNERS_COMMUNITY_ROLES
from bot.decorators import in_whitelist
from bot.log import get_logger
log = get_logger(__name__)
PATREON_INFORMATION = (
"Python Discord is a volunteer run non-profit organization, so we rely on Patreon donations to do what we do. "
"We use the money we get to offer excellent prizes for all of our events. These include t-shirts, "
"stickers, and sometimes even Raspberry Pis!\n\n"
"You can read more about how Patreon donations help us, and consider donating yourself, on our patreon page "
"[here](https://pydis.com/patreon)!"
)
NO_PATRONS_MESSAGE = "*There are currently no patrons at this tier.*"
# List of tuples containing tier number and Discord role ID.
# Ordered from highest tier to lowest.
PATREON_TIERS: list[tuple[int, int]] = [
(3, Roles.patreon_tier_3),
(2, Roles.patreon_tier_2),
(1, Roles.patreon_tier_1),
]
def get_patreon_tier(member: discord.Member) -> int:
"""
Get the patreon tier of `member`.
A patreon tier of 0 indicates the user is not a patron.
"""
for tier, role_id in PATREON_TIERS:
if member.get_role(role_id):
return tier
return 0
class Patreon(commands.Cog):
"""Cog that shows patreon supporters."""
def __init__(self, bot: Bot) -> None:
self.bot = bot
self.current_monthly_supporters.start()
@commands.Cog.listener()
async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
"""Send a message when someone receives a patreon role."""
old_patreon_tier = get_patreon_tier(before)
new_patreon_tier = get_patreon_tier(after)
if new_patreon_tier <= old_patreon_tier:
return
message = (
f":tada: {after.mention} just became a **tier {new_patreon_tier}** patron!\n"
"Support us on Patreon: https://pydis.com/patreon"
)
channel = await get_or_fetch_channel(self.bot, Channels.meta)
await channel.send(message)
async def send_current_supporters(self, channel: discord.abc.Messageable, automatic: bool = False) -> None:
"""Send the current list of patreon supporters, sorted by tier level."""
guild = self.bot.get_guild(Guild.id)
embed_list = []
for tier, role_id in PATREON_TIERS:
role = guild.get_role(role_id)
# Filter out any members where this is not their highest tier.
patrons = [member for member in role.members if get_patreon_tier(member) == tier]
patron_names = [f"• {patron}" for patron in patrons]
embed = discord.Embed(
title=role.name,
description="\n".join(patron_names) if patron_names else NO_PATRONS_MESSAGE,
colour=role.colour
)
embed_list.append(embed)
main_embed = discord.Embed(
title="Patreon Supporters - Monthly Update" if automatic else "Patreon Supporters",
description=(
PATREON_INFORMATION +
"\n\nThank you to the users listed below who are already supporting us!"
),
)
await channel.send(embeds=(main_embed, *embed_list))
@commands.group("patreon", aliases=("patron",), invoke_without_command=True)
async def METHOD_NAME(self, ctx: commands.Context) -> None:
"""Send information about how Python Discord uses Patreon."""
embed = discord.Embed(
title="Patreon",
description=(
PATREON_INFORMATION +
"\n\nTo see our current supporters, run " +
f"`{constants.Bot.prefix}patreon supporters` in <#{Channels.bot_commands}>"
)
)
await ctx.send(embed=embed)
@METHOD_NAME.command("supporters", aliases=("patrons",))
@in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_PARTNERS_COMMUNITY_ROLES)
async def patreon_supporters(self, ctx: commands.Context) -> None:
"""Sends the current list of patreon supporters, sorted by tier level."""
await self.send_current_supporters(ctx.channel)
@tasks.loop(time=datetime.time(hour=17))
async def current_monthly_supporters(self) -> None:
"""A loop running daily to see if it's the first of the month. If so call `self.send_current_supporters()`."""
now = arrow.utcnow()
if now.day == 1:
meta_channel = await get_or_fetch_channel(self.bot, Channels.meta)
await self.send_current_supporters(meta_channel, automatic=True)
async def setup(bot: Bot) -> None:
"""Load the Patreon cog."""
await bot.add_cog(Patreon(bot))
| null |
5,055 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.api import *
import mantid.simpleapi as sapi
class EnggVanadiumCorrectionsTest(unittest.TestCase):
_data_ws = None
_van_integ_tbl = None
_van_curves_ws = None
NUM_SPEC = 2513
# Note not using @classmethod setUpClass / tearDownClass because that's not supported in the old
# unittest of rhel6
def setUp(self):
"""
Set up dependencies (big files load) for one or more of the tests below.
"""
if not self.__class__._data_ws:
self.__class__._data_ws = sapi.LoadNexus("ENGINX00228061.nxs", OutputWorkspace="ENGIN-X_test_ws")
if not self.__class__._van_curves_ws:
            # Note: use the pre-calculated file instead of the (very large) vanadium run
# self.__class__._van_ws = LoadNexus("ENGINX00236516.nxs", OutputWorkspace='ENGIN-X_test_vanadium_ws')
self.__class__._van_curves_ws = sapi.LoadNexus(
Filename="ENGINX_precalculated_vanadium_run000236516_bank_curves.nxs", OutputWorkspace="ENGIN-X_vanadium_curves_test_ws"
)
if not self.__class__._van_integ_tbl:
self.__class__._van_integ_tbl = sapi.LoadNexus(
Filename="ENGINX_precalculated_vanadium_run000236516_integration.nxs", OutputWorkspace="ENGIN-X_vanadium_integ_test_ws"
)
def test_issues_with_properties(self):
"""
Tests proper error handling when passing wrong properties or not passing required
ones.
"""
# absolutely wrong properties passed
self.assertRaises(RuntimeError, sapi.EnggVanadiumCorrections, File="foo", Bank="1")
        # Wrong (misspelled) Workspace property
self.assertRaises(RuntimeError, sapi.EnggVanadiumCorrections, InputWorkspace="anything_goes")
        # misspelled VanadiumWorkspace
self.assertRaises(
RuntimeError,
sapi.EnggVanadiumCorrections,
VanWorkspace=self.__class__._data_ws,
IntegrationWorkspace=self.__class__._van_integ_tbl,
CurvesWorkspace=self.__class__._van_curves_ws,
)
        # misspelled CurvesWorkspace
self.assertRaises(
RuntimeError,
sapi.EnggVanadiumCorrections,
IntegrationWorkspace=self.__class__._van_integ_tbl,
CurveWorkspace=self.__class__._van_curves_ws,
)
        # misspelled IntegrationWorkspace
self.assertRaises(
RuntimeError,
sapi.EnggVanadiumCorrections,
IntegWorkspace=self.__class__._van_integ_tbl,
CurvesWorkspace=self.__class__._van_curves_ws,
)
        # misspelled SplineBreakPoints
self.assertRaises(
RuntimeError,
sapi.EnggVanadiumCorrections,
BreakPoints=self.__class__._van_integ_tbl,
IntegrationWorkspace=self.__class__._van_integ_tbl,
CurvesWorkspace=self.__class__._van_curves_ws,
)
# validation of SplineBreakPoints value bounds fails
self.assertRaises(
ValueError,
sapi.EnggVanadiumCorrections,
SplineBreakPoints=-1,
IntegrationWorkspace=self.__class__._van_integ_tbl,
CurvesWorkspace=self.__class__._van_curves_ws,
)
self.assertRaises(
ValueError,
sapi.EnggVanadiumCorrections,
SplineBreakPoints=0,
IntegrationWorkspace=self.__class__._van_integ_tbl,
CurvesWorkspace=self.__class__._van_curves_ws,
)
self.assertRaises(
ValueError,
sapi.EnggVanadiumCorrections,
SplineBreakPoints=3,
IntegrationWorkspace=self.__class__._van_integ_tbl,
CurvesWorkspace=self.__class__._van_curves_ws,
)
def _check_corrected_ws(self, wks):
self.assertEqual(wks.getAxis(0).getUnit().unitID(), "TOF")
self.assertEqual(wks.getAxis(1).getUnit().unitID(), "Label")
self.assertEqual(wks.getNumberHistograms(), self.NUM_SPEC)
def _check_integ_ws(self, wks):
self.assertTrue(isinstance(wks, ITableWorkspace), "The integration workspace should be a table workspace.")
self.assertEqual(wks.columnCount(), 1)
self.assertEqual(wks.rowCount(), self.NUM_SPEC)
def _check_curves_ws(self, wks):
self.assertEqual(0, wks.getNumberHistograms() % 3)
        self.assertTrue(isinstance(wks, MatrixWorkspace), "The curves workspace should be a matrix workspace.")
def METHOD_NAME(self):
"""
Checks normal operation, re-using previously calculated integrations and curves from
Vanadium run data
"""
sample_ws = self.__class__._data_ws
int_ws = self.__class__._van_integ_tbl
curves_ws = self.__class__._van_curves_ws
sapi.EnggVanadiumCorrections(Workspace=sample_ws, IntegrationWorkspace=int_ws, CurvesWorkspace=curves_ws)
self._check_corrected_ws(sample_ws)
self._check_integ_ws(int_ws)
self._check_curves_ws(curves_ws)
# This is disabled because it would require loading the big vanadium run file. This is tested
# in the EnggCalibration system test
def disabled_test_runs_ok_when_calculating(self):
"""
Checks normal operation, when calculating integrations and curves from Vanadium run data
"""
sample_ws = self.__class__._data_ws
integ_ws_name = "calc_integ_ws"
curves_ws_name = "calc_curves_ws"
sapi.EnggVanadiumCorrections(
Workspace=sample_ws,
VanadiumWorkspace=self.__class__._van_ws,
IntegrationWorkspace=integ_ws_name,
CurvesWorkspace=curves_ws_name,
)
self._check_corrected_ws(sample_ws)
self._check_integ_ws(integ_ws_name)
self._check_curves_ws(curves_ws_name)
if __name__ == "__main__":
unittest.main()
| null |
5,056 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.functional import vmap
from mindspore.common.api import jit
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def fast_gelu_grad_compute(x, dy):
"""FastGeluGradCompute."""
div_up = np.exp(-1.702 * x) + 1.702 * x * np.exp(-1.702 * x) + 1
div_down = (np.exp(-1.702 * x) + 1) ** 2
return dy * div_up / div_down
def fast_gelu_compute(x):
"""FastGeluCompute."""
return x * np.exp(0.851 * (x - np.abs(x))) / (1 + np.exp(-1.702 * np.abs(x)))
class FastGeluNet(nn.Cell):
"""FastGeluNet."""
def __init__(self):
"""Init."""
super(FastGeluNet, self).__init__()
self.fast_gelu = P.FastGeLU()
def construct(self, x):
"""Construct."""
return self.fast_gelu(x)
class FastGeLUGrad(nn.Cell):
"""FastGeLUGrad."""
def __init__(self, network):
"""Init."""
super(FastGeLUGrad, self).__init__()
self.fast_gelu_grad = C.GradOperation(get_all=True, sens_param=True)
self.network = network
def construct(self, input_data, sens):
"""Construct."""
gout = self.fast_gelu_grad(self.network)(input_data, sens)
return gout
def np_all_close_with_loss(out, expect):
"""np_all_close_with_loss"""
return np.allclose(out, expect, 0.005, 0.005, equal_nan=True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6)])
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_fast_gelu_grad(shape, dtype):
"""
Feature: FastGeLUGrad gpu kernel
    Description: test the correctness of the FastGeLUGrad gpu kernel.
Expectation: Success.
"""
prop = 1 if np.random.random() > 0.5 else -1
dy_np = (np.random.randn(*shape) * prop).astype(dtype)
x_np = (np.random.randn(*shape) * prop).astype(dtype)
expect = fast_gelu_grad_compute(dy_np, x_np)
dy_ms = Tensor(dy_np)
x_ms = Tensor(x_np)
net = FastGeluNet()
grad = FastGeLUGrad(net)
output = grad(dy_ms, x_ms)
assert np_all_close_with_loss(output[0].asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6)])
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_fast_gelu(shape, dtype):
"""
Feature: FastGeLU gpu kernel
    Description: test the correctness of the FastGeLU gpu kernel.
Expectation: Success.
"""
prop = 100 if np.random.random() > 0.5 else -100
x_np = (np.random.randn(*shape) * prop).astype(dtype)
y_np = fast_gelu_compute(x_np)
x_ms = Tensor(x_np)
net = FastGeluNet()
y_ms = net(x_ms)
assert np_all_close_with_loss(y_np, y_ms.asnumpy())
x_ms = Tensor(x_np)
y_fun = F.fast_gelu(x_ms)
assert np_all_close_with_loss(y_np, y_fun.asnumpy())
x_ms = Tensor(x_np)
fast_gelu_nn = nn.FastGelu()
y_nn = fast_gelu_nn(x_ms)
assert np_all_close_with_loss(y_np, y_nn.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_fast_gelu_grad_vmap(dtype, shape=(100, 2)):
"""
Feature: FastGeLUGrad gpu kernel
    Description: test the correctness of the FastGeLUGrad gpu kernel vmap feature.
Expectation: Success.
"""
net = FastGeluNet()
grad = FastGeLUGrad(net)
def METHOD_NAME(dy, x):
"""fast_gelu_grad_func"""
output = grad(dy, x)
return output[0]
prop = 1 if np.random.random() > 0.5 else -1
dy_np = (np.random.randn(*shape) * prop).astype(dtype)
x_np = (np.random.randn(*shape) * prop).astype(dtype)
dy = Tensor(dy_np)
x = Tensor(x_np)
dy = F.sub(dy, 0)
x = F.sub(x, 0)
output_vmap = vmap(METHOD_NAME, in_axes=(0, 0))(dy, x)
@jit
def manually_batched(dys, xs):
"""manually_batched"""
output = []
for i in range(dys.shape[0]):
output.append(METHOD_NAME(dys[i], xs[i]))
return F.stack(output)
output_manually = manually_batched(dy, x)
assert np_all_close_with_loss(output_vmap.asnumpy(), output_manually.asnumpy())
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_fast_gelu_vmap(dtype, shape=(100, 2)):
"""
Feature: FastGeLU gpu kernel
    Description: test the correctness of the FastGeLU gpu kernel vmap feature.
Expectation: Success.
"""
def fast_gelu_func(x):
"""fast_gelu_func"""
return P.FastGeLU()(x)
prop = 100 if np.random.random() > 0.5 else -100
x_np = (np.random.randn(*shape) * prop).astype(dtype)
x = Tensor(x_np)
x = F.sub(x, 0)
output_vmap = vmap(fast_gelu_func, in_axes=(0,))(x)
@jit
def manually_batched(xs):
"""manually_batched"""
output = []
for i in range(xs.shape[0]):
output.append(fast_gelu_func(xs[i]))
return F.stack(output)
output_manually = manually_batched(x)
assert np_all_close_with_loss(output_vmap.asnumpy(), output_manually.asnumpy())
| null |
5,057 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.extensions.google_cloud_big_query.ml.pusher.executor."""
import copy
import os
from typing import Any, Dict
from unittest import mock
from google.cloud import bigquery
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_big_query.pusher import executor
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import json_utils
class ExecutorTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._source_data_dir = os.path.join(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
'components', 'testdata')
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
fileio.makedirs(self._output_data_dir)
self._model_export = standard_artifacts.Model()
self._model_export.uri = os.path.join(self._source_data_dir,
'trainer/current')
self._model_blessing = standard_artifacts.ModelBlessing()
self._input_dict = {
'model': [self._model_export],
'model_blessing': [self._model_blessing],
}
self._model_push = standard_artifacts.PushedModel()
self._model_push.uri = 'gs://bucket/test_model_path'
self._output_dict = {
'pushed_model': [self._model_push],
}
self._exec_properties = {
'custom_config': {
'bigquery_serving_args': {
'model_name': 'model_name',
'project_id': 'project_id',
'bq_dataset_id': 'bq_dataset_id',
'compute_project_id': 'compute_project_id',
},
},
'push_destination': None,
}
self._executor = executor.Executor()
# Setting up Mock for external services
self.addCleanup(mock.patch.stopall)
self.mock_bq = mock.patch.object(bigquery, 'Client', autospec=True).start()
self.mock_check_blessing = mock.patch.object(
executor.Executor, 'CheckBlessing', autospec=True).start()
self.mock_copy_dir = mock.patch.object(
io_utils, 'copy_dir', autospec=True).start()
def _serialize_custom_config_under_test(self) -> Dict[str, Any]:
"""Converts self._exec_properties['custom_config'] to string."""
result = copy.deepcopy(self._exec_properties)
result['custom_config'] = json_utils.dumps(result['custom_config'])
return result
def METHOD_NAME(self):
self.mock_copy_dir.assert_called_with(
src=mock.ANY, dst=self._model_push.uri)
self.assertEqual(1, self._model_push.get_int_custom_property('pushed'))
def assertNotPushed(self):
self.assertEqual(0, self._model_push.get_int_custom_property('pushed'))
def testPipelineRoot(self):
self._model_push.uri = '/none_gcs_pipeline_root'
with self.assertRaises(ValueError):
self._executor.Do(self._input_dict, self._output_dict,
self._serialize_custom_config_under_test())
def testBigQueryServingArgs(self):
temp_exec_properties = {
'custom_config': json_utils.dumps({}),
'push_destination': None,
}
with self.assertRaises(ValueError):
self._executor.Do(self._input_dict, self._output_dict,
temp_exec_properties)
def testDoBlessed(self):
self.mock_check_blessing.return_value = True
self._executor.Do(self._input_dict, self._output_dict,
self._serialize_custom_config_under_test())
self.mock_bq.assert_called_once()
self.METHOD_NAME()
def testDoNotBlessed(self):
self.mock_check_blessing.return_value = False
self._executor.Do(self._input_dict, self._output_dict,
self._serialize_custom_config_under_test())
self.mock_bq.assert_not_called()
self.assertNotPushed()
if __name__ == '__main__':
tf.test.main()
| null |
5,058 |
from celery import Celery
from celery.schedules import crontab
from celery.app.trace import build_tracer
from celery.app.control import Inspect
from celery.backends.base import DisabledBackend
from jaseci.jsorc.jsorc import JsOrc
from jaseci.jsorc.jsorc_utils import ManifestType
from .tasks import Queue, ScheduledWalker, ScheduledSequence
#################################################
# TASK APP #
#################################################
@JsOrc.service(name="task", config="TASK_CONFIG")
class TaskService(JsOrc.CommonService):
###################################################
# INITIALIZER #
###################################################
def __init__(
self,
config: dict,
manifest: dict,
manifest_type: ManifestType = ManifestType.DEDICATED,
source: dict = {},
):
self.inspect: Inspect = None
self.queue: Queue = None
self.scheduled_walker: ScheduledWalker = None
self.scheduled_sequence: ScheduledSequence = None
super().__init__(config, manifest, manifest_type, source)
###################################################
# BUILDER #
###################################################
def run(self):
self.app = Celery("celery")
self.app.conf.update(**self.config)
# -------------------- TASKS -------------------- #
(
self.queue,
self.scheduled_walker,
self.scheduled_sequence,
) = self.register_tasks(Queue, ScheduledWalker, ScheduledSequence)
# ------------------ INSPECTOR ------------------ #
self.inspect = self.app.control.inspect()
self.METHOD_NAME()
def post_run(self):
self.spawn_daemon(
worker=self.app.Worker(quiet=self.quiet).start,
scheduler=self.app.Beat(socket_timeout=None, quiet=self.quiet).run,
)
def register_tasks(self, *tasks) -> tuple:
registered = []
for task in tasks:
task = self.app.register_task(task())
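            # Manually registered tasks still need an execution tracer attached
            # so Celery can run them through its normal tracing path.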
task.__trace__ = build_tracer(task.name, task, app=self.app)
registered.append(task)
return tuple(registered)
###################################################
# COMMON GETTER/SETTER #
###################################################
def get_by_task_id(self, task_id, wait=False, timeout=30):
task = self.app.AsyncResult(task_id)
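        # With the result backend disabled, Celery cannot report task state, so
        # return early instead of querying the backend.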
if isinstance(task.backend, DisabledBackend):
return {
"status": "DISABLED",
"result": "result_backend is set to disabled!",
}
ret = {"status": task.state}
if task.ready():
ret["result"] = task.result
elif wait:
ret["status"] = "SUCCESS"
ret["result"] = task.get(timeout=timeout, disable_sync_subtasks=False)
return ret
def inspect_tasks(self):
return {
"scheduled": self.inspect.scheduled(),
"active": self.inspect.active(),
"reserved": self.inspect.reserved(),
}
def METHOD_NAME(self): # will throw exception
self.inspect.METHOD_NAME()
self.app.AsyncResult("").result
###################################################
# QUEUING #
###################################################
def add_queue(self, wlk, nd, *args):
return self.queue.delay(wlk.jid, nd.jid, args).task_id
###################################################
# SCHEDULED QUEUING #
###################################################
def add_scheduled_queue(
self, queue_type: object, name: str, schedule: dict, body: dict
):
return None
def get_scheduled_queues(
self,
limit: int = 10,
offset: int = 0,
asc: bool = True,
name: str = None,
master=False,
):
return []
def delete_scheduled_queue(self, scheduled_queue_id: int, master):
return None
###################################################
# CLEANER #
###################################################
def failed(self, error):
super().failed(error)
self.terminate_daemon("worker", "scheduler")
# ---------------- PROXY EVENTS ----------------- #
def on_delete(self):
self.terminate_daemon("worker", "scheduler")
###################################################
# UTILS #
###################################################
def get_task_name(self, task):
cls = type(task)
module = cls.__module__
name = cls.__qualname__
if module is not None and module != "__builtin__":
name = module + "." + name
return name
| null |
5,059 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for executor of Transform."""
import functools
import os
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence
from absl import logging
from tfx import types
from tfx.components.transform import labels
from tfx.components.util import value_utils
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_component_specs
from tfx.utils import json_utils
from tfx.utils import proto_utils
# Default file name prefix for transformed_examples.
_DEFAULT_TRANSFORMED_EXAMPLES_PREFIX = 'transformed_examples'
def MaybeBindCustomConfig(inputs: Mapping[str, Any],
fn: Any) -> Callable[..., Any]:
# For compatibility, only bind custom config if it's in the signature.
if value_utils.FunctionHasArg(fn, labels.CUSTOM_CONFIG):
custom_config_json = value_utils.GetSoleValue(inputs, labels.CUSTOM_CONFIG)
custom_config = (json_utils.loads(custom_config_json)
if custom_config_json else {}) or {}
fn = functools.partial(fn, custom_config=custom_config)
return fn
def METHOD_NAME(inputs: Mapping[str, Any],
keys: Sequence[str],
allow_missing: bool = False) -> bool:
"""Check whether only one of given keys are specified in the input.
NOTE: False-equivalent values like 0, '' are regarded as not specified.
Args:
inputs: input dictionary.
keys: keys to check the existence of values.
allow_missing: If False, one of keys should be set in inputs.
Returns:
True if one of the key has a value.
Raises:
ValueError: if none of the keys have non empty value in the input.
"""
counter = 0
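  # Count keys with truthy values; falsy values (0, '', None) count as unspecified.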
for key in keys:
counter += int(bool(value_utils.GetSoleValue(inputs, key, strict=False)))
keys_str = ', '.join(keys)
if counter > 1:
raise ValueError(
f'At most one of {keys_str} should be supplied in the input.')
elif counter == 0 and not allow_missing:
raise ValueError(f'One of {keys_str} should be supplied in the input.')
return counter > 0
def MatchNumberOfTransformedExamplesArtifacts(
input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]]) -> None:
"""Alters output_dict to have the same number of examples to input.
If there are multiple input Examples artifacts, replicate Examples artifact
in output_dict to have the same number of artifacts. The resulting artifact
will have URIs that is located under the original output uri.
No-op if there is one or less Examples artifact in the input_dict.
Args:
input_dict: input artifact dictionary of the Executor.
output_dict: output artifact dictionary of the Executor.
"""
num_examples = len(input_dict[standard_component_specs.EXAMPLES_KEY])
if (num_examples > 1 and
standard_component_specs.TRANSFORMED_EXAMPLES_KEY in output_dict and
len(output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY]) == 1):
output_dict[standard_component_specs
.TRANSFORMED_EXAMPLES_KEY] = artifact_utils.replicate_artifacts(
output_dict[
standard_component_specs.TRANSFORMED_EXAMPLES_KEY][0],
num_examples)
def ResolveSplitsConfig(
splits_config_str: Optional[str],
examples: List[types.Artifact]) -> transform_pb2.SplitsConfig:
"""Resolve SplitsConfig proto for the transfrom request."""
result = transform_pb2.SplitsConfig()
if splits_config_str:
proto_utils.json_to_proto(splits_config_str, result)
if not result.analyze:
raise ValueError('analyze cannot be empty when splits_config is set.')
return result
result.analyze.append('train')
# All input artifacts should have the same set of split names.
split_names = set(artifact_utils.decode_split_names(examples[0].split_names))
for artifact in examples:
artifact_split_names = set(
artifact_utils.decode_split_names(artifact.split_names))
if split_names != artifact_split_names:
raise ValueError(
'Not all input artifacts have the same split names: (%s, %s)' %
(split_names, artifact_split_names))
result.transform.extend(split_names)
logging.info("Analyze the 'train' split and transform all splits when "
'splits_config is not set.')
return result
def SetSplitNames(
splits: Sequence[str],
transformed_examples: Optional[List[types.Artifact]]) -> None:
"""Sets split_names property of input artifacts."""
if not transformed_examples:
return
for artifact in transformed_examples:
artifact.split_names = artifact_utils.encode_split_names(list(splits))
def GetSplitPaths(
transformed_examples: Optional[List[types.Artifact]]) -> List[str]:
"""Gets all paths for splits in the input artifacts."""
result = []
if not transformed_examples:
return result
splits = artifact_utils.decode_split_names(
transformed_examples[0].split_names)
for split in splits:
transformed_example_uris = artifact_utils.get_split_uris(
transformed_examples, split)
for output_uri in transformed_example_uris:
result.append(
os.path.join(output_uri, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX))
return result
def GetCachePathEntry(
label: str, params_dict: Dict[str, List[types.Artifact]]) -> Dict[str, str]:
"""Returns a cachePath entry if label exists in params_dict."""
# Covers the cases: path wasn't provided, or was provided an empty list.
if not params_dict.get(label):
return {}
if label == standard_component_specs.ANALYZER_CACHE_KEY:
dict_key = labels.CACHE_INPUT_PATH_LABEL
elif label == standard_component_specs.UPDATED_ANALYZER_CACHE_KEY:
dict_key = labels.CACHE_OUTPUT_PATH_LABEL
return {dict_key: artifact_utils.get_single_uri(params_dict[label])}
def GetStatsOutputPathEntries(
disable_statistics: bool,
output_dict: Dict[str, List[types.Artifact]]) -> Dict[str, str]:
"""Returns output entries for stats output path."""
label_component_key_list = [
(labels.PRE_TRANSFORM_OUTPUT_STATS_PATH_LABEL,
standard_component_specs.PRE_TRANSFORM_STATS_KEY),
(labels.PRE_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL,
standard_component_specs.PRE_TRANSFORM_SCHEMA_KEY),
(labels.POST_TRANSFORM_OUTPUT_ANOMALIES_PATH_LABEL,
standard_component_specs.POST_TRANSFORM_ANOMALIES_KEY),
(labels.POST_TRANSFORM_OUTPUT_STATS_PATH_LABEL,
standard_component_specs.POST_TRANSFORM_STATS_KEY),
(labels.POST_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL,
standard_component_specs.POST_TRANSFORM_SCHEMA_KEY)
]
result = {}
if not disable_statistics:
for label, component_key in label_component_key_list:
if component_key in output_dict:
result[label] = artifact_utils.get_single_uri(
output_dict[component_key])
if result and len(result) != len(label_component_key_list):
raise ValueError(
'Either all stats_output_paths should be specified or none.')
return result
| null |
5,060 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtWidgets
from qtpy.QtCore import Signal
import mantidqtinterfaces.Muon.GUI.Common.message_box as message_box
from mantidqtinterfaces.Muon.GUI.Common.utilities.muon_file_utils import show_file_browser_and_return_selection
class BrowseFileWidgetView(QtWidgets.QWidget):
# signals for use by parent widgets
loadingStarted = Signal()
loadingFinished = Signal()
dataChanged = Signal()
def __init__(self, parent=None):
super(BrowseFileWidgetView, self).__init__(parent)
self.horizontal_layout = None
self.browse_button = None
self.file_path_edit = None
self.METHOD_NAME()
self._store_edit_text = False
self._stored_edit_text = ""
self._cached_text = ""
self.set_file_edit("No data loaded", False)
self.file_path_edit.setReadOnly(True)
def METHOD_NAME(self):
self.setObjectName("BrowseFileWidget")
self.resize(500, 100)
self.browse_button = QtWidgets.QPushButton(self)
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.browse_button.sizePolicy().hasHeightForWidth())
self.browse_button.setSizePolicy(size_policy)
self.browse_button.setObjectName("browseButton")
self.browse_button.setText("Browse")
self.file_path_edit = QtWidgets.QLineEdit(self)
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.file_path_edit.sizePolicy().hasHeightForWidth())
self.file_path_edit.setSizePolicy(size_policy)
self.file_path_edit.setToolTip("")
self.file_path_edit.setObjectName("filePathEdit")
self.setStyleSheet("QLineEdit {background: #d7d6d5}")
self.horizontal_layout = QtWidgets.QHBoxLayout()
self.horizontal_layout.setObjectName("horizontalLayout")
self.horizontal_layout.addWidget(self.browse_button)
self.horizontal_layout.addWidget(self.file_path_edit)
self.horizontal_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.horizontal_layout)
def getLayout(self):
return self.horizontal_layout
def on_browse_clicked(self, slot):
self.browse_button.clicked.connect(slot)
def on_file_edit_changed(self, slot):
self.file_path_edit.returnPressed.connect(slot)
def show_file_browser_and_return_selection(self, file_filter, search_directories, multiple_files=False):
return show_file_browser_and_return_selection(self, file_filter, search_directories, multiple_files)
def disable_loading(self):
self.disable_load_buttons()
self.loadingStarted.emit()
def enable_loading(self):
self.enable_load_buttons()
self.loadingFinished.emit()
def notify_loading_started(self):
self.loadingStarted.emit()
def notify_loading_finished(self):
self.loadingFinished.emit()
self.dataChanged.emit()
def disable_load_buttons(self):
self.browse_button.setEnabled(False)
self.file_path_edit.setEnabled(False)
def enable_load_buttons(self):
self.browse_button.setEnabled(True)
self.file_path_edit.setEnabled(True)
def get_file_edit_text(self):
if self._store_edit_text:
return str(self._stored_edit_text)
else:
return str(self.file_path_edit.text())
def set_file_edit(self, text, store=False):
if store:
self._store_edit_text = True
self._stored_edit_text = text
self.file_path_edit.setText("(... more than 10 files, use right-click -> copy)")
else:
self.file_path_edit.setText(text)
self._cached_text = self.get_file_edit_text()
def clear(self):
self.set_file_edit("No data loaded")
self._store_edit_text = False
self._cached_text = "No data loaded"
def reset_edit_to_cached_value(self):
tmp = self._cached_text
self.set_file_edit(tmp)
self._cached_text = tmp
def warning_popup(self, message):
message_box.warning(str(message))
def hide_browse(self):
self.browse_button.hide()
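# Usage sketch (assumes a running QApplication, e.g. inside Workbench); shows
# how a presenter might wire this view's slots. The handler below is
# hypothetical and not part of the Muon GUI presenters.
def _example_wire_view(view):
    def _handle_browse():
        selection = view.show_file_browser_and_return_selection(
            "Nexus files (*.nxs)", [""], multiple_files=True)
        if selection:
            view.set_file_edit(";".join(selection))

    view.on_browse_clicked(_handle_browse)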
| null |
5,061 |
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 20-05-2020 #
# Author: Vincenzo Lomonaco #
# E-mail: [email protected] #
# Website: continualai.org #
################################################################################
from pathlib import Path
from typing import Union, Optional, Any
from torchvision.transforms import Compose, ToTensor, Resize
from avalanche.benchmarks.classic.classic_benchmarks_utils import (
check_vision_benchmark,
)
from avalanche.benchmarks.datasets import CUB200
from avalanche.benchmarks import nc_benchmark
from torchvision import transforms
_default_train_transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
_default_eval_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
def SplitCUB200(
n_experiences=11,
*,
classes_first_batch=100,
return_task_id=False,
seed=0,
fixed_class_order=None,
shuffle=False,
class_ids_from_zero_in_each_exp: bool = False,
class_ids_from_zero_from_first_exp: bool = False,
train_transform: Optional[Any] = _default_train_transform,
eval_transform: Optional[Any] = _default_eval_transform,
dataset_root: Optional[Union[str, Path]] = None
):
"""
Creates a CL benchmark using the Cub-200 dataset.
    If the dataset is not present on the machine, **this method will NOT be
    able to automatically download** and store it.
The returned benchmark will return experiences containing all patterns of a
subset of classes, which means that each class is only seen "once".
This is one of the most common scenarios in the Continual Learning
literature. Common names used in literature to describe this kind of
scenario are "Class Incremental", "New Classes", etc. By default,
an equal amount of classes will be assigned to each experience.
This generator doesn't force a choice on the availability of task labels,
a choice that is left to the user (see the `return_task_id` parameter for
more info on task labels).
The benchmark instance returned by this method will have two fields,
`train_stream` and `test_stream`, which can be iterated to obtain
training and test :class:`Experience`. Each Experience contains the
`dataset` and the associated task label.
The benchmark API is quite simple and is uniform across all benchmark
generators. It is recommended to check the tutorial of the "benchmark" API,
which contains usage examples ranging from "basic" to "advanced".
:param n_experiences: The number of experiences in the current benchmark.
Defaults to 11.
:param classes_first_batch: Number of classes in the first batch.
Usually this is set to 500. Defaults to 100.
:param return_task_id: if True, a progressive task id is returned for every
experience. If False, all experiences will have a task ID of 0.
:param seed: A valid int used to initialize the random number generator.
Can be None.
:param fixed_class_order: A list of class IDs used to define the class
order. If None, value of ``seed`` will be used to define the class
order. If non-None, ``seed`` parameter will be ignored.
Defaults to None.
:param shuffle: If true, the class order in the incremental experiences is
        randomly shuffled. Defaults to False.
:param class_ids_from_zero_in_each_exp: If True, original class IDs
will be mapped to range [0, n_classes_in_exp) for each experience.
Defaults to False. Mutually exclusive with the
``class_ids_from_zero_from_first_exp`` parameter.
:param class_ids_from_zero_from_first_exp: If True, original class IDs
will be remapped so that they will appear as having an ascending
order. For instance, if the resulting class order after shuffling
(or defined by fixed_class_order) is [23, 34, 11, 7, 6, ...] and
class_ids_from_zero_from_first_exp is True, then all the patterns
belonging to class 23 will appear as belonging to class "0",
class "34" will be mapped to "1", class "11" to "2" and so on.
This is very useful when drawing confusion matrices and when dealing
with algorithms with dynamic head expansion. Defaults to False.
Mutually exclusive with the ``class_ids_from_zero_in_each_exp``
parameter.
:param train_transform: The transformation to apply to the training data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations).
If no transformation is passed, the default train transformation
will be used.
:param eval_transform: The transformation to apply to the test data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations).
If no transformation is passed, the default test transformation
will be used.
:param dataset_root: The root path of the dataset.
Defaults to None, which means that the default location for
'CUB_200_2011' will be used.
:returns: A properly initialized :class:`NCScenario` instance.
"""
train_set, test_set = METHOD_NAME(dataset_root)
if classes_first_batch is not None:
per_exp_classes = {0: classes_first_batch}
else:
per_exp_classes = None
return nc_benchmark(
train_dataset=train_set,
test_dataset=test_set,
n_experiences=n_experiences,
task_labels=return_task_id,
per_exp_classes=per_exp_classes,
seed=seed,
fixed_class_order=fixed_class_order,
shuffle=shuffle,
one_dataset_per_exp=True,
class_ids_from_zero_in_each_exp=class_ids_from_zero_in_each_exp,
class_ids_from_zero_from_first_exp=class_ids_from_zero_from_first_exp,
train_transform=train_transform,
eval_transform=eval_transform,
)
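# Usage sketch (not executed on import): build the benchmark and walk the
# training stream. Assumes the CUB_200_2011 data already exists at the
# placeholder `dataset_root` below, since this benchmark does not download it.
def _example_iterate_benchmark():
    benchmark_instance = SplitCUB200(
        n_experiences=11, dataset_root="/data/CUB_200_2011")
    for experience in benchmark_instance.train_stream:
        # Each experience exposes the classes it introduces and a task label.
        print(experience.current_experience,
              experience.classes_in_this_experience,
              experience.task_label)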
def METHOD_NAME(root):
train_set = CUB200(root, train=True)
test_set = CUB200(root, train=False)
return train_set, test_set
__all__ = ["SplitCUB200"]
if __name__ == "__main__":
import sys
benchmark_instance = SplitCUB200(
5, train_transform=Compose([ToTensor(), Resize((128, 128))])
)
check_vision_benchmark(benchmark_instance, show_without_transforms=False)
sys.exit(0)
| null |
5,062 |
# Status: ported.
# Base revision: 64488
#
# Copyright (c) 2010 Vladimir Prus.
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
# This module defines function to help with two main tasks:
#
# - Discovering build-time configuration for the purposes of adjusting
# build process.
# - Reporting what is built, and how it is configured.
import b2.build.property as property
import b2.build.property_set as property_set
from b2.build import targets as targets_
from b2.manager import get_manager
from b2.util.sequence import unique
from b2.util import bjam_signature, value_to_jam, is_iterable
import bjam
import os
__width = 30
def set_width(width):
global __width
    __width = width
__components = []
__built_components = []
__component_logs = {}
__announced_checks = False
__log_file = None
__log_fd = -1
def register_components(components):
"""Declare that the components specified by the parameter exist."""
assert is_iterable(components)
__components.extend(components)
def components_building(components):
"""Declare that the components specified by the parameters will be build."""
assert is_iterable(components)
__built_components.extend(components)
def log_component_configuration(component, message):
"""Report something about component configuration that the user should better know."""
assert isinstance(component, basestring)
assert isinstance(message, basestring)
__component_logs.setdefault(component, []).append(message)
def log_check_result(result):
assert isinstance(result, basestring)
global __announced_checks
if not __announced_checks:
print "Performing configuration checks"
__announced_checks = True
print result
def log_library_search_result(library, result):
assert isinstance(library, basestring)
assert isinstance(result, basestring)
log_check_result((" - %(library)s : %(result)s" % locals()).rjust(__width))
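# Stand-alone illustration of the right-justified formatting used by the check
# logging above; it does not touch the module's global log state.
def _example_check_format(library, result, width=30):
    return (" - %(library)s : %(result)s" % locals()).rjust(width)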
def print_component_configuration():
print "\nComponent configuration:"
for c in __components:
if c in __built_components:
s = "building"
else:
s = "not building"
message = " - %s)" % c
message = message.rjust(__width)
message += " : " + s
for m in __component_logs.get(c, []):
print " -" + m
print ""
__builds_cache = {}
def builds(metatarget_reference, project, ps, what):
# Attempt to build a metatarget named by 'metatarget-reference'
# in context of 'project' with properties 'ps'.
# Returns non-empty value if build is OK.
assert isinstance(metatarget_reference, basestring)
assert isinstance(project, targets_.ProjectTarget)
assert isinstance(ps, property_set.PropertySet)
assert isinstance(what, basestring)
result = []
existing = __builds_cache.get((what, ps), None)
if existing is None:
result = False
__builds_cache[(what, ps)] = False
targets = targets_.generate_from_reference(
metatarget_reference, project, ps).targets()
jam_targets = []
for t in targets:
jam_targets.append(t.actualize())
x = (" - %s" % what).rjust(__width)
if bjam.call("UPDATE_NOW", jam_targets, str(__log_fd), "ignore-minus-n"):
__builds_cache[(what, ps)] = True
result = True
log_check_result("%s: yes" % x)
else:
log_check_result("%s: no" % x)
return result
else:
return existing
def set_log_file(log_file_name):
assert isinstance(log_file_name, basestring)
# Called by Boost.Build startup code to specify name of a file
# that will receive results of configure checks. This
# should never be called by users.
global __log_file, __log_fd
dirname = os.path.dirname(log_file_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
# Make sure to keep the file around, so that it's not
# garbage-collected and closed
__log_file = open(log_file_name, "w")
__log_fd = __log_file.fileno()
# Frontend rules
class CheckTargetBuildsWorker:
def __init__(self, target, true_properties, false_properties):
self.target = target
self.true_properties = property.create_from_strings(true_properties, True)
self.false_properties = property.create_from_strings(false_properties, True)
def check(self, ps):
assert isinstance(ps, property_set.PropertySet)
# FIXME: this should not be hardcoded. Other checks might
# want to consider different set of features as relevant.
toolset = ps.get('toolset')[0]
        toolset_version_property = "<toolset-" + toolset + ":version>"
relevant = ps.get_properties('target-os') + \
ps.get_properties("toolset") + \
ps.get_properties(toolset_version_property) + \
ps.get_properties("address-model") + \
ps.get_properties("architecture")
rps = property_set.create(relevant)
t = get_manager().targets().current()
p = t.project()
if builds(self.target, p, rps, "%s builds" % self.target):
            chosen = self.true_properties
        else:
            chosen = self.false_properties
        return property.evaluate_conditionals_in_context(chosen, ps)
@bjam_signature((["target"], ["true_properties", "*"], ["false_properties", "*"]))
def METHOD_NAME(target, true_properties, false_properties):
worker = CheckTargetBuildsWorker(target, true_properties, false_properties)
value = value_to_jam(worker.check)
return "<conditional>" + value
get_manager().projects().add_rule("check-target-builds", METHOD_NAME)
| null |
5,063 |
import textwrap
import time
import subprocess
import os
import pytest
import conftest
from conftest import BINDIR, HlwmProcess
def test_herbstluftwm_default_autostart(hlwm):
expected_tags = [str(tag) for tag in range(1, 10)]
default_autostart = os.path.join(os.path.abspath(BINDIR), 'share/autostart')
env_with_bindir_path = os.environ.copy()
env_with_bindir_path['PATH'] = BINDIR + ":" + env_with_bindir_path['PATH']
subprocess.run(['bash', '-e', default_autostart], check=True, env=env_with_bindir_path)
assert hlwm.list_children('tags.by-name') == sorted(expected_tags)
# Test a random setting different from the default in settings.h:
assert hlwm.get_attr('settings.smart_frame_surroundings') == 'hide_all'
@pytest.mark.parametrize("method", ['home', 'xdg', 'shortopt', 'longopt'])
def test_autostart_path(tmpdir, method, xvfb):
# herbstluftwm environment:
env = {
'DISPLAY': xvfb.display,
}
args = [] # extra command line args
if method == 'home':
autostart = tmpdir / '.config' / 'herbstluftwm' / 'autostart'
env['HOME'] = str(tmpdir)
elif method == 'xdg':
autostart = tmpdir / 'herbstluftwm' / 'autostart'
env['XDG_CONFIG_HOME'] = str(tmpdir)
elif method == 'longopt':
autostart = tmpdir / 'somename'
args += ['--autostart', str(autostart)]
else:
autostart = tmpdir / 'somename'
args += ['-c', str(autostart)]
autostart.ensure()
autostart.write(textwrap.dedent("""
#!/usr/bin/env bash
echo "hlwm autostart test"
""".lstrip('\n')))
autostart.chmod(0o755)
env = conftest.extend_env_with_whitelist(env)
hlwm_proc = HlwmProcess('hlwm autostart test', env, args)
# TODO: verify the path as soon as we have an autostart object
hlwm_proc.shutdown()
def test_no_autostart(xvfb):
# no HOME, no XDG_CONFIG_HOME
env = {
'DISPLAY': xvfb.display,
}
env = conftest.extend_env_with_whitelist(env)
hlwm_proc = HlwmProcess('', env, [])
hlwm_proc.read_and_echo_output_until_stderr('Will not run autostart file.')
hlwm_proc.shutdown()
def wait_actively_for(callback):
"""wait actively for the callback to return True
"""
left_attempts = 20
while left_attempts > 0:
left_attempts -= 1
if callback() is True:
return
time.sleep(1)
assert False, "The returned value was not 'True'"
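# Minimal usage sketch of wait_actively_for(): poll a condition until it turns
# True. The file-based condition below is purely illustrative and is not used
# by the tests in this module.
def _example_wait_for_file(path):
    wait_actively_for(lambda: os.path.exists(path))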
def run_autostart(hlwm, tmpdir, autostart_src, wait=True):
tmpfile = tmpdir / 'custom_autostart'
hc_path = hlwm.HC_PATH
full_src = textwrap.dedent(f"""\
#!/usr/bin/env bash
hc() {{
{hc_path} "$@"
}}
""")
full_src += autostart_src
tmpfile.ensure()
tmpfile.write(full_src)
tmpfile.chmod(0o755)
hlwm.attr.autostart.path = tmpfile
hlwm.call('reload')
if wait:
# wait for the autostart to terminate:
wait_actively_for(lambda: not hlwm.attr.autostart.running())
def METHOD_NAME(hlwm, tmpdir):
run_autostart(hlwm,
tmpdir,
"""
hc new_attr int my_pid
# copy pid of bash to the attribute system
hc set_attr my_pid $$
""")
assert hlwm.attr.autostart.last_status() == 0
assert hlwm.attr.my_pid() == hlwm.attr.autostart.pid()
def test_autostart_running(hlwm, tmpdir):
run_autostart(hlwm,
tmpdir,
"""
hc new_attr bool my_running
# copy the value of 'running' during autostart execution
hc substitute VALUE autostart.running set_attr my_running VALUE
""")
assert hlwm.attr.autostart.last_status() == 0
assert hlwm.attr.my_running() is True
assert hlwm.attr.autostart.running() is False
def test_autostart_last_status(hlwm, tmpdir):
for status in [0, 1, 2, 4, 9]:
run_autostart(hlwm,
tmpdir,
f"""
exit {status}
""")
assert hlwm.attr.autostart.last_status() == status
def process_status(pid):
ps_cmd = ['ps', '-p', str(pid), '-o', 'state']
proc = subprocess.run(ps_cmd,
stdout=subprocess.PIPE,
universal_newlines=True)
return proc.stdout.splitlines()[1].strip()
def test_autostart_sigstop(hlwm, tmpdir):
run_autostart(hlwm,
tmpdir,
"""
hc new_attr string my_attr firststop
kill -STOP $$
hc set_attr my_attr secondstop
kill -STOP $$
hc set_attr my_attr final
""",
wait=False)
pid = hlwm.attr.autostart.pid()
# wait until the process is 'stopped'
wait_actively_for(lambda: process_status(pid) == 'T')
# then, it is still marked as 'running' in hlwm:
assert hlwm.attr.autostart.running() is True
assert hlwm.attr.my_attr() == 'firststop'
# resume it:
subprocess.run(['kill', '-CONT', str(pid)])
# wait until the process is 'stopped' again
wait_actively_for(lambda: process_status(pid) == 'T')
assert hlwm.attr.my_attr() == 'secondstop'
# then, it is still marked as 'running' in hlwm:
# by this we verify that the 'continuation' did not
# trigger the wrong signal handler
assert hlwm.attr.autostart.running() is True
# finally, resume it a second time and let it terminate:
subprocess.run(['kill', '-CONT', str(pid)])
wait_actively_for(lambda: not hlwm.attr.autostart.running())
assert hlwm.attr.my_attr() == 'final'
| null |
5,064 |
#!/usr/bin/env python
"""Provides tests for array.py
"""
# SUPPORT2425
# from __future__ import with_statement
from unittest import TestCase, main
from warnings import filterwarnings
from numpy import array, transpose
from numpy.testing import assert_allclose, assert_equal
from cogent3.maths.util import (
column_degeneracy,
column_uncertainty,
row_degeneracy,
row_uncertainty,
safe_log,
safe_p_log_p,
)
filterwarnings("ignore", "invalid value encountered in", category=RuntimeWarning)
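# Worked illustration (base-2 logs, matching the tests below): for a uniform
# 4-way distribution each term contributes -p*log2(p) = -0.25*(-2) = 0.5, so
# the row uncertainty (Shannon entropy) sums to 2 bits.
def _example_entropy_by_hand():
    from numpy import log2

    p = array([0.25, 0.25, 0.25, 0.25])
    return -(p * log2(p)).sum()  # 2.0 bits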
class ArrayMathTests(TestCase):
def test_safe_p_log_p(self):
"""safe_p_log_p: should handle pos/neg/zero/empty arrays"""
# normal valid array
a = array([[4, 0, 8], [2, 16, 4]])
assert_equal(safe_p_log_p(a), array([[-8, 0, -24], [-2, -64, -8]]))
# just zeros
a = array([[0, 0], [0, 0]])
assert_equal(safe_p_log_p(a), array([[0, 0], [0, 0]]))
# negative number -- throw error
with self.assertRaises(FloatingPointError):
safe_p_log_p(array([-4]))
# integer input, float output
assert_allclose(safe_p_log_p(array([3])), array([-4.75488750]))
# empty array
assert_equal(safe_p_log_p(array([])), array([]))
def test_safe_log(self):
"""safe_log: should handle pos/neg/zero/empty arrays"""
# normal valid array
a = array([[4, 0, 8], [2, 16, 4]])
assert_equal(safe_log(a), array([[2, 0, 3], [1, 4, 2]]))
# input integers, output floats
assert_allclose(safe_log(array([1, 2, 3])), array([0, 1, 1.5849625]))
# just zeros
a = array([[0, 0], [0, 0]])
assert_equal(safe_log(a), array([[0, 0], [0, 0]]))
# negative number
with self.assertRaises(FloatingPointError):
safe_log(array([0, 3, -4]))
# empty array
assert_equal(safe_log(array([])), array([]))
# double empty array
assert_equal(safe_log(array([[]])), array([[]]))
def METHOD_NAME(self):
"""row_uncertainty: should handle pos/neg/zero/empty arrays"""
# normal valid array
b = transpose(
array(
[
[0.25, 0.2, 0.45, 0.25, 1],
[0.25, 0.2, 0.45, 0, 0],
[0.25, 0.3, 0.05, 0.75, 0],
[0.25, 0.3, 0.05, 0, 0],
]
)
)
assert_allclose(row_uncertainty(b), [2, 1.97, 1.47, 0.81, 0], rtol=1e-2)
# one-dimensional array
self.assertRaises(ValueError, row_uncertainty, array([0.25, 0.25, 0.25, 0.25]))
# zeros
assert_equal(row_uncertainty(array([[0, 0]])), array([0]))
# empty 2D array
assert_equal(row_uncertainty(array([[]])), array([0]))
assert_equal(row_uncertainty(array([[], []])), array([0, 0]))
# negative number -- throw error
with self.assertRaises(FloatingPointError):
row_uncertainty(array([[-2]]))
def test_col_uncertainty(self):
"""column_uncertainty: should handle pos/neg/zero/empty arrays"""
b = array(
[
[0.25, 0.2, 0.45, 0.25, 1],
[0.25, 0.2, 0.45, 0, 0],
[0.25, 0.3, 0.05, 0.75, 0],
[0.25, 0.3, 0.05, 0, 0],
]
)
assert_allclose(column_uncertainty(b), [2, 1.97, 1.47, 0.81, 0], rtol=1e-2)
# one-dimensional array
self.assertRaises(
ValueError, column_uncertainty, array([0.25, 0.25, 0.25, 0.25])
)
# zeros
assert_equal(column_uncertainty(array([[0, 0]])), array([0, 0]))
# empty 2D array
assert_equal(column_uncertainty(array([[]])), array([]))
assert_equal(column_uncertainty(array([[], []])), array([]))
# negative number -- throw error
with self.assertRaises(FloatingPointError):
column_uncertainty(array([[-2]]))
def test_row_degeneracy(self):
"""row_degeneracy: should work with different cutoff values and arrays"""
a = array([[0.1, 0.3, 0.4, 0.2], [0.5, 0.3, 0, 0.2], [0.8, 0, 0.1, 0.1]])
assert_equal(row_degeneracy(a, cutoff=0.75), [3, 2, 1])
assert_equal(row_degeneracy(a, cutoff=0.95), [4, 3, 3])
# one-dimensional array
self.assertRaises(ValueError, row_degeneracy, array([0.25, 0.25, 0.25, 0.25]))
# if cutoff value is not found, results are clipped to the
# number of columns in the array
assert_equal(row_degeneracy(a, cutoff=2), [4, 4, 4])
# same behavior on empty array
assert_equal(row_degeneracy(array([[]])), [])
def test_column_degeneracy(self):
"""column_degeneracy: should work with different cutoff values"""
a = array([[0.1, 0.8, 0.3], [0.3, 0.2, 0.3], [0.6, 0, 0.4]])
assert_equal(column_degeneracy(a, cutoff=0.75), [2, 1, 3])
assert_equal(column_degeneracy(a, cutoff=0.45), [1, 1, 2])
# one-dimensional array
self.assertRaises(
ValueError, column_degeneracy, array([0.25, 0.25, 0.25, 0.25])
)
# if cutoff value is not found, results are clipped to the
# number of rows in the array
assert_equal(column_degeneracy(a, cutoff=2), [3, 3, 3])
# same behavior on empty array
assert_equal(column_degeneracy(array([[]])), [])
class TestUtils(TestCase):
def test_proportions_and_ratios(self):
"""interconverts proportions and ratios"""
from cogent3.maths.util import (
proportions_to_ratios,
ratios_to_proportions,
)
probs = array([0.3, 0.1, 0.1, 0.5])
ratios = proportions_to_ratios(probs)
assert_allclose(ratios, [0.6 / 0.4, 0.1 / 0.3, 0.5 / 0.1])
probs = array([0.3, 0.1, 0.6])
ratios = proportions_to_ratios(probs)
assert_allclose(ratios, [0.7 / 0.3, 0.6 / 0.1])
got = ratios_to_proportions(1, ratios)
assert_allclose(got, probs)
probs = array([0.3, 0.1, -0.1, 0.5])
with self.assertRaises(AssertionError):
proportions_to_ratios(probs)
probs = array([0.3, 0.1, 0.0, 0.5])
with self.assertRaises(AssertionError):
proportions_to_ratios(probs)
with self.assertRaises(AssertionError):
ratios_to_proportions(1.0, [2.3, 1.1, -0.3])
if __name__ == "__main__":
main()
| null |
5,065 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantidqtinterfaces.dns_powder_tof.data_structures.dns_treeitem import DNSTreeItem
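# Small sketch, independent of the test class below: build a two-level tree and
# navigate it, mirroring the parent/child bookkeeping exercised by the tests.
def _example_build_tree():
    root = DNSTreeItem(["root"], parent=None)
    child = root.appendChild(DNSTreeItem(["child"], parent=root))
    return root.childCount(), child.parent() is root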
class DNSTreeItemTest(unittest.TestCase):
# pylint: disable=protected-access
@classmethod
def setUpClass(cls):
cls.data = [
"number",
"det_rot",
"sample_rot",
"field",
"temperature",
"sample",
"time",
"tof_channels",
"tof_channel_width",
"filepath",
]
cls.item = DNSTreeItem(cls.data, parent=None)
def test___init__(self):
self.assertIsInstance(self.item, DNSTreeItem)
self.assertIsInstance(self.item, object)
self.assertIsNone(self.item.parent_item)
self.assertIsInstance(self.item.children_items, list)
self.assertIsInstance(self.item, object)
self.assertEqual(self.item._check_state, 0)
def METHOD_NAME(self):
self.item.children_items = [1, 2, 3]
self.item.clearChilds()
self.assertEqual(self.item.children_items, [])
def test_appendChild(self):
self.item.children_items = [1, 2, 3]
testv = self.item.appendChild(4)
self.assertEqual(self.item.children_items, [1, 2, 3, 4])
self.assertEqual(testv, 4)
def test_child(self):
self.item.children_items = [1, 2, 3]
testv = self.item.child(1)
self.assertEqual(testv, 2)
def test_removeChild(self):
self.item.children_items = [1, 2, 3]
self.item.removeChild(1)
self.assertEqual(self.item.children_items, [1, 3])
def test_childCount(self):
self.item.children_items = [1, 2, 3]
testv = self.item.childCount()
self.assertEqual(testv, 3)
def test_get_children_items(self):
self.item.children_items = [1, 2, 3]
testv = self.item.get_children_items()
self.assertEqual(testv, [1, 2, 3])
def test_columnCount(self):
testv = self.item.columnCount()
self.assertEqual(testv, 10)
def test_data(self):
testv = self.item.get_tree_item_data()
self.assertEqual(
testv,
["number", "det_rot", "sample_rot", "field", "temperature", "sample", "time", "tof_channels", "tof_channel_width", "filepath"],
)
testv = self.item.get_tree_item_data(100)
self.assertIsNone(testv)
testv = self.item.get_tree_item_data(2)
self.assertEqual(testv, "sample_rot")
def test_get_sample(self):
self.item.children_items = []
testv = self.item.get_sample()
self.assertEqual(testv, "sample")
child = DNSTreeItem([1, 2, 3, 4, 5, 6])
self.item.children_items = [child]
testv = self.item.get_sample()
self.assertEqual(testv, 6)
def test_get_sample_type(self):
child = DNSTreeItem([1, 2, 3, 4, 5, "123"])
self.item.children_items = [child]
testv = self.item.get_sample_type()
self.assertEqual(testv, "sample")
child = DNSTreeItem([1, 2, 3, 4, 5, "vana"])
self.item.children_items = [child]
testv = self.item.get_sample_type()
self.assertEqual(testv, "vanadium")
def test_is_type(self):
child = DNSTreeItem([1, 2, 3, 4, 5, "vana"])
self.item.children_items = [child]
testv = self.item.is_type("vanadium")
self.assertTrue(testv)
testv = self.item.is_type("nicr")
self.assertFalse(testv)
def test_hasChildren(self):
self.item.children_items = [1]
self.assertTrue(self.item.hasChildren())
self.item.children_items = []
self.assertFalse(self.item.hasChildren())
def test_isChecked(self):
self.item._check_state = 2
self.assertEqual(self.item.isChecked(), 2)
def test_parent(self):
self.assertIsNone(self.item.parent())
def test_row(self):
self.assertEqual(self.item.row(), 0)
child = DNSTreeItem([1, 2, 3, 4, 5, 6], parent=self.item)
child2 = DNSTreeItem([1, 2, 3, 4, 5, 6], parent=self.item)
self.item.children_items = [child, child2]
testv = child2.row()
self.assertEqual(testv, 1)
def test_setChecked(self):
self.item.setChecked()
self.assertEqual(self.item._check_state, 2)
self.item.setChecked(0)
self.assertEqual(self.item._check_state, 0)
def test_setData(self):
self.item.setData("x", 0)
self.assertEqual(self.item.item_data[0], "x")
with self.assertRaises(IndexError):
self.item.setData("x", 100)
if __name__ == "__main__":
unittest.main()
| null |
5,066 |
"""
Copyright (c) 2012-2020 Rockstor, Inc. <http://rockstor.com>
This file is part of Rockstor.
Rockstor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
Rockstor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from storageadmin.models import Share, Snapshot
from storageadmin.util import handle_exception
from fs.btrfs import (
add_clone,
share_id,
update_quota,
mount_share,
qgroup_create,
set_property,
remove_share,
share_pqgroup_assign,
is_subvol,
)
from rest_framework.response import Response
from storageadmin.serializers import ShareSerializer
import re
import shutil
from django.conf import settings
from system.osi import run_command
# The following model/db default setting is also used when quotas are disabled.
PQGROUP_DEFAULT = settings.MODEL_DEFS["pqgroup"]
def METHOD_NAME(share, request, logger, snapshot):
"""
Variant of create_clone but where the share already exists and is to be
    supplanted by a snapshot which is effectively moved into the share's prior
position, both in the db and on the file system. This is achieved thus:
Unmount target share - (via remove_share()).
Btrfs subvol delete target share (via remove_share()).
Remove prior target share mount point (dir).
Move snap source to target share's former location (becomes share on disk).
Update existing target share db entry with source snap's qgroup / usage.
Remove source snap's db entry: updated share db entry makes it redundant.
Remount share (which now represents the prior snap's subvol relocated).
:param share: Share object to be supplanted
:param request:
:param logger: Logger object to reference
:param snapshot: Source snapshot/quirk share object to supplant target.
    :return: response of serialized share (in its updated form)
"""
try:
logger.info(
"Supplanting share ({}) with "
"snapshot ({}).".format(share.name, snapshot.name)
)
# We first strip our snapshot.name of any path as when we encounter the
# initially created receive subvol it is identified as a share with a
# snapshots location as it's subvol name (current quirk of import sys).
# E.g. first receive subvol/share-in-snapdir name example:
# ".snapshots/C583C37F-...1712B_sharename/sharename_19_replication_1".
# Subsequent more regular snapshots (in db as such) are named thus:
# "sharename_19_replication_2" or "sharename_19_replication_2" and on.
# The 19 in the above names is the generation of the replication task.
#
# Normalise source name across initial quirk share & subsequent snaps.
source_name = snapshot.name.split("/")[-1]
# Note in the above we have to use Object.name for polymorphism, but
        # our share is passed by its subvol (potential fragility point).
snap_path = "{}/.snapshots/{}/{}".format(
share.pool.mnt_pt, share.name, source_name
).replace("//", "/")
# e.g. for above: /mnt2/poolname/.snapshots/sharename/snapname
# or /.snapshots/sharename/snapname for system pool shares
share_path = ("{}/{}".format(share.pool.mnt_pt, share.name)).replace("//", "/")
# e.g. for above: /mnt2/poolname/sharename or /sharename for system pool shares
# Passed db snap assured by caller but this does not guarantee on disk.
if not is_subvol(snap_path):
raise Exception(
"Subvol with path ({}) does not exist. Aborting "
"replacement of share with path ({}).".format(snap_path, share_path)
)
# unmounts and then subvol deletes our on disk share
remove_share(share.pool, share.name, PQGROUP_DEFAULT)
# Remove read only flag on our snapshot subvol
set_property(snap_path, "ro", "false", mount=False)
# Ensure removed share path is clean, ie remove mount point.
run_command(["/usr/bin/rm", "-rf", share_path], throw=False)
# Now move snapshot to prior shares location. Given both a share and
# a snapshot are subvols, we effectively promote the snap to a share.
logger.info(
"Moving snapshot ({}) to prior share's pool location ({})".format(
snap_path, share_path
)
)
shutil.move(snap_path, share_path)
# This should have re-established our just removed subvol.
# Supplant share db info with snap info to reflect new on disk state.
share.qgroup = snapshot.qgroup
share.rusage = snapshot.rusage
share.eusage = snapshot.eusage
share.save()
# delete our now redundant snapshot/quirky share db entry
snapshot.delete()
# update our share's quota
update_quota(share.pool, share.pqgroup, share.size * 1024)
# mount our newly supplanted share
# We independently mount all shares, data pool or system pool, in /mnt2/name
mnt_pt = "{}{}".format(settings.MNT_PT, share.name)
mount_share(share, mnt_pt)
return Response(ShareSerializer(share).data)
except Exception as e:
handle_exception(e, request)
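# Illustrative helper (not called by the views): the path normalisation used
# above, where shares on the system pool are mounted at "/" and would otherwise
# produce a leading double slash.
def _example_snap_path(pool_mnt_pt, share_name, snap_name):
    return "{}/.snapshots/{}/{}".format(
        pool_mnt_pt, share_name, snap_name).replace("//", "/")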
def create_clone(share, new_name, request, logger, snapshot=None):
# if snapshot is None, create clone of the share.
# If it's not, then clone it.
if re.match(settings.SHARE_REGEX + "$", new_name) is None:
e_msg = (
"Clone name is invalid. It must start with a letter and can "
"contain letters, digits, _, . and - characters."
)
handle_exception(Exception(e_msg), request)
if Share.objects.filter(name=new_name).exists():
e_msg = "Another share with name ({}) already exists.".format(new_name)
handle_exception(Exception(e_msg), request)
if Snapshot.objects.filter(share=share, name=new_name).exists():
e_msg = (
"Snapshot with name ({}) already exists for the "
"share ({}). Choose a different name."
).format(new_name, share.name)
handle_exception(Exception(e_msg), request)
try:
share_name = share.subvol_name
snap = None
if snapshot is not None:
snap = snapshot.real_name
add_clone(share.pool, share_name, new_name, snapshot=snap)
snap_id = share_id(share.pool, new_name)
qgroup_id = "0/{}".format(snap_id)
pqid = qgroup_create(share.pool)
new_share = Share(
pool=share.pool,
qgroup=qgroup_id,
pqgroup=pqid,
name=new_name,
size=share.size,
subvol_name=new_name,
)
new_share.save()
if pqid != PQGROUP_DEFAULT:
update_quota(new_share.pool, pqid, new_share.size * 1024)
share_pqgroup_assign(pqid, new_share)
# Mount our new clone share.
# We independently mount all shares, data pool or system pool, in /mnt2/name
mnt_pt = "{}{}".format(settings.MNT_PT, new_name)
mount_share(new_share, mnt_pt)
return Response(ShareSerializer(new_share).data)
except Exception as e:
handle_exception(e, request)
| null |
5,067 |
# SPDX-FileCopyrightText: 2018 Dave Astels for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
FlappyBird type game for the NeoTrellisM4
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by Dave Astels for Adafruit Industries
Copyright (c) 2018 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
# pylint: disable=wildcard-import,unused-wildcard-import,eval-used
import time
import random
import math
from bird import Bird
from post import Post
from color_names import *
BLACK = 0x000000
class Game(object):
"""Overall game control."""
def __init__(self, trellis, accel, ramp=20, challenge_ramp=30):
"""initialize a Game instance.
trellis -- the TrellisM4Express instance to use as input and screen.
accel -- the accelerometer interface object to use as input
ramp -- how often (in steps) to increase the speed (default 20)
challenge_ramp -- how often (in steps) to increase the challenge of the posts
"""
self._trellis = trellis
self._accel = accel
self._delay_ramp = ramp
self._challenge_ramp = challenge_ramp
self._bird = Bird()
self._posts = []
self._interstitial_delay = 1.0
self._challenge = 10
self._currently_pressed = set([])
self._previous_accel_reading = (None, None, None)
self._previous_shake_result = False
def _restart(self):
"""Restart the game."""
self._bird = Bird()
self._posts = []
self._interstitial_delay = 0.5
self._challenge = 10
def _update(self):
"""Perform a periodic update: move the posts and remove any that go off the screen."""
for post in self._posts:
post.update()
if self._posts and self._posts[0].off_screen:
self._posts.pop(0)
def _shaken(self):
"""Return whether the Trellis is shaken."""
last_result = self._previous_shake_result
result = False
x, y, z = self._accel.acceleration
if self._previous_accel_reading[0] is not None:
result = math.fabs(self._previous_accel_reading[2] - z) > 4.0
self._previous_accel_reading = (x, y, z)
self._previous_shake_result = result
return result and not last_result
def _key_pressed(self):
"""Return whether a key was pressed since last time."""
pressed = set(self._trellis.pressed_keys)
key_just_pressed = len(pressed - self._currently_pressed) > 0
self._currently_pressed = pressed
return key_just_pressed
def _should_flap(self, mode):
"""Return whether the user wants the bird to flap.
mode -- input mode: False is key, True is accel
"""
if mode:
return self._shaken()
return self._key_pressed()
def _update_bird(self, mode):
"""Update the vertical position of the bird based on user activity and gravity.
mode -- input mode: False is key, True is accel
"""
self._bird.draw_on(self._trellis, BLACK)
if self._should_flap(mode):
self._bird.flap()
else:
self._bird.update()
self._bird.draw_on(self._trellis)
self._trellis.pixels.show()
def _check_for_collision(self):
"""Return whether this bird has collided with a post."""
collided = self._bird.did_hit_ground()
for post in self._posts:
collided |= self._bird.is_colliding_with(post)
return collided
def METHOD_NAME(self):
"""Update the screen."""
self._trellis.pixels.fill(BLACK)
for post in self._posts:
post.draw_on(self._trellis)
self._bird.draw_on(self._trellis)
self._trellis.pixels.show()
def _new_post(self):
"""Return a new post based on the current challenge level"""
bottom_blocks = random.randint(1, 3)
top_blocks = random.randint(1, 2)
# bottom post
if self._challenge > 6:
return Post(from_bottom=bottom_blocks)
# top possible as well
if self._challenge > 3:
if random.randint(1, 2) == 1:
return Post(from_bottom=bottom_blocks)
return Post(from_top=top_blocks)
# top, bottom, and both possible
r = random.randint(1, 3)
if r == 1:
return Post(from_bottom=bottom_blocks)
if r == 2:
return Post(from_top=top_blocks)
return Post(from_bottom=bottom_blocks, from_top=random.randint(1, 4 - bottom_blocks))
def _add_post(self):
"""Add a post."""
self._posts.append(self._new_post())
def play(self, mode=False):
"""Play the game.
mode -- input mode: False is key, True is accel
"""
self._restart()
collided = False
count = 0
last_tick = 0
while not collided:
now = time.monotonic()
self._update_bird(mode)
if now >= last_tick + self._interstitial_delay:
last_tick = now
count += 1
self._update()
collided = self._check_for_collision()
if count % max(1, (self._challenge - random.randint(0, 4))) == 0:
self._add_post()
self.METHOD_NAME()
# handle collision or wait and repeat
if collided:
self._bird.flash(self._trellis)
else:
# time to speed up?
if count % self._delay_ramp == 0:
self._interstitial_delay -= 0.01
# time to increase challenge of the posts?
if self._challenge > 0 and count % self._challenge_ramp == 0:
self._challenge -= 1
time.sleep(0.05)
| null |
5,068 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio as atf
from mindspore import log as logger
CHANNEL = 1
FREQ = 5
TIME = 5
def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
"""
Precision calculation formula
"""
if np.any(np.isnan(data_expected)):
assert np.allclose(data_me, data_expected, rtol, atol, equal_nan=equal_nan)
elif not np.allclose(data_me, data_expected, rtol, atol, equal_nan=equal_nan):
count_unequal_element(data_expected, data_me, rtol, atol)
def count_unequal_element(data_expected, data_me, rtol, atol):
"""
Precision calculation func
"""
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
data_expected[greater], data_me[greater], error[greater])
def gen(shape):
np.random.seed(0)
data = np.random.random(shape)
yield(np.array(data, dtype=np.float32),)
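# Pure NumPy sketch of the masking semantics checked below (an illustration,
# not the MindSpore implementation): fill mask_width bins starting at
# mask_start along the chosen axis with mask_value.
def numpy_mask_along_axis(spectrogram, mask_start, mask_width, mask_value, axis):
    out = spectrogram.copy()
    if axis == 1:  # frequency axis
        out[:, mask_start:mask_start + mask_width, :] = mask_value
    else:  # axis == 2, time axis
        out[:, :, mask_start:mask_start + mask_width] = mask_value
    return out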
def test_mask_along_axis_eager_random_input():
"""
Feature: MaskAlongAxis
Description: Mindspore eager mode normal testcase with random input tensor
Expectation: The returned result is as expected
"""
logger.info("test Mask_Along_axis op")
spectrogram = next(gen((CHANNEL, FREQ, TIME)))[0]
expect_output = copy.deepcopy(spectrogram)
out_put = atf.MaskAlongAxis(mask_start=0, mask_width=1, mask_value=5.0, axis=2)(spectrogram)
for item in expect_output[0]:
item[0] = 5.0
assert out_put.shape == (CHANNEL, FREQ, TIME)
allclose_nparray(out_put, expect_output, 0.0001, 0.0001)
def METHOD_NAME():
"""
Feature: MaskAlongAxis
Description: Mindspore eager mode checking precision
Expectation: The returned result is as expected
"""
logger.info("test MaskAlongAxis op, checking precision")
spectrogram_0 = np.array([[[-0.0635, -0.6903],
[-1.7175, -0.0815],
[0.7981, -0.8297],
[-0.4589, -0.7506]],
[[0.6189, 1.1874],
[0.1856, -0.5536],
[1.0620, 0.2071],
[-0.3874, 0.0664]]]).astype(np.float32)
out_ms_0 = atf.MaskAlongAxis(mask_start=0, mask_width=1, mask_value=2.0, axis=2)(spectrogram_0)
spectrogram_1 = np.array([[[-0.0635, -0.6903],
[-1.7175, -0.0815],
[0.7981, -0.8297],
[-0.4589, -0.7506]],
[[0.6189, 1.1874],
[0.1856, -0.5536],
[1.0620, 0.2071],
[-0.3874, 0.0664]]]).astype(np.float64)
out_ms_1 = atf.MaskAlongAxis(mask_start=0, mask_width=1, mask_value=2.0, axis=2)(spectrogram_1)
out_benchmark = np.array([[[2.0000, -0.6903],
[2.0000, -0.0815],
[2.0000, -0.8297],
[2.0000, -0.7506]],
[[2.0000, 1.1874],
[2.0000, -0.5536],
[2.0000, 0.2071],
[2.0000, 0.0664]]]).astype(np.float32)
allclose_nparray(out_ms_0, out_benchmark, 0.0001, 0.0001)
allclose_nparray(out_ms_1, out_benchmark, 0.0001, 0.0001)
def test_mask_along_axis_pipeline():
"""
Feature: MaskAlongAxis
Description: Mindspore pipeline mode normal testcase
Expectation: The returned result is as expected
"""
logger.info("test MaskAlongAxis op, pipeline")
generator = gen((CHANNEL, FREQ, TIME))
expect_output = copy.deepcopy(next(gen((CHANNEL, FREQ, TIME)))[0])
data1 = ds.GeneratorDataset(source=generator, column_names=["multi_dimensional_data"])
transforms = [atf.MaskAlongAxis(mask_start=2, mask_width=2, mask_value=2.0, axis=2)]
data1 = data1.map(operations=transforms, input_columns=["multi_dimensional_data"])
for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
out_put = item["multi_dimensional_data"]
for item in expect_output[0]:
item[2] = 2.0
item[3] = 2.0
assert out_put.shape == (CHANNEL, FREQ, TIME)
allclose_nparray(out_put, expect_output, 0.0001, 0.0001)
def test_mask_along_axis_invalid_input():
"""
Feature: MaskAlongAxis
Description: Mindspore eager mode with invalid input tensor
Expectation: Throw correct error and message
"""
def test_invalid_param(test_name, mask_start, mask_width, mask_value, axis, error, error_msg):
"""
a function used for checking correct error and message with various input
"""
logger.info("Test MaskAlongAxis with wrong params: {0}".format(test_name))
with pytest.raises(error) as error_info:
atf.MaskAlongAxis(mask_start, mask_width, mask_value, axis)
assert error_msg in str(error_info.value)
test_invalid_param("invalid mask_start", -1, 10, 1.0, 1, ValueError,
"Input mask_start is not within the required interval of [0, 2147483647].")
test_invalid_param("invalid mask_width", 0, -1, 1.0, 1, ValueError,
"Input mask_width is not within the required interval of [1, 2147483647].")
test_invalid_param("invalid axis", 0, 10, 1.0, 1.0, TypeError,
"Argument axis with value 1.0 is not of type [<class 'int'>], but got <class 'float'>.")
test_invalid_param("invalid axis", 0, 10, 1.0, 0, ValueError,
"Input axis is not within the required interval of [1, 2].")
test_invalid_param("invalid axis", 0, 10, 1.0, 3, ValueError,
"Input axis is not within the required interval of [1, 2].")
test_invalid_param("invalid axis", 0, 10, 1.0, -1, ValueError,
"Input axis is not within the required interval of [1, 2].")
if __name__ == "__main__":
test_mask_along_axis_eager_random_input()
METHOD_NAME()
test_mask_along_axis_pipeline()
test_mask_along_axis_invalid_input()
| null |
5,069 |
# This file is part of cloud-init. See LICENSE file for license information.
import copy
import os
from cloudinit import safeyaml, stages, util
from cloudinit.config.modules import Modules
from cloudinit.settings import PER_INSTANCE
from tests.unittests import helpers
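# Minimal sketch of the stages API exercised by the tests below (it assumes the
# mocked root filesystem that setUp() prepares): Init reads /etc/cloud/cloud.cfg,
# populates /var/lib/cloud, and Modules.run_section() runs 'cloud_init_modules'.
def _example_run_cloud_init_modules():
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.instancify()
    initer.update()
    initer.cloudify().run(
        "consume_data",
        initer.consume_data,
        args=[PER_INSTANCE],
        freq=PER_INSTANCE,
    )
    return Modules(initer).run_section("cloud_init_modules")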
class TestSimpleRun(helpers.FilesystemMockingTestCase):
with_logs = True
def setUp(self):
super(TestSimpleRun, self).setUp()
self.new_root = self.tmp_dir()
self.replicateTestRoot("simple_ubuntu", self.new_root)
# Seed cloud.cfg file for our tests
self.cfg = {
"datasource_list": ["None"],
"runcmd": ["ls /etc"], # test ALL_DISTROS
"spacewalk": {}, # test non-ubuntu distros module definition
"system_info": {
"paths": {"run_dir": self.new_root},
"distro": "ubuntu",
},
"write_files": [
{
"path": "/etc/blah.ini",
"content": "blah",
"permissions": 0o755,
},
],
"cloud_init_modules": ["write_files", "spacewalk", "runcmd"],
}
cloud_cfg = safeyaml.dumps(self.cfg)
util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
util.write_file(
os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
)
self.patchOS(self.new_root)
self.patchUtils(self.new_root)
def test_none_ds_populates_var_lib_cloud(self):
"""Init and run_section default behavior creates appropriate dirs."""
# Now start verifying whats created
initer = stages.Init()
initer.read_cfg()
initer.initialize()
self.assertTrue(os.path.exists("/var/lib/cloud"))
for d in ["scripts", "seed", "instances", "handlers", "sem", "data"]:
self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))
initer.fetch()
iid = initer.instancify()
self.assertEqual(iid, "iid-datasource-none")
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
def test_none_ds_runs_modules_which_do_not_define_distros(self):
"""Any modules which do not define a distros attribute are run."""
initer = stages.Init()
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
initer.cloudify().run(
"consume_data",
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertTrue(os.path.exists("/etc/blah.ini"))
self.assertIn("write_files", which_ran)
contents = util.load_file("/etc/blah.ini")
self.assertEqual(contents, "blah")
self.assertNotIn(
"Skipping modules ['write_files'] because they are not verified on"
" distro 'ubuntu'",
self.logs.getvalue(),
)
def test_none_ds_skips_modules_which_define_unmatched_distros(self):
"""Skip modules which define distros which don't match the current."""
initer = stages.Init()
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
initer.cloudify().run(
"consume_data",
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn(
"Skipping modules 'spacewalk' because they are not verified on"
" distro 'ubuntu'",
self.logs.getvalue(),
)
self.assertNotIn("spacewalk", which_ran)
def test_none_ds_runs_modules_which_distros_all(self):
"""Skip modules which define distros attribute as supporting 'all'.
This is done in the module with the declaration:
distros = [ALL_DISTROS]. runcmd is an example.
"""
initer = stages.Init()
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
initer.cloudify().run(
"consume_data",
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn("runcmd", which_ran)
self.assertNotIn(
"Skipping modules 'runcmd' because they are not verified on"
" distro 'ubuntu'",
self.logs.getvalue(),
)
def METHOD_NAME(self):
"""run_section forced skipped modules by using unverified_modules."""
# re-write cloud.cfg with unverified_modules override
cfg = copy.deepcopy(self.cfg)
cfg["unverified_modules"] = ["spacewalk"] # Would have skipped
cloud_cfg = safeyaml.dumps(cfg)
util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
util.write_file(
os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
)
initer = stages.Init()
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
initer.cloudify().run(
"consume_data",
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn("spacewalk", which_ran)
self.assertIn(
"running unverified_modules: 'spacewalk'", self.logs.getvalue()
)
def test_none_ds_run_with_no_config_modules(self):
"""run_section will report no modules run when none are configured."""
# re-write cloud.cfg with unverified_modules override
cfg = copy.deepcopy(self.cfg)
# Represent empty configuration in /etc/cloud/cloud.cfg
cfg["cloud_init_modules"] = None
cloud_cfg = safeyaml.dumps(cfg)
util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
util.write_file(
os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
)
initer = stages.Init()
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
initer.cloudify().run(
"consume_data",
initer.consume_data,
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertEqual([], which_ran)
# vi: ts=4 expandtab
| null |
5,070 |
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import unittest
import pandas as pd
from coremltools._deps import _HAS_SKLEARN
from coremltools.models.utils import (_is_macos, _macos_version,
evaluate_regressor)
if _HAS_SKLEARN:
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import LinearSVR
from coremltools.converters.sklearn import convert
@unittest.skipIf(not _HAS_SKLEARN, "Missing scikitlearn. Skipping tests.")
class LinearRegressionScikitTest(unittest.TestCase):
"""
Unit test class for testing scikit-learn converter.
"""
@classmethod
def METHOD_NAME(self):
"""
Set up the unit test by loading the dataset and training a model.
"""
scikit_data = load_boston()
scikit_model = LinearRegression()
scikit_model.fit(scikit_data["data"], scikit_data["target"])
# Save the data and the model
self.scikit_data = scikit_data
self.scikit_model = scikit_model
def test_conversion(self):
input_names = self.scikit_data.feature_names
spec = convert(self.scikit_model, input_names, "target").get_spec()
self.assertIsNotNone(spec)
# Test the model class
self.assertIsNotNone(spec.description)
# Test the interface class
self.assertEqual(spec.description.predictedFeatureName, "target")
# Test the inputs and outputs
self.assertEqual(len(spec.description.output), 1)
self.assertEqual(spec.description.output[0].name, "target")
self.assertEqual(
spec.description.output[0].type.WhichOneof("Type"), "doubleType"
)
for input_type in spec.description.input:
self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
self.assertEqual(
sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
)
# Test the linear regression parameters.
self.assertTrue(
spec.pipelineRegressor.pipeline.models[-1].HasField("glmRegressor")
)
lr = spec.pipelineRegressor.pipeline.models[-1].glmRegressor
self.assertEqual(lr.offset, self.scikit_model.intercept_)
self.assertEqual(len(lr.weights), 1)
self.assertEqual(len(lr.weights[0].value), 13)
        for i, w in enumerate(lr.weights[0].value):
            self.assertAlmostEqual(w, self.scikit_model.coef_[i])
def test_conversion_bad_inputs(self):
# Error on converting an untrained model
with self.assertRaises(TypeError):
model = LinearRegression()
spec = convert(model, "data", "out")
        # Check the expected class during conversion.
with self.assertRaises(TypeError):
model = OneHotEncoder()
spec = convert(model, "data", "out")
@unittest.skipUnless(
_is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
)
def test_linear_regression_evaluation(self):
"""
Check that the evaluation results are the same in scikit learn and coremltools
"""
input_names = self.scikit_data.feature_names
df = pd.DataFrame(self.scikit_data.data, columns=input_names)
for normalize_value in (True, False):
cur_model = LinearRegression(normalize=normalize_value)
cur_model.fit(self.scikit_data["data"], self.scikit_data["target"])
spec = convert(cur_model, input_names, "target")
df["target"] = cur_model.predict(self.scikit_data.data)
metrics = evaluate_regressor(spec, df)
self.assertAlmostEqual(metrics["max_error"], 0)
@unittest.skipUnless(
_is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
)
def test_linear_svr_evaluation(self):
"""
Check that the evaluation results are the same in scikit learn and coremltools
"""
ARGS = [
{},
{"C": 0.5, "epsilon": 0.25},
{"dual": False, "loss": "squared_epsilon_insensitive"},
{"tol": 0.005},
{"fit_intercept": False},
{"intercept_scaling": 1.5},
]
input_names = self.scikit_data.feature_names
df = pd.DataFrame(self.scikit_data.data, columns=input_names)
for cur_args in ARGS:
cur_model = LinearSVR(**cur_args)
cur_model.fit(self.scikit_data["data"], self.scikit_data["target"])
spec = convert(cur_model, input_names, "target")
df["target"] = cur_model.predict(self.scikit_data.data)
metrics = evaluate_regressor(spec, df)
self.assertAlmostEqual(metrics["max_error"], 0)
| null |
5,071 |
from time import perf_counter as clock
import numpy as np
import random
DSN = "dbname=test port = 5435"
# in order to always generate the same random sequence
random.seed(19)
def flatten(l):
"""Flattens list of tuples l."""
return [x[0] for x in l]
def fill_arrays(start, stop):
    col_i = np.arange(start, stop, dtype=np.int32)
if userandom:
col_j = np.random.uniform(0, nrows, size=[stop - start])
else:
        col_j = np.array(col_i, dtype=np.float64)
return col_i, col_j
# Generator to ensure pytables benchmark compatibility
def int_generator(nrows):
step = 1000 * 100
j = 0
for i in range(nrows):
if i >= step * j:
stop = (j + 1) * step
if stop > nrows: # Seems unnecessary
stop = nrows
col_i, col_j = fill_arrays(i, stop)
j += 1
k = 0
yield (col_i[k], col_j[k])
k += 1
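# Clarifying note: int_generator refills col_i/col_j in chunks of `step` rows and then
# yields one (i, j) pair per iteration; int_generator_slow below is the simpler
# row-at-a-time variant kept for comparison.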
def int_generator_slow(nrows):
for i in range(nrows):
if userandom:
yield (i, float(random.randint(0, nrows)))
else:
yield (i, float(i))
class Stream32:
"Object simulating a file for reading"
def __init__(self):
self.n = None
self.read_it = self.read_iter()
    # This doesn't work! It has to be converted into a normal (non-generator) one!
def readline(self, n=None):
for tup in int_generator(nrows):
sout = "%s\t%s\n" % tup
if n is not None and len(sout) > n:
for i in range(0, len(sout), n):
yield sout[i:i + n]
else:
yield sout
def read_iter(self):
sout = ""
n = self.n
for tup in int_generator(nrows):
sout += "%s\t%s\n" % tup
if n is not None and len(sout) > n:
for i in range(n, len(sout), n):
rout = sout[:n]
sout = sout[n:]
yield rout
yield sout
def METHOD_NAME(self, n=None):
self.n = n
try:
            buf = next(self.read_it)
        except StopIteration:
            buf = ""
        return buf
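# Clarifying note: psycopg2's cursor.copy_from() pulls its input from a file-like
# object through read()/readline(), which is what Stream32 emulates so that the rows
# can be generated lazily instead of being materialised in memory first.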
def open_db(filename, remove=0):
if not filename:
con = sqlite.connect(DSN)
else:
con = sqlite.connect(filename)
cur = con.cursor()
return con, cur
def create_db(filename, nrows):
con, cur = open_db(filename, remove=1)
try:
cur.execute("create table ints(i integer, j double precision)")
except:
con.rollback()
cur.execute("DROP TABLE ints")
cur.execute("create table ints(i integer, j double precision)")
con.commit()
con.set_isolation_level(2)
t1 = clock()
st = Stream32()
cur.copy_from(st, "ints")
# In case of postgres, the speeds of generator and loop are similar
#cur.executemany("insert into ints values (%s,%s)", int_generator(nrows))
# for i in xrange(nrows):
# cur.execute("insert into ints values (%s,%s)", (i, float(i)))
con.commit()
ctime = clock() - t1
if verbose:
print(f"insert time: {ctime:.5f}")
print(f"Krows/s: {nrows / 1000 / ctime:.5f}")
close_db(con, cur)
def index_db(filename):
con, cur = open_db(filename)
t1 = clock()
cur.execute("create index ij on ints(j)")
con.commit()
itime = clock() - t1
if verbose:
print(f"index time: {itime:.5f}")
print(f"Krows/s: {nrows / itime:.5f}")
# Close the DB
close_db(con, cur)
def query_db(filename, rng):
con, cur = open_db(filename)
t1 = clock()
ntimes = 10
for i in range(ntimes):
# between clause does not seem to take advantage of indexes
# cur.execute("select j from ints where j between %s and %s" % \
cur.execute("select i from ints where j >= %s and j <= %s" %
# cur.execute("select i from ints where i >= %s and i <=
# %s" %
(rng[0] + i, rng[1] + i))
results = cur.fetchall()
con.commit()
qtime = (clock() - t1) / ntimes
if verbose:
print(f"query time: {qtime:.5f}")
print(f"Mrows/s: {nrows / 1000 / qtime:.5f}")
results = sorted(flatten(results))
print(results)
close_db(con, cur)
def close_db(con, cur):
cur.close()
con.close()
if __name__ == "__main__":
import sys
import getopt
try:
import psyco
psyco_imported = 1
except:
psyco_imported = 0
usage = """usage: %s [-v] [-p] [-m] [-i] [-q] [-c] [-R range] [-n nrows] file
-v verbose
-p use "psyco" if available
-m use random values to fill the table
-q do query
-c create the database
-i index the table
-2 use sqlite2 (default is use sqlite3)
-R select a range in a field in the form "start,stop" (def "0,10")
-n sets the number of rows (in krows) in each table
\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vpmiqc2R:n:')
except:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
usepsyco = 0
userandom = 0
docreate = 0
createindex = 0
doquery = 0
sqlite_version = "3"
rng = [0, 10]
nrows = 1
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
elif option[0] == '-p':
usepsyco = 1
elif option[0] == '-m':
userandom = 1
elif option[0] == '-i':
createindex = 1
elif option[0] == '-q':
doquery = 1
elif option[0] == '-c':
docreate = 1
elif option[0] == "-2":
sqlite_version = "2"
elif option[0] == '-R':
rng = [int(i) for i in option[1].split(",")]
elif option[0] == '-n':
nrows = int(option[1])
# Catch the hdf5 file passed as the last argument
filename = pargs[0]
# if sqlite_version == "2":
# import sqlite
# else:
# from pysqlite2 import dbapi2 as sqlite
import psycopg2 as sqlite
if verbose:
# print "pysqlite version:", sqlite.version
if userandom:
print("using random values")
if docreate:
if verbose:
print("writing %s krows" % nrows)
if psyco_imported and usepsyco:
psyco.bind(create_db)
nrows *= 1000
create_db(filename, nrows)
if createindex:
index_db(filename)
if doquery:
query_db(filename, rng)
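# Hypothetical example run (script name assumed; the trailing positional argument is
# handed to psycopg2.connect, so it is effectively a DSN rather than a file path):
#   python pg_bench.py -v -c -i -q -n 100 "dbname=test port=5435"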
| null |
5,072 |
import textwrap
from collections import ChainMap
from discord.ext.commands import Cog, Context, group, has_any_role
from bot.bot import Bot
from bot.constants import Channels, MODERATION_ROLES, Webhooks
from bot.converters import MemberOrUser
from bot.exts.moderation.infraction._utils import post_infraction
from bot.exts.moderation.watchchannels._watchchannel import WatchChannel
from bot.log import get_logger
log = get_logger(__name__)
class BigBrother(WatchChannel, Cog, name="Big Brother"):
"""Monitors users by relaying their messages to a watch channel to assist with moderation."""
def __init__(self, bot: Bot) -> None:
super().__init__(
bot,
destination=Channels.big_brother,
webhook_id=Webhooks.big_brother.id,
api_endpoint="bot/infractions",
api_default_params={"active": "true", "type": "watch", "ordering": "-inserted_at", "limit": 10_000},
logger=log
)
@group(name="bigbrother", aliases=("bb",), invoke_without_command=True)
@has_any_role(*MODERATION_ROLES)
async def bigbrother_group(self, ctx: Context) -> None:
"""Monitors users by relaying their messages to the Big Brother watch channel."""
await ctx.send_help(ctx.command)
@bigbrother_group.command(name="watched", aliases=("all", "list"))
@has_any_role(*MODERATION_ROLES)
async def watched_command(
self, ctx: Context, oldest_first: bool = False, update_cache: bool = True
) -> None:
"""
Shows the users that are currently being monitored by Big Brother.
The optional kwarg `oldest_first` can be used to order the list by oldest watched.
The optional kwarg `update_cache` can be used to update the user
cache using the API before listing the users.
"""
await self.list_watched_users(ctx, oldest_first=oldest_first, update_cache=update_cache)
@bigbrother_group.command(name="oldest")
@has_any_role(*MODERATION_ROLES)
async def oldest_command(self, ctx: Context, update_cache: bool = True) -> None:
"""
Shows Big Brother monitored users ordered by oldest watched.
The optional kwarg `update_cache` can be used to update the user
cache using the API before listing the users.
"""
await ctx.invoke(self.watched_command, oldest_first=True, update_cache=update_cache)
@bigbrother_group.command(name="watch", aliases=("w",), root_aliases=("watch",))
@has_any_role(*MODERATION_ROLES)
async def watch_command(self, ctx: Context, user: MemberOrUser, *, reason: str) -> None:
"""
Relay messages sent by the given `user` to the `#big-brother` channel.
A `reason` for adding the user to Big Brother is required and will be displayed
in the header when relaying messages of this user to the watchchannel.
"""
await self.METHOD_NAME(ctx, user, reason)
@bigbrother_group.command(name="unwatch", aliases=("uw",), root_aliases=("unwatch",))
@has_any_role(*MODERATION_ROLES)
async def unwatch_command(self, ctx: Context, user: MemberOrUser, *, reason: str) -> None:
"""Stop relaying messages by the given `user`."""
await self.apply_unwatch(ctx, user, reason)
async def METHOD_NAME(self, ctx: Context, user: MemberOrUser, reason: str) -> None:
"""
Add `user` to watched users and apply a watch infraction with `reason`.
A message indicating the result of the operation is sent to `ctx`.
The message will include `user`'s previous watch infraction history, if it exists.
"""
if user.bot:
await ctx.send(f":x: I'm sorry {ctx.author}, I'm afraid I can't do that. I only watch humans.")
return
if not await self.fetch_user_cache():
await ctx.send(f":x: Updating the user cache failed, can't watch user {user.mention}")
return
if user.id in self.watched_users:
await ctx.send(f":x: {user.mention} is already being watched.")
return
# discord.User instances don't have a roles attribute
if hasattr(user, "roles") and any(role.id in MODERATION_ROLES for role in user.roles):
await ctx.send(f":x: I'm sorry {ctx.author}, I'm afraid I can't do that. I must be kind to my masters.")
return
response = await post_infraction(ctx, user, "watch", reason, hidden=True, active=True)
if response is not None:
self.watched_users[user.id] = response
msg = f":white_check_mark: Messages sent by {user.mention} will now be relayed to Big Brother."
history = await self.bot.api_client.get(
self.api_endpoint,
params={
"user__id": str(user.id),
"active": "false",
"type": "watch",
"ordering": "-inserted_at"
}
)
if len(history) > 1:
total = f"({len(history) // 2} previous infractions in total)"
end_reason = textwrap.shorten(history[0]["reason"], width=500, placeholder="...")
start_reason = f"Watched: {textwrap.shorten(history[1]['reason'], width=500, placeholder='...')}"
msg += f"\n\nUser's previous watch reasons {total}:```{start_reason}\n\n{end_reason}```"
else:
msg = ":x: Failed to post the infraction: response was empty."
await ctx.send(msg)
async def apply_unwatch(self, ctx: Context, user: MemberOrUser, reason: str, send_message: bool = True) -> None:
"""
Remove `user` from watched users and mark their infraction as inactive with `reason`.
If `send_message` is True, a message indicating the result of the operation is sent to
`ctx`.
"""
active_watches = await self.bot.api_client.get(
self.api_endpoint,
params=ChainMap(
{"user__id": str(user.id)},
self.api_default_params,
)
)
if active_watches:
log.trace("Active watches for user found. Attempting to remove.")
[infraction] = active_watches
await self.bot.api_client.patch(
f"{self.api_endpoint}/{infraction['id']}",
json={"active": False}
)
await post_infraction(ctx, user, "watch", f"Unwatched: {reason}", hidden=True, active=False)
self._remove_user(user.id)
if not send_message: # Prevents a message being sent to the channel if part of a permanent ban
log.debug(f"Perma-banned user {user} was unwatched.")
return
log.trace("User is not banned. Sending message to channel")
message = f":white_check_mark: Messages sent by {user.mention} will no longer be relayed."
else:
log.trace("No active watches found for user.")
if not send_message: # Prevents a message being sent to the channel if part of a permanent ban
log.debug(f"{user} was not on the watch list; no removal necessary.")
return
log.trace("User is not perma banned. Send the error message.")
message = ":x: The specified user is currently not being watched."
await ctx.send(message)
async def setup(bot: Bot) -> None:
"""Load the BigBrother cog."""
await bot.add_cog(BigBrother(bot))
| null |
5,073 |
"""Helper for listing a summary of finished prums and progress on open prums.
Projecta are small bite-sized project quanta that typically will result in
one manuscript.
"""
from gooey import GooeyParser
import datetime
import dateutil.parser as date_parser
from regolith.helpers.basehelper import SoutHelperBase
from regolith.fsclient import _id_key
from regolith.tools import (
all_docs_from_collection,
get_pi_id,
key_value_pair_filter,
)
from regolith.schemas import PROJECTUM_STATI, PROJECTUM_ACTIVE_STATI, PROJECTUM_FINISHED_STATI
TARGET_COLL = "projecta"
HELPER_TARGET = "l_progress"
def subparser(subpi):
listbox_kwargs = {}
if isinstance(subpi, GooeyParser):
listbox_kwargs['widget'] = 'Listbox'
subpi.add_argument("lead",
help="Generate report for this project lead"
)
subpi.add_argument("-v", "--verbose", action="store_true",
help='increase verbosity of output')
subpi.add_argument("-s", "--stati", nargs="+",
choices=PROJECTUM_STATI,
help=f"Filter projecta for these stati."
f" Default is {*(PROJECTUM_ACTIVE_STATI+PROJECTUM_FINISHED_STATI),}",
default=PROJECTUM_ACTIVE_STATI+PROJECTUM_FINISHED_STATI,
**listbox_kwargs
)
# The --filter and --keys flags should be in every lister
subpi.add_argument("-f", "--filter", nargs="+",
help="Search this collection by giving key element pairs"
)
subpi.add_argument("-k", "--keys", nargs="+", help="Specify what keys to return values from when running "
"--filter. If no argument is given the default is just the id.")
subpi.add_argument("--date", help="date used in testing. Defaults to "
"today's date.")
return subpi
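# Assumed command-line usage of this helper (names illustrative):
#   regolith helper l_progress some_lead -s started finished -v
# where "some_lead" is matched against each projectum's "lead" field.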
class ProgressReportHelper(SoutHelperBase):
"""Helper for listing upcoming (and past) projectum milestones.
Projecta are small bite-sized project quanta that typically will result in
one manuscript.
"""
# btype must be the same as helper target in helper.py
btype = HELPER_TARGET
needed_colls = [f'{TARGET_COLL}']
def construct_global_ctx(self):
"""Constructs the global context"""
super().construct_global_ctx()
gtx = self.gtx
rc = self.rc
if "groups" in self.needed_colls:
rc.pi_id = get_pi_id(rc)
rc.coll = f"{TARGET_COLL}"
colls = [
sorted(
all_docs_from_collection(rc.client, collname), key=_id_key
)
for collname in self.needed_colls
]
for db, coll in zip(self.needed_colls, colls):
gtx[db] = coll
gtx["all_docs_from_collection"] = all_docs_from_collection
gtx["float"] = float
gtx["str"] = str
gtx["zip"] = zip
def METHOD_NAME(self, selected_projecta):
rc = self.rc
if selected_projecta == []:
return
selected_projecta.sort(key=lambda prum: prum.get('begin_date'),
reverse=True)
for p in selected_projecta:
if rc.verbose:
print(f"{p.get('_id')}")
if p.get("deliverable"):
print(
f" status: {p.get('status')}, begin_date: {p.get('begin_date')}, due_date: {p.get('deliverable').get('due_date')}")
if p.get('status') == 'finished':
print(f" finished: {p.get('end_date')}")
print(f" description: {p.get('description')}")
print(f" log_url: {p.get('log_url')}")
print(" team:")
grp_members = None
if p.get('group_members'):
grp_members = ', '.join(p.get('group_members'))
collaborators = None
if p.get('collaborators'):
collaborators = ', '.join(p.get('collaborators'))
print(f" group_members: {grp_members}")
print(f" collaborators: {collaborators}")
d = p.get('deliverable')
print(" deliverable:")
audience = None
if d.get('audience'):
audience = ', '.join(d.get('audience'))
print(f" audience: {audience}")
iter, title = 1, "scope:"
for scopum in d.get('scope', ["no scope"]):
print(f" {title} {str(iter)}. {scopum}")
iter += 1
title = " "
print(f" platform: {d.get('platform')}")
print(" milestones:")
for m in p.get('milestones'):
print(f" {m.get('due_date')}: {m.get('name')}")
print(f" objective: {m.get('objective')}")
print(f" status: {m.get('status')}")
else:
print(f"{p.get('_id')}")
if p.get("deliverable"):
print(
f" status: {p.get('status')}, begin_date: {p.get('begin_date')}, due_date: {p.get('deliverable').get('due_date')}")
print(f" description: {p.get('description')}")
if p.get('status') == 'finished':
print(f" finished: {p.get('end_date')}")
elif p.get('status') in PROJECTUM_ACTIVE_STATI:
print(f" log_url: {p.get('log_url')}")
if p.get('milestones'):
print(' milestones:')
for m in p.get('milestones'):
print(
f" due: {m.get('due_date')}, {m.get('name')}, type: {m.get('type')}, status: {m.get('status')}")
print(f" objective: {m.get('objective')}")
def sout(self):
rc = self.rc
if rc.filter:
collection = key_value_pair_filter(self.gtx["projecta"], rc.filter)
else:
collection = self.gtx["projecta"]
if not rc.date:
now = datetime.date.today()
else:
now = date_parser.parse(rc.date).date()
# remove checklist prums from the report
collection = [prum for prum in collection if
"checklist" not in prum.get('deliverable', {}).get('scope', [])]
title = f"\nProgress report for {rc.lead}, generated {now.isoformat()}"
print(title)
projecta = [valid_prum for valid_prum in collection if valid_prum.get("lead") == rc.lead]
finishedp, proposedp, startedp, otherp = [], [], [], []
for prum in projecta:
if prum.get('status') == "finished":
finishedp.append(prum)
elif prum.get('status') == "proposed":
proposedp.append(prum)
elif prum.get('status') == "started":
startedp.append(prum)
else:
otherp.append(prum)
print(f"*************************[Orphan Projecta]*************************")
for prum in otherp:
print(f"{prum.get('_id')}, status: {prum.get('status')}")
print(f"*************************[Finished Projecta]*************************")
for prum in finishedp:
print(f"{prum.get('_id')}, grant: {prum.get('grants')}")
print(f" description: {prum.get('description')}")
print(f" finished: {prum.get('end_date')}")
print(f"*************************[Proposed Projecta]*************************")
self.METHOD_NAME(proposedp)
print(f"*************************[In Progress Projecta]*************************")
self.METHOD_NAME(startedp)
| null |
5,074 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
'get_group_output',
]
@pulumi.output_type
class GetGroupResult:
"""
A collection of values returned by getGroup.
"""
def __init__(__self__, all_management_group_ids=None, all_subscription_ids=None, display_name=None, id=None, management_group_ids=None, name=None, METHOD_NAME=None, subscription_ids=None):
if all_management_group_ids and not isinstance(all_management_group_ids, list):
raise TypeError("Expected argument 'all_management_group_ids' to be a list")
pulumi.set(__self__, "all_management_group_ids", all_management_group_ids)
if all_subscription_ids and not isinstance(all_subscription_ids, list):
raise TypeError("Expected argument 'all_subscription_ids' to be a list")
pulumi.set(__self__, "all_subscription_ids", all_subscription_ids)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if management_group_ids and not isinstance(management_group_ids, list):
raise TypeError("Expected argument 'management_group_ids' to be a list")
pulumi.set(__self__, "management_group_ids", management_group_ids)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'parent_management_group_id' to be a str")
pulumi.set(__self__, "parent_management_group_id", METHOD_NAME)
if subscription_ids and not isinstance(subscription_ids, list):
raise TypeError("Expected argument 'subscription_ids' to be a list")
pulumi.set(__self__, "subscription_ids", subscription_ids)
@property
@pulumi.getter(name="allManagementGroupIds")
def all_management_group_ids(self) -> Sequence[str]:
"""
A list of Management Group IDs which directly or indirectly belong to this Management Group.
"""
return pulumi.get(self, "all_management_group_ids")
@property
@pulumi.getter(name="allSubscriptionIds")
def all_subscription_ids(self) -> Sequence[str]:
"""
A list of Subscription IDs which are assigned to this Management Group or its children Management Groups.
"""
return pulumi.get(self, "all_subscription_ids")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="managementGroupIds")
def management_group_ids(self) -> Sequence[str]:
"""
A list of Management Group IDs which directly belong to this Management Group.
"""
return pulumi.get(self, "management_group_ids")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parentManagementGroupId")
def METHOD_NAME(self) -> str:
"""
The ID of any Parent Management Group.
"""
return pulumi.get(self, "parent_management_group_id")
@property
@pulumi.getter(name="subscriptionIds")
def subscription_ids(self) -> Sequence[str]:
"""
A list of Subscription IDs which are directly assigned to this Management Group.
"""
return pulumi.get(self, "subscription_ids")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
all_management_group_ids=self.all_management_group_ids,
all_subscription_ids=self.all_subscription_ids,
display_name=self.display_name,
id=self.id,
management_group_ids=self.management_group_ids,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
subscription_ids=self.subscription_ids)
def get_group(display_name: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
"""
Use this data source to access information about an existing Management Group.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.management.get_group(name="00000000-0000-0000-0000-000000000000")
pulumi.export("displayName", example.display_name)
```
:param str display_name: Specifies the display name of this Management Group.
> **NOTE** Whilst multiple management groups may share the same display name, when filtering, the provider expects a single management group to be found with this name.
:param str name: Specifies the name or UUID of this Management Group.
"""
__args__ = dict()
__args__['displayName'] = display_name
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:management/getGroup:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
all_management_group_ids=pulumi.get(__ret__, 'all_management_group_ids'),
all_subscription_ids=pulumi.get(__ret__, 'all_subscription_ids'),
display_name=pulumi.get(__ret__, 'display_name'),
id=pulumi.get(__ret__, 'id'),
management_group_ids=pulumi.get(__ret__, 'management_group_ids'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'parent_management_group_id'),
subscription_ids=pulumi.get(__ret__, 'subscription_ids'))
@_utilities.lift_output_func(get_group)
def get_group_output(display_name: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]:
"""
Use this data source to access information about an existing Management Group.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.management.get_group(name="00000000-0000-0000-0000-000000000000")
pulumi.export("displayName", example.display_name)
```
:param str display_name: Specifies the display name of this Management Group.
> **NOTE** Whilst multiple management groups may share the same display name, when filtering, the provider expects a single management group to be found with this name.
:param str name: Specifies the name or UUID of this Management Group.
"""
...
| null |
5,075 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name,too-many-public-methods,too-many-arguments
import unittest
from pathlib import Path
from mantid.kernel import *
from mantid.api import *
from testhelpers import run_algorithm
import os, mantid, hashlib
class CreateCacheFilename(unittest.TestCase):
def METHOD_NAME(self):
"""CreateCacheFilename: one prop"""
pm = PropertyManager()
pm.declareProperty("a", 0)
pm.setProperty("a", 3)
mantid.PropertyManagerDataService.add("pm", pm)
# Execute
alg_test = run_algorithm(
"CreateCacheFilename",
PropertyManager="pm",
Properties=[],
OtherProperties=[],
Prefix="",
CacheDir="",
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
expected = os.path.join(ConfigService.getUserPropertiesDir(), "cache", "%s.nxs" % hashlib.sha1("a=3".encode("utf-8")).hexdigest())
self.assertEqual(alg_test.getPropertyValue("OutputFilename"), expected)
# Another test. don't specify the default values
alg_test = run_algorithm(
"CreateCacheFilename",
PropertyManager="pm",
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
expected = os.path.join(ConfigService.getUserPropertiesDir(), "cache", "%s.nxs" % hashlib.sha1("a=3".encode("utf-8")).hexdigest())
self.assertEqual(alg_test.getPropertyValue("OutputFilename"), expected)
return
def test_wronginput(self):
"""CreateCacheFilename: wrong input"""
# Execute
with self.assertRaisesRegex(RuntimeError, "Either PropertyManager or OtherProperties should be supplied"):
run_algorithm(
"CreateCacheFilename",
)
def test_glob(self):
"""CreateCacheFilename: globbing"""
# glob pattern search anything with 'a' in it
# and leave other props out
pm = PropertyManager()
aprops = ["a", "alibaba", "taa", "sa", "a75"]
props = aprops + ["b", "c", "d"]
for p in props:
pm.declareProperty(p, 0)
pm.setProperty(p, 3)
continue
mantid.PropertyManagerDataService.add("test_glob", pm)
# Execute
alg_test = run_algorithm(
"CreateCacheFilename",
PropertyManager="test_glob",
Properties=["*a*"],
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
s = ",".join(sorted(["%s=3" % p for p in aprops]))
expected = os.path.join(ConfigService.getUserPropertiesDir(), "cache", "%s.nxs" % hashlib.sha1(s.encode("utf-8")).hexdigest())
self.assertEqual(alg_test.getPropertyValue("OutputFilename"), expected)
return
def test_otherprops_only(self):
"""CreateCacheFilename: other_props only"""
# Execute
alg_test = run_algorithm(
"CreateCacheFilename",
OtherProperties=["a=1", "b=2"],
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
expected = os.path.join(
ConfigService.getUserPropertiesDir(), "cache", "%s.nxs" % hashlib.sha1("a=1,b=2".encode("utf-8")).hexdigest()
)
self.assertEqual(alg_test.getPropertyValue("OutputFilename"), expected)
return
def test_bothprops(self):
"""CreateCacheFilename: use both PropertyManager and OtherProperties"""
pm = PropertyManager()
aprops = ["a", "alibaba", "taa", "sa", "a75"]
props = aprops + ["b", "c", "d"]
for p in props:
pm.declareProperty(p, "")
pm.setProperty(p, "fish")
continue
mantid.PropertyManagerDataService.add("test_bothprops", pm)
other_props = ["A=1", "B=2"]
# Execute
alg_test = run_algorithm(
"CreateCacheFilename",
PropertyManager="test_bothprops",
Properties=["*a*"],
OtherProperties=other_props,
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
s = ",".join(sorted(["%s=fish" % p for p in aprops] + other_props))
expected = os.path.join(ConfigService.getUserPropertiesDir(), "cache", "%s.nxs" % hashlib.sha1(s.encode("utf-8")).hexdigest())
self.assertEqual(alg_test.getPropertyValue("OutputFilename"), expected)
return
def test_prefix(self):
"""CreateCacheFilename: prefix"""
# Execute
alg_test = run_algorithm(
"CreateCacheFilename",
OtherProperties=["a=1", "b=2"],
Prefix="vanadium",
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
expected = os.path.join(
ConfigService.getUserPropertiesDir(), "cache", "vanadium_%s.nxs" % hashlib.sha1("a=1,b=2".encode("utf-8")).hexdigest()
)
self.assertEqual(alg_test.getPropertyValue("OutputFilename"), expected)
return
def test_cache_dir(self):
"""CreateCacheFilename: cache_dir"""
# Execute
alg_test = run_algorithm(
"CreateCacheFilename",
OtherProperties=["a=1", "b=2"],
CacheDir="my_cache",
)
# executed?
self.assertTrue(alg_test.isExecuted())
# Verify ....
expected = f"{hashlib.sha1('a=1,b=2'.encode('utf-8')).hexdigest()}.nxs"
self.assertEqual(Path(alg_test.getPropertyValue("OutputFilename")).name, expected)
return
if __name__ == "__main__":
unittest.main()
| null |
5,076 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Read constant tensor test script for offline debugger APIs.
"""
import os
import json
import shutil
import numpy as np
import mindspore.offline_debug.dbg_services as d
from dump_test_utils import build_dump_structure_with_constant, write_tensor_to_json
from tests.security_utils import security_off_wrap
class TestOfflineReadConstantTensor:
"""Test reading constant tensor for offline debugger"""
GENERATE_GOLDEN = False
test_name = "read_constant_tensor"
tensor_json = []
temp_dir = ''
@classmethod
def setup_class(cls):
"""Init setup for offline read tensor test"""
tensor1 = np.array([32.0, 4096.0], np.float32)
name1 = "Parameter.data-1.0.0."
info1 = d.TensorInfo(node_name="Default--data-1",
slot=0, iteration=0, rank_id=0, root_graph_id=0, is_output=True)
tensor_name = [name1]
cls.tensor_info = [info1]
tensor_list = [tensor1]
cls.temp_dir = build_dump_structure_with_constant(tensor_name, tensor_list, "Cst", cls.tensor_info)
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.temp_dir)
@security_off_wrap
def METHOD_NAME(self):
debugger_backend = d.DbgServices(dump_file_path=self.temp_dir)
_ = debugger_backend.initialize(net_name="Cst", is_sync_mode=True)
tensor_data = debugger_backend.read_tensors(self.tensor_info)
# Check the length of tensor data
assert len(tensor_data) == 1
if self.GENERATE_GOLDEN:
self.print_read_tensors(self.tensor_info, tensor_data, 0, False)
else:
self.compare_expect_actual_result(self.tensor_info, tensor_data, 0)
@security_off_wrap
def test_read_invalid_constant_name_tensor(self):
debugger_backend = d.DbgServices(dump_file_path=self.temp_dir)
_ = debugger_backend.initialize(net_name="Cst", is_sync_mode=True)
info = d.TensorInfo(node_name="Default/data-1",
slot=0, iteration=0, rank_id=0, root_graph_id=0, is_output=True)
tensor_data = debugger_backend.read_tensors([info])
assert len(tensor_data) == 1
if self.GENERATE_GOLDEN:
self.print_read_tensors([info], tensor_data, 1, True)
else:
self.compare_expect_actual_result([info], tensor_data, 1)
def compare_expect_actual_result(self, tensor_info_list, tensor_data_list, test_index):
"""Compare actual result with golden file."""
golden_file = os.path.realpath(os.path.join("../data/dump/gpu_dumps/golden/",
self.test_name + "_expected.json"))
with open(golden_file) as f:
expected_list = json.load(f)
for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):
tensor_id = "tensor_" + str(test_index + x + 1)
expect_tensor = expected_list[x + test_index][tensor_id]
actual_tensor = write_tensor_to_json(tensor_info, tensor_data)
assert expect_tensor == actual_tensor
def print_read_tensors(self, tensor_info_list, tensor_data_list, test_index, is_print):
"""Print read tensors result if GENERATE_GOLDEN is True."""
for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):
tensor_name = "tensor_" + str(test_index + x + 1)
tensor = write_tensor_to_json(tensor_info, tensor_data)
self.tensor_json.append({tensor_name: tensor})
if is_print:
with open(self.test_name + "_expected.json", "w") as dump_f:
json.dump(self.tensor_json, dump_f, indent=4, separators=(',', ': '))
| null |
5,077 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetTopicResult',
'AwaitableGetTopicResult',
'get_topic',
'get_topic_output',
]
@pulumi.output_type
class GetTopicResult:
"""
A collection of values returned by getTopic.
"""
def __init__(__self__, endpoint=None, id=None, location=None, name=None, primary_access_key=None, resource_group_name=None, secondary_access_key=None, tags=None):
if endpoint and not isinstance(endpoint, str):
raise TypeError("Expected argument 'endpoint' to be a str")
pulumi.set(__self__, "endpoint", endpoint)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if primary_access_key and not isinstance(primary_access_key, str):
raise TypeError("Expected argument 'primary_access_key' to be a str")
pulumi.set(__self__, "primary_access_key", primary_access_key)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if secondary_access_key and not isinstance(secondary_access_key, str):
raise TypeError("Expected argument 'secondary_access_key' to be a str")
pulumi.set(__self__, "secondary_access_key", secondary_access_key)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The Endpoint associated with the EventGrid Topic.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="primaryAccessKey")
def primary_access_key(self) -> str:
"""
The Primary Shared Access Key associated with the EventGrid Topic.
"""
return pulumi.get(self, "primary_access_key")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="secondaryAccessKey")
def secondary_access_key(self) -> str:
"""
The Secondary Shared Access Key associated with the EventGrid Topic.
"""
return pulumi.get(self, "secondary_access_key")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
class AwaitableGetTopicResult(GetTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTopicResult(
endpoint=self.endpoint,
id=self.id,
location=self.location,
name=self.name,
primary_access_key=self.primary_access_key,
resource_group_name=self.resource_group_name,
secondary_access_key=self.secondary_access_key,
tags=self.tags)
def METHOD_NAME(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTopicResult:
"""
Use this data source to access information about an existing EventGrid Topic
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.eventgrid.get_topic(name="my-eventgrid-topic",
resource_group_name="example-resources")
```
:param str name: The name of the EventGrid Topic resource.
:param str resource_group_name: The name of the resource group in which the EventGrid Topic exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:eventgrid/getTopic:getTopic', __args__, opts=opts, typ=GetTopicResult).value
return AwaitableGetTopicResult(
endpoint=pulumi.get(__ret__, 'endpoint'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
primary_access_key=pulumi.get(__ret__, 'primary_access_key'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
secondary_access_key=pulumi.get(__ret__, 'secondary_access_key'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(METHOD_NAME)
def get_topic_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTopicResult]:
"""
Use this data source to access information about an existing EventGrid Topic
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.eventgrid.get_topic(name="my-eventgrid-topic",
resource_group_name="example-resources")
```
:param str name: The name of the EventGrid Topic resource.
:param str resource_group_name: The name of the resource group in which the EventGrid Topic exists.
"""
...
| null |
5,078 |
import pytest
from rlberry.envs import gym_make
from rlberry.agents.torch.dqn import DQNAgent
from rlberry.agents.torch.utils.training import model_factory
from rlberry.manager import ExperimentManager
import os
import pathlib
import tempfile
@pytest.mark.parametrize(
"use_double_dqn, use_prioritized_replay", [(False, False), (True, True)]
)
def test_dqn_agent(use_double_dqn, use_prioritized_replay):
env = gym_make("CartPole-v1")
agent = DQNAgent(
env,
learning_starts=5,
eval_interval=75,
train_interval=2,
gradient_steps=-1,
use_double_dqn=use_double_dqn,
use_prioritized_replay=use_prioritized_replay,
)
agent.fit(budget=500)
model_configs = {
"type": "MultiLayerPerceptron",
"layer_sizes": (5, 5),
"reshape": False,
}
def mlp(env, **kwargs):
"""
Returns a default Q value network.
"""
kwargs["in_size"] = env.observation_space.shape[0]
kwargs["out_size"] = env.action_space.n
return model_factory(**kwargs)
new_agent = DQNAgent(
env, q_net_constructor=mlp, q_net_kwargs=model_configs, learning_starts=100
)
new_agent.fit(budget=2000)
def test_dqn_classic_env():
env = gym_make("CartPole-v1")
agent = DQNAgent(
env,
learning_starts=5,
eval_interval=75,
train_interval=2,
gradient_steps=-1,
use_double_dqn=True,
use_prioritized_replay=True,
)
agent.fit(budget=200)
with tempfile.TemporaryDirectory() as tmpdirname:
saving_path = tmpdirname + "/agent_test_dqn_classic_env.pickle"
# test the save function
agent.save(saving_path)
assert os.path.exists(saving_path)
# test the loading function
test_load_env = gym_make("CartPole-v1")
loaded_agent = DQNAgent.load(saving_path, **dict(env=test_load_env))
assert loaded_agent
# test the agent
observation, info = test_load_env.reset()
for tt in range(100):
action = loaded_agent.policy(observation)
next_observation, reward, terminated, truncated, info = test_load_env.step(
action
)
done = terminated or truncated
if done:
next_observation, info = test_load_env.reset()
observation = next_observation
def METHOD_NAME():
# saving_path = "rlberry/agents/torch/tests/agentmanager_test_dqn_classic_env"
with tempfile.TemporaryDirectory() as tmpdirname:
saving_path = tmpdirname + "/agentmanager_test_dqn_classic_env"
test_experiment_manager = ExperimentManager(
DQNAgent, # The Agent class.
(
gym_make,
dict(
id="CartPole-v1",
),
), # The Environment to solve.
init_kwargs=dict( # Where to put the agent's hyperparameters
learning_starts=5,
eval_interval=75,
train_interval=2,
gradient_steps=-1,
use_double_dqn=True,
use_prioritized_replay=True,
chunk_size=1,
),
fit_budget=200, # The number of interactions between the agent and the environment during training.
eval_kwargs=dict(
eval_horizon=50
), # The number of interactions between the agent and the environment during evaluations.
n_fit=1, # The number of agents to train. Usually, it is good to do more than 1 because the training is stochastic.
agent_name="test_dqn_classic_env", # The agent's name.
output_dir=saving_path,
)
test_experiment_manager.fit(budget=200)
# test the save function
test_experiment_manager.save()
assert os.path.exists(saving_path)
# test the loading function
test_load_env = gym_make("CartPole-v1")
path_to_load = next(pathlib.Path(saving_path).glob("**/*.pickle"))
loaded_experiment_manager = ExperimentManager.load(path_to_load)
assert loaded_experiment_manager
# test the agent
state, info = test_load_env.reset()
for tt in range(50):
action = loaded_experiment_manager.get_agent_instances()[0].policy(state)
next_s, _, terminated, truncated, test = test_load_env.step(action)
done = terminated or truncated
if done:
break
state = next_s
| null |
5,079 |
import builtins
import os
import select
import socket
import unittest
import errno
from errno import EEXIST
class SubOSError(OSError):
pass
class SubOSErrorWithInit(OSError):
def __init__(self, message, bar):
self.bar = bar
super().__init__(message)
class SubOSErrorWithNew(OSError):
def __new__(cls, message, baz):
self = super().__new__(cls, message)
self.baz = baz
return self
class SubOSErrorCombinedInitFirst(SubOSErrorWithInit, SubOSErrorWithNew):
pass
class SubOSErrorCombinedNewFirst(SubOSErrorWithNew, SubOSErrorWithInit):
pass
class SubOSErrorWithStandaloneInit(OSError):
def __init__(self):
pass
class HierarchyTest(unittest.TestCase):
def test_builtin_errors(self):
self.assertEqual(OSError.__name__, 'OSError')
self.assertIs(IOError, OSError)
self.assertIs(EnvironmentError, OSError)
def test_socket_errors(self):
self.assertIs(socket.error, OSError)
self.assertIs(socket.gaierror.__base__, OSError)
self.assertIs(socket.herror.__base__, OSError)
self.assertIs(socket.timeout, TimeoutError)
def test_select_error(self):
self.assertIs(select.error, OSError)
# mmap.error is tested in test_mmap
_pep_map = """
+-- BlockingIOError EAGAIN, EALREADY, EWOULDBLOCK, EINPROGRESS
+-- ChildProcessError ECHILD
+-- ConnectionError
+-- BrokenPipeError EPIPE, ESHUTDOWN
+-- ConnectionAbortedError ECONNABORTED
+-- ConnectionRefusedError ECONNREFUSED
+-- ConnectionResetError ECONNRESET
+-- FileExistsError EEXIST
+-- FileNotFoundError ENOENT
+-- InterruptedError EINTR
+-- IsADirectoryError EISDIR
+-- NotADirectoryError ENOTDIR
+-- PermissionError EACCES, EPERM, ENOTCAPABLE
+-- ProcessLookupError ESRCH
+-- TimeoutError ETIMEDOUT
"""
def _make_map(s):
_map = {}
for line in s.splitlines():
line = line.strip('+- ')
if not line:
continue
excname, _, errnames = line.partition(' ')
for errname in filter(None, errnames.strip().split(', ')):
if errname == "ENOTCAPABLE" and not hasattr(errno, errname):
continue
_map[getattr(errno, errname)] = getattr(builtins, excname)
return _map
_map = _make_map(_pep_map)
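    # For example, the parsed table maps errno.EEXIST to FileExistsError and
    # errno.ECHILD to ChildProcessError, mirroring the PEP 3151 hierarchy above.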
@unittest.expectedFailureIfWindows("TODO: RUSTPYTHON")
def test_errno_mapping(self):
# The OSError constructor maps errnos to subclasses
# A sample test for the basic functionality
e = OSError(EEXIST, "Bad file descriptor")
self.assertIs(type(e), FileExistsError)
# Exhaustive testing
for errcode, exc in self._map.items():
e = OSError(errcode, "Some message")
self.assertIs(type(e), exc)
othercodes = set(errno.errorcode) - set(self._map)
for errcode in othercodes:
e = OSError(errcode, "Some message")
self.assertIs(type(e), OSError, repr(e))
def test_try_except(self):
filename = "some_hopefully_non_existing_file"
# This checks that try .. except checks the concrete exception
# (FileNotFoundError) and not the base type specified when
# PyErr_SetFromErrnoWithFilenameObject was called.
# (it is therefore deliberate that it doesn't use assertRaises)
try:
open(filename)
except FileNotFoundError:
pass
else:
self.fail("should have raised a FileNotFoundError")
# Another test for PyErr_SetExcFromWindowsErrWithFilenameObject()
self.assertFalse(os.path.exists(filename))
try:
os.unlink(filename)
except FileNotFoundError:
pass
else:
self.fail("should have raised a FileNotFoundError")
class AttributesTest(unittest.TestCase):
def test_windows_error(self):
if os.name == "nt":
self.assertIn('winerror', dir(OSError))
else:
self.assertNotIn('winerror', dir(OSError))
@unittest.skip("TODO: RUSTPYTHON")
def test_posix_error(self):
e = OSError(EEXIST, "File already exists", "foo.txt")
self.assertEqual(e.errno, EEXIST)
self.assertEqual(e.args[0], EEXIST)
self.assertEqual(e.strerror, "File already exists")
self.assertEqual(e.filename, "foo.txt")
if os.name == "nt":
self.assertEqual(e.winerror, None)
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipUnless(os.name == "nt", "Windows-specific test")
def test_errno_translation(self):
# ERROR_ALREADY_EXISTS (183) -> EEXIST
e = OSError(0, "File already exists", "foo.txt", 183)
self.assertEqual(e.winerror, 183)
self.assertEqual(e.errno, EEXIST)
self.assertEqual(e.args[0], EEXIST)
self.assertEqual(e.strerror, "File already exists")
self.assertEqual(e.filename, "foo.txt")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_blockingioerror(self):
args = ("a", "b", "c", "d", "e")
for n in range(6):
e = BlockingIOError(*args[:n])
with self.assertRaises(AttributeError):
e.characters_written
with self.assertRaises(AttributeError):
del e.characters_written
e = BlockingIOError("a", "b", 3)
self.assertEqual(e.characters_written, 3)
e.characters_written = 5
self.assertEqual(e.characters_written, 5)
del e.characters_written
with self.assertRaises(AttributeError):
e.characters_written
class ExplicitSubclassingTest(unittest.TestCase):
def test_errno_mapping(self):
# When constructing an OSError subclass, errno mapping isn't done
e = SubOSError(EEXIST, "Bad file descriptor")
self.assertIs(type(e), SubOSError)
def test_init_overridden(self):
e = SubOSErrorWithInit("some message", "baz")
self.assertEqual(e.bar, "baz")
self.assertEqual(e.args, ("some message",))
def test_init_kwdargs(self):
e = SubOSErrorWithInit("some message", bar="baz")
self.assertEqual(e.bar, "baz")
self.assertEqual(e.args, ("some message",))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_new_overridden(self):
e = SubOSErrorWithNew("some message", "baz")
self.assertEqual(e.baz, "baz")
self.assertEqual(e.args, ("some message",))
def test_new_kwdargs(self):
e = SubOSErrorWithNew("some message", baz="baz")
self.assertEqual(e.baz, "baz")
self.assertEqual(e.args, ("some message",))
def test_init_new_overridden(self):
e = SubOSErrorCombinedInitFirst("some message", "baz")
self.assertEqual(e.bar, "baz")
self.assertEqual(e.baz, "baz")
self.assertEqual(e.args, ("some message",))
e = SubOSErrorCombinedNewFirst("some message", "baz")
self.assertEqual(e.bar, "baz")
self.assertEqual(e.baz, "baz")
self.assertEqual(e.args, ("some message",))
def METHOD_NAME(self):
# __init__ doesn't propagate to OSError.__init__ (see issue #15229)
e = SubOSErrorWithStandaloneInit()
self.assertEqual(e.args, ())
self.assertEqual(str(e), '')
if __name__ == "__main__":
unittest.main()
| null |
5,080 |
"""Test Randomizer class."""
from mpf.tests.MpfTestCase import MpfTestCase
from mpf.core.randomizer import Randomizer
class TestRandomizer(MpfTestCase):
def METHOD_NAME(self):
return 'randomizer.yaml'
def get_machine_path(self):
return 'tests/machine_files/randomizer/'
def test_one_element_with_force_different(self):
items = ['1']
r = Randomizer(items)
self.assertTrue(r.force_different)
# it has one element and should thereby always return it
self.assertEqual('1', next(r))
self.assertEqual('1', next(r))
self.assertEqual('1', next(r))
def test_machine_randomizer(self):
items = [
'1',
'2',
'3',
]
r = Randomizer(items)
results = list()
for x in range(10000):
results.append(next(r))
self.assertAlmostEqual(3333, results.count('1'), delta=500)
self.assertAlmostEqual(3333, results.count('2'), delta=500)
self.assertAlmostEqual(3333, results.count('3'), delta=500)
def test_force_different(self):
items = [
('1', 1),
('2', 1),
('3', 1),
]
r = Randomizer(items)
r.force_different = True
last_item = None
for x in range(1000):
this_item = next(r)
self.assertNotEqual(this_item, last_item)
last_item = this_item
def test_force_all(self):
items = [
('1', 1),
('2', 1),
('3', 1),
]
r = Randomizer(items)
r.force_all = True
last_item = None
for x in range(100):
results = set()
results.add(next(r))
self.assertNotEqual(last_item, r.get_current())
results.add(next(r))
results.add(next(r))
last_item = r.get_current()
self.assertEqual(len(results), 3)
def test_no_loop(self):
items = [
('1', 1),
('2', 1),
('3', 1),
]
r = Randomizer(items)
r.loop = False
x = 0
for _ in r:
x += 1
self.assertEqual(3, x)
def test_weights(self):
items = [
('1', 2),
('2', 1),
('3', 1),
]
r = Randomizer(items)
r.force_different = False
results = list()
for x in range(10000):
results.append(next(r))
self.assertAlmostEqual(5000, results.count('1'), delta=500)
self.assertAlmostEqual(2500, results.count('2'), delta=500)
self.assertAlmostEqual(2500, results.count('3'), delta=500)
items = [
('1', 1),
('2', 1),
('3', 3),
]
r = Randomizer(items)
r.force_different = False
results = list()
for x in range(10000):
results.append(next(r))
self.assertAlmostEqual(2000, results.count('1'), delta=500)
self.assertAlmostEqual(2000, results.count('2'), delta=500)
self.assertAlmostEqual(6000, results.count('3'), delta=500)
items = [
('1', 1),
('2', 6),
('3', 3),
]
r = Randomizer(items)
r.force_different = False
results = list()
for x in range(10000):
results.append(next(r))
self.assertAlmostEqual(1000, results.count('1'), delta=500)
self.assertAlmostEqual(6000, results.count('2'), delta=500)
self.assertAlmostEqual(3000, results.count('3'), delta=500)
def test_loop_no_random(self):
items = [
('1', 1),
('2', 1),
('3', 1),
]
r = Randomizer(items)
r.disable_random = True
for i1 in range(50):
self.assertEqual(next(r), '1')
self.assertEqual(next(r), '2')
self.assertEqual(next(r), '3')
def test_no_loop_no_random(self):
items = [
('1', 1),
('2', 1),
('3', 1),
]
for _ in range(50):
r = Randomizer(items)
r.loop = False
r.disable_random = True
x = 0
for i, result in enumerate(r):
x += 1
self.assertEqual(items[i][0], result)
self.assertEqual(3, x)
def test_fallback_value(self):
items = []
r = Randomizer(items)
r.fallback_value = "foo"
results = list()
for x in range(100):
results.append(next(r))
        self.assertEqual(100, results.count('foo'))
| null |
5,081 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback control flow."""
import mindspore as ms
from mindspore import Tensor, jit, context, nn, Parameter
import numpy as np
from tests.st.fallback.cases_register import case_register
context.set_context(mode=context.GRAPH_MODE)
@case_register.level0
@case_register.target_gpu
@case_register.target_ascend
def test_while_in_if_1():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@jit
def control_flow_if():
x = Tensor([1])
if x > Tensor([0]):
while x < Tensor([7]):
x *= 2
return x
res = control_flow_if()
print(res)
assert res == 8
@case_register.level1
@case_register.target_gpu
@case_register.target_ascend
def test_while_in_if_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@jit
def control_flow_while():
x = Tensor([6]).astype("int32")
y = Tensor([0]).astype("int32")
if x > Tensor([0]):
while x >= y:
y = y + x
return y
res = control_flow_while()
assert res == 12
@case_register.level1
@case_register.target_gpu
@case_register.target_ascend
def test_while_in_if_3():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@jit
def control_flow_while():
x = Tensor([7]).astype("int32")
y = Tensor([0]).astype("int32")
z = np.array([1])
if z <= 1:
while x >= Tensor([0]).astype("int32"):
y += x
x = Tensor([-1]).astype("int32")
return y
res = control_flow_while()
assert res == 7
@case_register.level1
@case_register.target_gpu
@case_register.target_ascend
def test_while_two_cond_in_if_1():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@jit
def control_flow_while():
x = Tensor([1])
y = Tensor([8])
z = Tensor([12])
if z > x + y:
while x < y and x + y <= z:
y = x + y + z
x += Tensor(-1)
return x + y
res = control_flow_while()
assert res == 21
@case_register.level1
@case_register.target_gpu
@case_register.target_ascend
def test_while_two_cond_in_if_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@jit
def control_flow_while():
x = Tensor([7]).astype("int32")
y = Tensor([0]).astype("int32")
if x > y:
while Tensor([0]).astype("int32") < x and y >= x - Tensor(7).astype("int32"):
y += x
x += Tensor([-6]).astype("int32")
return y
res = control_flow_while()
assert res == 8
@case_register.level1
@case_register.target_gpu
@case_register.target_ascend
def test_while_param_in_if():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.param_a = Parameter(Tensor(1, ms.float32), name="name_a")
def METHOD_NAME(self):
x = Tensor(7).astype("int32")
y = Tensor(0).astype("int32")
if x < y:
pass
else:
while x >= self.param_a:
y += x
x -= Tensor(-2).astype("int32")
self.param_a += y
return self.param_a
net = Net()
res = net()
assert res == 24
| null |
5,082 |
from base64 import b64encode
from .base import TestBase
from ..keyvault.crypto import default_eccrypto
from ..messaging.interfaces.udp.endpoint import UDPv4Address, UDPv6Address
from ..peer import Peer
class TestPeer(TestBase):
test_key = default_eccrypto.generate_key("very-low")
def setUp(self):
super().setUp()
self.peer = Peer(TestPeer.test_key, ("1.2.3.4", 5))
def test_default_timestamp(self):
"""
Check if the default Lamport timestamp of a Peer is 0.
"""
self.assertEqual(self.peer.get_lamport_timestamp(), 0)
def test_increment_timestamp(self):
"""
Check if the Lamport timestamp of a Peer can be incremented.
"""
self.peer.update_clock(1)
self.assertEqual(self.peer.get_lamport_timestamp(), 1)
def test_increase_timestamp(self):
"""
Check if the Lamport timestamp of a Peer can be increased arbitrarily.
"""
self.peer.update_clock(42)
self.assertEqual(self.peer.get_lamport_timestamp(), 42)
def test_decrease_timestamp(self):
"""
Check if the Lamport timestamp of a Peer cannot be decreased.
"""
self.peer.update_clock(-1)
self.assertEqual(self.peer.get_lamport_timestamp(), 0)
def test_peer_equality(self):
"""
Check if peers with the same key and address are equal.
"""
other = Peer(self.peer.key, self.peer.address)
self.assertTrue(self.peer == other)
self.assertFalse(self.peer != other)
def test_peer_inequality_key(self):
"""
Check if peers with a different key and same address are not equal.
"""
other = Peer(default_eccrypto.generate_key("very-low"), self.peer.address)
self.assertNotEqual(self.peer, other)
def test_median_ping_none(self):
"""
No ping measurements should lead to a None median ping.
"""
self.assertIsNone(self.peer.get_median_ping())
def test_avg_ping_none(self):
"""
No ping measurements should lead to a None average ping.
"""
self.assertIsNone(self.peer.get_average_ping())
def test_median_ping_odd(self):
"""
Median ping should return the median ping for odd length measurements.
"""
self.peer.pings.append(2.0)
self.peer.pings.append(3.0)
self.peer.pings.append(4.0)
self.assertEqual(3.0, self.peer.get_median_ping())
def test_median_ping_even(self):
"""
Median ping should return the median ping for even length measurements.
"""
self.peer.pings.append(2.0)
self.peer.pings.append(3.0)
self.peer.pings.append(4.0)
self.peer.pings.append(5.0)
self.assertEqual(3.5, self.peer.get_median_ping())
def test_avg_ping(self):
"""
Average ping should return the average ping.
"""
self.peer.pings.append(3.0)
self.peer.pings.append(4.0)
self.assertEqual(3.5, self.peer.get_average_ping())
def test_peer_inequality_address(self):
"""
Check if peers with the same key and a different address are equal.
"""
other = Peer(self.peer.key)
self.assertEqual(self.peer, other)
def test_to_string(self):
"""
Check if the __str__ method functions properly.
"""
self.assertEqual(str(self.peer), "Peer<1.2.3.4:5, %s>" % b64encode(self.peer.mid).decode('utf-8'))
def METHOD_NAME(self):
"""
Check if the address property properly sets from the init.
"""
address = UDPv4Address("1.2.3.4", 5)
peer = Peer(TestPeer.test_key, address)
self.assertEqual(peer.address, address)
def test_set_address_setter(self):
"""
Check if the address property properly sets from the setter.
"""
address = UDPv4Address("1.2.3.4", 5)
peer = Peer(TestPeer.test_key)
peer.address = address
self.assertEqual(peer.address, address)
def test_set_address_add(self):
"""
Check if the address property properly sets from add_address.
"""
address = UDPv4Address("1.2.3.4", 5)
peer = Peer(TestPeer.test_key)
peer.add_address(address)
self.assertEqual(peer.address, address)
def test_set_address_addv6(self):
"""
Check if IPv6 addresses are properly returned.
"""
address = UDPv6Address("1:2:3:4:5:6", 7)
peer = Peer(TestPeer.test_key)
peer.add_address(address)
self.assertEqual(peer.address, address)
def test_address_order1(self):
"""
Check if IPv6 is preferred over IPv4 (append out-of-order).
"""
address1 = UDPv4Address("1.2.3.4", 5)
address2 = UDPv6Address("1:2:3:4:5:6", 7)
peer = Peer(TestPeer.test_key)
peer.add_address(address2)
peer.add_address(address1)
self.assertEqual(peer.address, address2)
def test_address_order2(self):
"""
Check if IPv6 is preferred over IPv4 (append in-order).
"""
address1 = UDPv4Address("1.2.3.4", 5)
address2 = UDPv6Address("1:2:3:4:5:6", 7)
peer = Peer(TestPeer.test_key)
peer.add_address(address1)
peer.add_address(address2)
self.assertEqual(peer.address, address2)
def test_default_address(self):
"""
Check if the default address is UDPv4Address("0.0.0.0", 0)
"""
self.assertEqual(Peer(TestPeer.test_key).address, UDPv4Address("0.0.0.0", 0))
def test_manual_update(self):
"""
Check if manual updates to the addresses dictionary are caught.
"""
address = UDPv4Address("1.2.3.4", 5)
peer = Peer(TestPeer.test_key)
peer.addresses.update({UDPv4Address: address})
self.assertEqual(peer.address, address)
def test_manual_updates(self):
"""
Check if manual updates to the addresses dictionary are caught (double update, out-of-order).
"""
address1 = UDPv6Address("1:2:3:4:5:6", 7)
address2 = UDPv4Address("1.2.3.4", 5)
peer = Peer(TestPeer.test_key)
peer.addresses.update({UDPv4Address: address2})
peer.addresses.update({UDPv6Address: address1})
self.assertEqual(peer.address, address1)
def test_manual_update_overwrite(self):
"""
Check if manual updates to the addresses dictionary are caught (overwrite same class).
"""
address = UDPv4Address("1.2.3.4", 5)
peer = Peer(TestPeer.test_key, UDPv4Address("6.7.8.9", 10))
peer.addresses.update({UDPv4Address: address})
self.assertEqual(peer.address, address)
| null |
5,083 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Gamma.
"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
from mindspore import context
skip_flag = context.get_context("device_target") != "Ascend"
def test_gamma_shape_error():
"""
Invalid shapes.
"""
with pytest.raises(ValueError):
msd.Gamma([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
def test_type():
with pytest.raises(TypeError):
msd.Gamma([0.], [1.], dtype=dtype.int32)
def test_name():
with pytest.raises(TypeError):
msd.Gamma([0.], [1.], name=1.0)
def test_seed():
with pytest.raises(TypeError):
msd.Gamma([0.], [1.], seed='seed')
def test_concentration1():
with pytest.raises(ValueError):
msd.Gamma([0.], [1.])
with pytest.raises(ValueError):
msd.Gamma([-1.], [1.])
def test_concentration0():
with pytest.raises(ValueError):
msd.Gamma([1.], [0.])
with pytest.raises(ValueError):
msd.Gamma([1.], [-1.])
def test_scalar():
with pytest.raises(TypeError):
msd.Gamma(3., [4.])
with pytest.raises(TypeError):
msd.Gamma([3.], -4.)
def test_arguments():
"""
args passing during initialization.
"""
g = msd.Gamma()
assert isinstance(g, msd.Distribution)
g = msd.Gamma([3.0], [4.0], dtype=dtype.float32)
assert isinstance(g, msd.Distribution)
class GammaProb(nn.Cell):
"""
Gamma distribution: initialize with concentration1/concentration0.
"""
def __init__(self):
super(GammaProb, self).__init__()
self.gamma = msd.Gamma([3.0, 4.0], [1.0, 1.0], dtype=dtype.float32)
def construct(self, value):
prob = self.gamma.prob(value)
log_prob = self.gamma.log_prob(value)
return prob + log_prob
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_gamma_prob():
"""
Test probability functions: passing value through construct.
"""
net = GammaProb()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class GammaProb1(nn.Cell):
"""
Gamma distribution: initialize without concentration1/concentration0.
"""
def __init__(self):
super(GammaProb1, self).__init__()
self.gamma = msd.Gamma()
def construct(self, value, concentration1, concentration0):
prob = self.gamma.prob(value, concentration1, concentration0)
log_prob = self.gamma.log_prob(value, concentration1, concentration0)
return prob + log_prob
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_gamma_prob1():
"""
Test probability functions: passing concentration1/concentration0, value through construct.
"""
net = GammaProb1()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
concentration1 = Tensor([2.0, 3.0], dtype=dtype.float32)
concentration0 = Tensor([1.0], dtype=dtype.float32)
ans = net(value, concentration1, concentration0)
assert isinstance(ans, Tensor)
class GammaKl(nn.Cell):
"""
Test class: kl_loss of Gamma distribution.
"""
def __init__(self):
super(GammaKl, self).__init__()
self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.g2 = msd.Gamma(dtype=dtype.float32)
def construct(self, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
kl1 = self.g1.kl_loss('Gamma', concentration1_b, concentration0_b)
kl2 = self.g2.kl_loss('Gamma', concentration1_b, concentration0_b, concentration1_a, concentration0_a)
return kl1 + kl2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_kl():
"""
Test kl_loss.
"""
net = GammaKl()
concentration1_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration0_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration1_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
concentration0_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(concentration1_b, concentration0_b, concentration1_a, concentration0_a)
assert isinstance(ans, Tensor)
class GammaCrossEntropy(nn.Cell):
"""
Test class: cross_entropy of Gamma distribution.
"""
def __init__(self):
super(GammaCrossEntropy, self).__init__()
self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.g2 = msd.Gamma(dtype=dtype.float32)
def construct(self, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
h1 = self.g1.cross_entropy('Gamma', concentration1_b, concentration0_b)
h2 = self.g2.cross_entropy('Gamma', concentration1_b, concentration0_b, concentration1_a, concentration0_a)
return h1 + h2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_cross_entropy():
"""
Test cross entropy between Gamma distributions.
"""
net = GammaCrossEntropy()
concentration1_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration0_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration1_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
concentration0_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(concentration1_b, concentration0_b, concentration1_a, concentration0_a)
assert isinstance(ans, Tensor)
class GammaBasics(nn.Cell):
"""
Test class: basic mean/sd function.
"""
def __init__(self):
super(GammaBasics, self).__init__()
self.g = msd.Gamma(np.array([3.0, 4.0]), np.array([4.0, 6.0]), dtype=dtype.float32)
def construct(self):
mean = self.g.mean()
sd = self.g.sd()
mode = self.g.mode()
return mean + sd + mode
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def METHOD_NAME():
"""
Test mean/sd/mode/entropy functionality of Gamma.
"""
net = GammaBasics()
ans = net()
assert isinstance(ans, Tensor)
class GammaConstruct(nn.Cell):
"""
Gamma distribution: going through construct.
"""
def __init__(self):
super(GammaConstruct, self).__init__()
self.gamma = msd.Gamma([3.0], [4.0])
self.gamma1 = msd.Gamma()
def construct(self, value, concentration1, concentration0):
prob = self.gamma('prob', value)
prob1 = self.gamma('prob', value, concentration1, concentration0)
prob2 = self.gamma1('prob', value, concentration1, concentration0)
return prob + prob1 + prob2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_gamma_construct():
"""
Test probability function going through construct.
"""
net = GammaConstruct()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
concentration1 = Tensor([0.0], dtype=dtype.float32)
concentration0 = Tensor([1.0], dtype=dtype.float32)
ans = net(value, concentration1, concentration0)
assert isinstance(ans, Tensor)
| null |
5,084 |
from bs4 import BeautifulSoup
import requests
import re
import json
from fastapi import HTTPException
from jaseci.jsorc.live_actions import jaseci_action
from jaseci.jsorc.remote_actions import launch_server
url_link = "https://imsdb.com/all-scripts.html"
_whitespace_re = re.compile(r"\s+")
def METHOD_NAME(film_url):
"""
Scrape the script from the given url.
Parameters:
------------
    film_url : String, a URL to the script from https://imsdb.com/
Return:
-------------
    html_content : bs4.element.ResultSet. The bs4 result set containing the raw movie script with HTML tags.
"""
html_doc = requests.get(film_url).text
soup = BeautifulSoup(html_doc, "html.parser")
html_content = soup.find_all("pre")
return html_content
def get_scenes(movie_script):
"""
Extract movie scenes from the movie script.
Parameters:
------------
    movie_script : bs4.element.ResultSet. The bs4 result set containing the raw movie script with HTML tags.
Return:
------------
scenes : list. movie scenes as a list.
"""
scenes = []
for item in movie_script[0].find_all("b"):
tag = item.get_text()
tag = re.sub(_whitespace_re, " ", tag).strip()
if tag.__contains__("EXT.") or tag.__contains__("INT."):
scenes.append(tag)
return scenes
def find_between(text, first, last):
"""
Get the substring between two substrings.
Parameters:
------------
    text : String. The main input string to be chunked.
    first : String. The substring marking the start of the chunk.
    last : String. The substring marking the end of the chunk.
Return:
------------
substring: String.
"""
start = text.index(first) + len(first)
end = text.index(last, start)
substring = text[start:end]
return substring
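# Illustrative usage of find_between (the strings below are made up; real inputs
# are scene headings from an IMSDb script):
# >>> find_between("INT. LAB actors talk EXT. STREET", "INT. LAB", "EXT. STREET")
# ' actors talk '
# i.e. the text of one scene bounded by two consecutive scene headings.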
def actors_content(scene):
"""
Extract information about scenes.
Parameters:
------------
scene : String
Return:
------------
    scene_desc: String. Description text of the scene.
    actors_dict: Dictionary. Maps each actor to a list of their lines (only returned when the scene contains actors).
"""
scene_items = scene.replace("\r", "").split("\n")
actors = []
actor_line = []
actors_dict = {}
for i in range(0, len(scene_items)):
leading_space = len(scene_items[i]) - len(scene_items[i].lstrip())
if leading_space == 25 and len(scene_items[i].strip()) != 0:
actor = scene_items[i].strip()
if actor.isalpha():
actors.append(actor)
actor_line.append(i)
if len(actors) != 0:
scene_desc = " ".join(scene_items[: actor_line[0]])
scene_desc = re.sub(_whitespace_re, " ", scene_desc).strip()
for i in range(0, len(actors)):
try:
actor_content = " ".join(
scene_items[actor_line[i] + 1 : actor_line[i + 1]]
)
actor_content = re.sub(_whitespace_re, " ", actor_content).strip()
except IndexError as e:
actor_content = " ".join(scene_items[actor_line[i] + 1 :])
actor_content = re.sub(_whitespace_re, " ", actor_content).strip()
if not (actors[i] in actors_dict.keys()):
actors_dict[actors[i]] = [actor_content]
else:
get_prev = actors_dict[actors[i]]
actors_dict[actors[i]] = get_prev + [actor_content]
return scene_desc, actors_dict
else:
scene_desc = re.sub(_whitespace_re, " ", scene).strip()
return scene_desc
@jaseci_action(act_group=["scrapy"], allow_remote=True)
def scrape_content(url: str):
try:
movie_script = METHOD_NAME(url)
movie_scenes = {}
full_script = movie_script[0].get_text()
scenes = get_scenes(movie_script)
for i in range(0, len(scenes)):
try:
scene_content = find_between(full_script, scenes[i], scenes[i + 1])
except IndexError as e:
scene_content = full_script.split(scenes[i])[1]
movie_scenes[scenes[i]] = actors_content(scene_content)
with open("movie_data.json", "w") as outfile:
json.dump(movie_scenes, outfile)
return movie_scenes
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
| null |
5,085 |
from sympy.core.function import (Derivative, diff)
from sympy.core.numbers import (Float, I, nan, oo, pi)
from sympy.core.relational import Eq
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.delta_functions import (DiracDelta, Heaviside)
from sympy.functions.special.singularity_functions import SingularityFunction
from sympy.series.order import O
from sympy.core.expr import unchanged
from sympy.core.function import ArgumentIndexError
from sympy.testing.pytest import raises
x, y, a, n = symbols('x y a n')
def METHOD_NAME():
assert SingularityFunction(x, 4, 5).fdiff() == 5*SingularityFunction(x, 4, 4)
assert SingularityFunction(x, 4, -1).fdiff() == SingularityFunction(x, 4, -2)
assert SingularityFunction(x, 4, 0).fdiff() == SingularityFunction(x, 4, -1)
assert SingularityFunction(y, 6, 2).diff(y) == 2*SingularityFunction(y, 6, 1)
assert SingularityFunction(y, -4, -1).diff(y) == SingularityFunction(y, -4, -2)
assert SingularityFunction(y, 4, 0).diff(y) == SingularityFunction(y, 4, -1)
assert SingularityFunction(y, 4, 0).diff(y, 2) == SingularityFunction(y, 4, -2)
n = Symbol('n', positive=True)
assert SingularityFunction(x, a, n).fdiff() == n*SingularityFunction(x, a, n - 1)
assert SingularityFunction(y, a, n).diff(y) == n*SingularityFunction(y, a, n - 1)
expr_in = 4*SingularityFunction(x, a, n) + 3*SingularityFunction(x, a, -1) + -10*SingularityFunction(x, a, 0)
expr_out = n*4*SingularityFunction(x, a, n - 1) + 3*SingularityFunction(x, a, -2) - 10*SingularityFunction(x, a, -1)
assert diff(expr_in, x) == expr_out
assert SingularityFunction(x, -10, 5).diff(evaluate=False) == (
Derivative(SingularityFunction(x, -10, 5), x))
raises(ArgumentIndexError, lambda: SingularityFunction(x, 4, 5).fdiff(2))
def test_eval():
assert SingularityFunction(x, a, n).func == SingularityFunction
assert unchanged(SingularityFunction, x, 5, n)
assert SingularityFunction(5, 3, 2) == 4
assert SingularityFunction(3, 5, 1) == 0
assert SingularityFunction(3, 3, 0) == 1
assert SingularityFunction(3, 3, 1) == 0
assert SingularityFunction(Symbol('z', zero=True), 0, 1) == 0 # like sin(z) == 0
assert SingularityFunction(4, 4, -1) is oo
assert SingularityFunction(4, 2, -1) == 0
assert SingularityFunction(4, 7, -1) == 0
assert SingularityFunction(5, 6, -2) == 0
assert SingularityFunction(4, 2, -2) == 0
assert SingularityFunction(4, 4, -2) is oo
assert (SingularityFunction(6.1, 4, 5)).evalf(5) == Float('40.841', '5')
assert SingularityFunction(6.1, pi, 2) == (-pi + 6.1)**2
assert SingularityFunction(x, a, nan) is nan
assert SingularityFunction(x, nan, 1) is nan
assert SingularityFunction(nan, a, n) is nan
raises(ValueError, lambda: SingularityFunction(x, a, I))
raises(ValueError, lambda: SingularityFunction(2*I, I, n))
raises(ValueError, lambda: SingularityFunction(x, a, -3))
def test_leading_term():
l = Symbol('l', positive=True)
assert SingularityFunction(x, 3, 2).as_leading_term(x) == 0
assert SingularityFunction(x, -2, 1).as_leading_term(x) == 2
assert SingularityFunction(x, 0, 0).as_leading_term(x) == 1
assert SingularityFunction(x, 0, 0).as_leading_term(x, cdir=-1) == 0
assert SingularityFunction(x, 0, -1).as_leading_term(x) == 0
assert SingularityFunction(x, 0, -2).as_leading_term(x) == 0
assert (SingularityFunction(x + l, 0, 1)/2\
- SingularityFunction(x + l, l/2, 1)\
+ SingularityFunction(x + l, l, 1)/2).as_leading_term(x) == -x/2
def test_series():
l = Symbol('l', positive=True)
assert SingularityFunction(x, -3, 2).series(x) == x**2 + 6*x + 9
assert SingularityFunction(x, -2, 1).series(x) == x + 2
assert SingularityFunction(x, 0, 0).series(x) == 1
assert SingularityFunction(x, 0, 0).series(x, dir='-') == 0
assert SingularityFunction(x, 0, -1).series(x) == 0
assert SingularityFunction(x, 0, -2).series(x) == 0
assert (SingularityFunction(x + l, 0, 1)/2\
- SingularityFunction(x + l, l/2, 1)\
+ SingularityFunction(x + l, l, 1)/2).nseries(x) == -x/2 + O(x**6)
def test_rewrite():
assert SingularityFunction(x, 4, 5).rewrite(Piecewise) == (
Piecewise(((x - 4)**5, x - 4 >= 0), (0, True)))
assert SingularityFunction(x, -10, 0).rewrite(Piecewise) == (
Piecewise((1, x + 10 >= 0), (0, True)))
assert SingularityFunction(x, 2, -1).rewrite(Piecewise) == (
Piecewise((oo, Eq(x - 2, 0)), (0, True)))
assert SingularityFunction(x, 0, -2).rewrite(Piecewise) == (
Piecewise((oo, Eq(x, 0)), (0, True)))
n = Symbol('n', nonnegative=True)
p = SingularityFunction(x, a, n).rewrite(Piecewise)
assert p == (
Piecewise(((x - a)**n, x - a >= 0), (0, True)))
assert p.subs(x, a).subs(n, 0) == 1
expr_in = SingularityFunction(x, 4, 5) + SingularityFunction(x, -3, -1) - SingularityFunction(x, 0, -2)
expr_out = (x - 4)**5*Heaviside(x - 4, 1) + DiracDelta(x + 3) - DiracDelta(x, 1)
assert expr_in.rewrite(Heaviside) == expr_out
assert expr_in.rewrite(DiracDelta) == expr_out
assert expr_in.rewrite('HeavisideDiracDelta') == expr_out
expr_in = SingularityFunction(x, a, n) + SingularityFunction(x, a, -1) - SingularityFunction(x, a, -2)
expr_out = (x - a)**n*Heaviside(x - a, 1) + DiracDelta(x - a) + DiracDelta(a - x, 1)
assert expr_in.rewrite(Heaviside) == expr_out
assert expr_in.rewrite(DiracDelta) == expr_out
assert expr_in.rewrite('HeavisideDiracDelta') == expr_out
| null |
5,086 |
import logging
from dataclasses import dataclass
from typing import List, Tuple, Union
from pymobiledevice3.exceptions import InvalidServiceError, NoDeviceConnectedError, PyMobileDevice3Exception, \
StartServiceError
from pymobiledevice3.lockdown_service_provider import LockdownServiceProvider
from pymobiledevice3.remote.bonjour import DEFAULT_BONJOUR_TIMEOUT, get_remoted_addresses
from pymobiledevice3.remote.remotexpc import RemoteXPCConnection
from pymobiledevice3.service_connection import LockdownServiceConnection
@dataclass
class RSDDevice:
hostname: str
udid: str
product_type: str
os_version: str
# from remoted ([RSDRemoteNCMDeviceDevice createPortListener])
RSD_PORT = 58783
class RemoteServiceDiscoveryService(LockdownServiceProvider):
def __init__(self, address: Tuple[str, int]):
super().__init__()
self.service = RemoteXPCConnection(address)
self.peer_info = None
@property
def product_version(self) -> str:
return self.peer_info['Properties']['OSVersion']
def connect(self) -> None:
self.service.connect()
self.peer_info = self.service.receive_response()
self.udid = self.peer_info['Properties']['UniqueDeviceID']
self.product_type = self.peer_info['Properties']['ProductType']
def start_lockdown_service_without_checkin(self, name: str) -> LockdownServiceConnection:
return LockdownServiceConnection.create_using_tcp(self.service.address[0], self.METHOD_NAME(name))
def start_lockdown_service(self, name: str, escrow_bag: bytes = None) -> LockdownServiceConnection:
service = self.start_lockdown_service_without_checkin(name)
checkin = {'Label': 'pymobiledevice3', 'ProtocolVersion': '2', 'Request': 'RSDCheckin'}
if escrow_bag is not None:
checkin['EscrowBag'] = escrow_bag
response = service.send_recv_plist(checkin)
if response['Request'] != 'RSDCheckin':
            raise PyMobileDevice3Exception(f'Invalid response for RSDCheckin: {response}. Expected "RSDCheckin"')
response = service.recv_plist()
if response['Request'] != 'StartService':
            raise PyMobileDevice3Exception(f'Invalid response for RSDCheckin: {response}. Expected "StartService"')
return service
async def aio_start_lockdown_service(self, name: str, escrow_bag: bytes = None) -> LockdownServiceConnection:
service = self.start_lockdown_service(name, escrow_bag=escrow_bag)
await service.aio_start()
return service
def start_lockdown_developer_service(self, name, escrow_bag: bytes = None) -> LockdownServiceConnection:
try:
return self.start_lockdown_service_without_checkin(name)
except StartServiceError:
logging.getLogger(self.__module__).error(
'Failed to connect to required service. Make sure DeveloperDiskImage.dmg has been mounted. '
'You can do so using: pymobiledevice3 mounter mount'
)
raise
def start_remote_service(self, name: str) -> RemoteXPCConnection:
service = RemoteXPCConnection((self.service.address[0], self.METHOD_NAME(name)))
return service
def start_service(self, name: str) -> Union[RemoteXPCConnection, LockdownServiceConnection]:
service = self.peer_info['Services'][name]
service_properties = service.get('Properties', {})
use_remote_xpc = service_properties.get('UsesRemoteXPC', False)
return self.start_remote_service(name) if use_remote_xpc else self.start_lockdown_service(name)
def METHOD_NAME(self, name: str) -> int:
"""takes a service name and returns the port that service is running on if the service exists"""
service = self.peer_info['Services'].get(name)
if service is None:
raise InvalidServiceError(f'No such service: {name}')
return int(service['Port'])
def __enter__(self) -> 'RemoteServiceDiscoveryService':
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.service.close()
def __repr__(self) -> str:
return (f'<{self.__class__.__name__} PRODUCT:{self.product_type} VERSION:{self.product_version} '
f'UDID:{self.udid}>')
def get_remoted_devices(timeout: int = DEFAULT_BONJOUR_TIMEOUT) -> List[RSDDevice]:
result = []
for hostname in get_remoted_addresses(timeout):
with RemoteServiceDiscoveryService((hostname, RSD_PORT)) as rsd:
properties = rsd.peer_info['Properties']
result.append(RSDDevice(hostname=hostname, udid=properties['UniqueDeviceID'],
product_type=properties['ProductType'], os_version=properties['OSVersion']))
return result
def get_remoted_device(udid: str, timeout: int = DEFAULT_BONJOUR_TIMEOUT) -> RSDDevice:
devices = get_remoted_devices(timeout=timeout)
for device in devices:
if device.udid == udid:
return device
raise NoDeviceConnectedError()
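# Illustrative usage sketch (the UDID is a placeholder; the device must be
# reachable on the remoted bonjour interface for this to work):
# device = get_remoted_device('00008110-001234567890ABCD')
# with RemoteServiceDiscoveryService((device.hostname, RSD_PORT)) as rsd:
#     print(rsd)  # e.g. <RemoteServiceDiscoveryService PRODUCT:... VERSION:... UDID:...>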
| null |
5,087 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for AFL fuzzer."""
import json
import os
import shutil
import subprocess
from fuzzers import utils
def prepare_build_environment():
"""Set environment variables used to build targets for AFL-based
fuzzers."""
cflags = ['-fsanitize-coverage=trace-pc-guard']
utils.append_flags('CFLAGS', cflags)
utils.append_flags('CXXFLAGS', cflags)
os.environ['CC'] = 'clang'
os.environ['CXX'] = 'clang++'
os.environ['FUZZER_LIB'] = '/libAFL.a'
def build():
"""Build benchmark."""
prepare_build_environment()
utils.build_benchmark()
print('[post_build] Copying afl-fuzz to $OUT directory')
# Copy out the afl-fuzz binary as a build artifact.
shutil.copy('/fafuzz/afl-fuzz', os.environ['OUT'])
def get_stats(output_corpus, fuzzer_log): # pylint: disable=unused-argument
"""Gets fuzzer stats for AFL."""
# Get a dictionary containing the stats AFL reports.
stats_file = os.path.join(output_corpus, 'fuzzer_stats')
with open(stats_file, encoding='utf-8') as file_handle:
stats_file_lines = file_handle.read().splitlines()
stats_file_dict = {}
for stats_line in stats_file_lines:
key, value = stats_line.split(': ')
stats_file_dict[key.strip()] = value.strip()
# Report to FuzzBench the stats it accepts.
stats = {'execs_per_sec': float(stats_file_dict['execs_per_sec'])}
return json.dumps(stats)
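# Illustrative fuzzer_stats line and how get_stats above parses it (the numbers
# are made up; the layout is AFL's one 'key : value' pair per line):
# >>> key, value = 'execs_per_sec     : 1234.56'.split(': ')
# >>> {key.strip(): float(value.strip())}
# {'execs_per_sec': 1234.56}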
def prepare_fuzz_environment(input_corpus):
"""Prepare to fuzz with AFL or another AFL-based fuzzer."""
# Tell AFL to not use its terminal UI so we get usable logs.
os.environ['AFL_NO_UI'] = '1'
# Skip AFL's CPU frequency check (fails on Docker).
os.environ['AFL_SKIP_CPUFREQ'] = '1'
# No need to bind affinity to one core, Docker enforces 1 core usage.
os.environ['AFL_NO_AFFINITY'] = '1'
# AFL will abort on startup if the core pattern sends notifications to
# external programs. We don't care about this.
os.environ['AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES'] = '1'
# Don't exit when crashes are found. This can happen when corpus from
# OSS-Fuzz is used.
os.environ['AFL_SKIP_CRASHES'] = '1'
# Shuffle the queue
os.environ['AFL_SHUFFLE_QUEUE'] = '1'
# AFL needs at least one non-empty seed to start.
utils.create_seed_file_for_empty_corpus(input_corpus)
def check_skip_det_compatible(additional_flags):
""" Checks if additional flags are compatible with '-d' option"""
# AFL refuses to take in '-d' with '-M' or '-S' options for parallel mode.
# (cf. https://github.com/google/AFL/blob/8da80951/afl-fuzz.c#L7477)
if '-M' in additional_flags or '-S' in additional_flags:
return False
return True
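# Quick illustration of the compatibility check above (flag values are examples):
# '-d' stays enabled for ordinary extra flags but is dropped for parallel mode.
# >>> check_skip_det_compatible(['-x', '/path/to/dict'])
# True
# >>> check_skip_det_compatible(['-M', 'main'])
# False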
def run_afl_fuzz(input_corpus,
output_corpus,
target_binary,
additional_flags=None,
hide_output=False):
"""Run afl-fuzz."""
# Spawn the afl fuzzing process.
print('[run_afl_fuzz] Running target with afl-fuzz')
command = [
'./afl-fuzz',
        '-A 1',  # Enable FA mode.
'-i',
input_corpus,
'-o',
output_corpus,
# Use no memory limit as ASAN doesn't play nicely with one.
'-m',
'none',
'-t',
'1000+', # Use same default 1 sec timeout, but add '+' to skip hangs.
]
    # Use '-d' to skip deterministic mode, as long as it is compatible with
# additional flags.
if not additional_flags or check_skip_det_compatible(additional_flags):
command.append('-d')
if additional_flags:
command.extend(additional_flags)
dictionary_path = utils.get_dictionary_path(target_binary)
if dictionary_path:
command.extend(['-x', dictionary_path])
command += [
'--',
target_binary,
        # Pass INT_MAX to afl to maximize the number of persistent loops it
# performs.
'2147483647'
]
print('[run_afl_fuzz] Running command: ' + ' '.join(command))
output_stream = subprocess.DEVNULL if hide_output else None
subprocess.check_call(command, stdout=output_stream, stderr=output_stream)
def METHOD_NAME(input_corpus, output_corpus, target_binary):
"""Run afl-fuzz on target."""
prepare_fuzz_environment(input_corpus)
run_afl_fuzz(input_corpus, output_corpus, target_binary)
| null |
5,088 |
# SPDX-FileCopyrightText: 2019 Collin Cunningham for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
LED Disco Tie with Bluetooth
=========================================================
Give your suit an sound-reactive upgrade with Circuit
Playground Bluefruit & Neopixels. Set color and animation
mode using the Bluefruit LE Connect app.
Author: Collin Cunningham for Adafruit Industries, 2019
"""
# pylint: disable=global-statement
import time
import array
import math
import audiobusio
import board
from rainbowio import colorwheel
import neopixel
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
from adafruit_bluefruit_connect.packet import Packet
from adafruit_bluefruit_connect.color_packet import ColorPacket
from adafruit_bluefruit_connect.button_packet import ButtonPacket
ble = BLERadio()
uart_service = UARTService()
advertisement = ProvideServicesAdvertisement(uart_service)
# User input vars
mode = 0 # 0=audio, 1=rainbow, 2=larsen_scanner, 3=solid
user_color = (127, 0, 0)
# Audio meter vars
PEAK_COLOR = (100, 0, 255)
NUM_PIXELS = 10
NEOPIXEL_PIN = board.A1
# Use this instead if you want to use the NeoPixels on the Circuit Playground Bluefruit.
# NEOPIXEL_PIN = board.NEOPIXEL
CURVE = 2
SCALE_EXPONENT = math.pow(10, CURVE * -0.1)
NUM_SAMPLES = 160
# Restrict value to be between floor and ceiling.
def constrain(value, floor, ceiling):
return max(floor, min(value, ceiling))
# Scale input_value between output_min and output_max, exponentially.
def log_scale(input_value, input_min, input_max, output_min, output_max):
normalized_input_value = (input_value - input_min) / \
(input_max - input_min)
return output_min + \
math.pow(normalized_input_value, SCALE_EXPONENT) \
* (output_max - output_min)
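# Small sanity examples for the two helpers above (values chosen arbitrarily):
# constrain(15, 0, 10) -> 10 and constrain(-3, 0, 10) -> 0, while
# log_scale(level, input_floor, input_ceiling, 0, NUM_PIXELS) maps a clamped
# microphone reading onto 0..NUM_PIXELS with a curve set by CURVE/SCALE_EXPONENT.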
# Remove DC bias before computing RMS.
def normalized_rms(values):
minbuf = int(METHOD_NAME(values))
samples_sum = sum(
float(sample - minbuf) * (sample - minbuf)
for sample in values
)
return math.sqrt(samples_sum / len(values))
def METHOD_NAME(values):
return sum(values) / len(values)
def volume_color(volume):
return 200, volume * (255 // NUM_PIXELS), 0
# Set up NeoPixels and turn them all off.
pixels = neopixel.NeoPixel(NEOPIXEL_PIN, NUM_PIXELS, brightness=0.1, auto_write=False)
pixels.fill(0)
pixels.show()
mic = audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA,
sample_rate=16000, bit_depth=16)
# Record an initial sample to calibrate. Assume it's quiet when we start.
samples = array.array('H', [0] * NUM_SAMPLES)
mic.record(samples, len(samples))
# Set lowest level to expect, plus a little.
input_floor = normalized_rms(samples) + 10
# Corresponds to sensitivity: lower means more pixels light up with lower sound
input_ceiling = input_floor + 500
peak = 0
def rainbow_cycle(delay):
for j in range(255):
for i in range(NUM_PIXELS):
pixel_index = (i * 256 // NUM_PIXELS) + j
pixels[i] = colorwheel(pixel_index & 255)
pixels.show()
time.sleep(delay)
def audio_meter(new_peak):
mic.record(samples, len(samples))
magnitude = normalized_rms(samples)
# Compute scaled logarithmic reading in the range 0 to NUM_PIXELS
c = log_scale(constrain(magnitude, input_floor, input_ceiling),
input_floor, input_ceiling, 0, NUM_PIXELS)
# Light up pixels that are below the scaled and interpolated magnitude.
pixels.fill(0)
for i in range(NUM_PIXELS):
if i < c:
pixels[i] = volume_color(i)
# Light up the peak pixel and animate it slowly dropping.
if c >= new_peak:
new_peak = min(c, NUM_PIXELS - 1)
elif new_peak > 0:
new_peak = new_peak - 1
if new_peak > 0:
pixels[int(new_peak)] = PEAK_COLOR
pixels.show()
return new_peak
pos = 0 # position
direction = 1 # direction of "eye"
def larsen_set(index, color):
if index < 0:
return
else:
pixels[index] = color
def larsen(delay):
global pos
global direction
color_dark = (int(user_color[0]/8), int(user_color[1]/8),
int(user_color[2]/8))
color_med = (int(user_color[0]/2), int(user_color[1]/2),
int(user_color[2]/2))
larsen_set(pos - 2, color_dark)
larsen_set(pos - 1, color_med)
larsen_set(pos, user_color)
larsen_set(pos + 1, color_med)
if (pos + 2) < NUM_PIXELS:
        # Dim trailing pixel in the user's color, do not exceed number of pixels
larsen_set(pos + 2, color_dark)
pixels.write()
time.sleep(delay)
# Erase all and draw a new one next time
for j in range(-2, 2):
larsen_set(pos + j, (0, 0, 0))
if (pos + 2) < NUM_PIXELS:
larsen_set(pos + 2, (0, 0, 0))
# Bounce off ends of strip
pos += direction
if pos < 0:
pos = 1
direction = -direction
elif pos >= (NUM_PIXELS - 1):
pos = NUM_PIXELS - 2
direction = -direction
def solid(new_color):
pixels.fill(new_color)
pixels.show()
def map_value(value, in_min, in_max, out_min, out_max):
out_range = out_max - out_min
in_range = in_max - in_min
return out_min + out_range * ((value - in_min) / in_range)
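# Illustrative: map_value is a plain linear rescale, e.g.
# map_value(7.0, 10.0, 0.0, 0.01, 0.3) ~= 0.097, which is how change_speed below
# turns the 1..10 speed setting into a per-frame delay in seconds.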
speed = 6.0
wait = 0.097
def change_speed(mod, old_speed):
new_speed = constrain(old_speed + mod, 1.0, 10.0)
return(new_speed, map_value(new_speed, 10.0, 0.0, 0.01, 0.3))
def animate(pause, top):
# Determine animation based on mode
if mode == 0:
top = audio_meter(top)
elif mode == 1:
rainbow_cycle(0.001)
elif mode == 2:
larsen(pause)
elif mode == 3:
solid(user_color)
return top
while True:
ble.start_advertising(advertisement)
while not ble.connected:
# Animate while disconnected
peak = animate(wait, peak)
# While BLE is connected
while ble.connected:
if uart_service.in_waiting:
try:
packet = Packet.from_stream(uart_service)
# Ignore malformed packets.
except ValueError:
continue
# Received ColorPacket
if isinstance(packet, ColorPacket):
user_color = packet.color
# Received ButtonPacket
elif isinstance(packet, ButtonPacket):
if packet.pressed:
if packet.button == ButtonPacket.UP:
speed, wait = change_speed(1, speed)
elif packet.button == ButtonPacket.DOWN:
speed, wait = change_speed(-1, speed)
elif packet.button == ButtonPacket.BUTTON_1:
mode = 0
elif packet.button == ButtonPacket.BUTTON_2:
mode = 1
elif packet.button == ButtonPacket.BUTTON_3:
mode = 2
elif packet.button == ButtonPacket.BUTTON_4:
mode = 3
# Animate while connected
peak = animate(wait, peak)
| null |
5,089 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget, QTabBar
from mantidqt.utils.qt import load_ui
from mantidqt.utils.qt.line_edit_double_validator import LineEditDoubleValidator
from mantidqt.widgets.plotconfigdialog.axestabwidget import AxProperties
from mantidqt.widgets.plotconfigdialog.colorselector import ColorSelector
class AxesTabWidgetView(QWidget):
def __init__(self, parent=None):
super(AxesTabWidgetView, self).__init__(parent=parent)
self.ui = load_ui(__file__, "axes_tab_widget.ui", baseinstance=self)
self.color_selector_widget = ColorSelector(parent=self)
self.color_selector_layout.replaceWidget(self.color_selector_dummy_widget, self.color_selector_widget)
self.setAttribute(Qt.WA_DeleteOnClose, True)
# QTabBar cannot be created in QTDesigner
# QTabWidget not suitable because we reuse controls for each axis
self.axis_tab_bar = QTabBar(parent=self)
self.x_tab = self.axis_tab_bar.addTab("x")
self.y_tab = self.axis_tab_bar.addTab("y")
self.z_tab = self.axis_tab_bar.addTab("z")
self.axis_tab_bar_layout.replaceWidget(self.dummy_axis_tab_bar, self.axis_tab_bar)
self.lower_limit_validator = LineEditDoubleValidator(self.lower_limit_line_edit, 0.0)
self.upper_limit_validator = LineEditDoubleValidator(self.upper_limit_line_edit, 1.0)
self.lower_limit_line_edit.setValidator(self.lower_limit_validator)
self.upper_limit_line_edit.setValidator(self.upper_limit_validator)
def populate_select_axes_combo_box(self, axes_names):
self.select_axes_combo_box.addItems(axes_names)
def METHOD_NAME(self, new_text):
"""Replace the text of the selected item in the combo box"""
current_index = self.select_axes_combo_box.currentIndex()
self.select_axes_combo_box.setItemText(current_index, new_text)
def get_selected_ax_name(self):
return self.select_axes_combo_box.currentText()
def get_properties(self):
return AxProperties.from_view(self)
def get_title(self):
return self.axes_title_line_edit.text()
def set_title(self, title):
self.axes_title_line_edit.setText(title)
def get_show_minor_ticks(self):
return self.show_minor_ticks_check_box.isChecked()
def set_show_minor_ticks(self, check):
self.show_minor_ticks_check_box.setChecked(check)
def get_show_minor_gridlines(self):
return self.show_minor_gridlines_check_box.isChecked()
def set_show_minor_gridlines(self, check):
self.show_minor_gridlines_check_box.setChecked(check)
def set_minor_grid_tick_controls_visible(self, visible):
self.show_minor_gridlines_check_box.setVisible(visible)
self.show_minor_ticks_check_box.setVisible(visible)
def set_minor_gridlines_check_box_enabled(self, enabled):
self.show_minor_gridlines_check_box.setEnabled(enabled)
def get_lower_limit(self):
return float(self.lower_limit_line_edit.text())
def get_upper_limit(self):
return float(self.upper_limit_line_edit.text())
def get_label(self):
return self.label_line_edit.text()
def get_scale(self):
return self.scale_combo_box.currentText()
def get_canvas_color(self):
return self.color_selector_widget.get_color()
def get_autoscale_enabled(self):
return self.autoscale.isChecked()
def get_z_axis_selector_checked(self):
return self.axis_tab_bar.currentIndex() == 2
def set_lower_limit(self, limit):
self.lower_limit_validator.last_valid_value = str(limit)
self.lower_limit_line_edit.setText(str(limit))
def set_upper_limit(self, limit):
self.upper_limit_validator.last_valid_value = str(limit)
self.upper_limit_line_edit.setText(str(limit))
def set_label(self, label):
self.label_line_edit.setText(label)
def set_scale(self, scale):
self.scale_combo_box.setCurrentText(scale.title())
def set_canvas_color(self, color_hex):
self.color_selector_widget.set_color(color_hex)
def set_autoscale_enabled(self, enabled):
self.autoscale.setChecked(enabled)
def set_limit_input_enabled(self, enabled):
self.lower_limit_line_edit.setEnabled(enabled)
self.upper_limit_line_edit.setEnabled(enabled)
def set_z_axis_selector_enabled(self, enabled):
self.axis_tab_bar.setTabEnabled(2, enabled)
def set_x_axis_selector_click(self):
self.axis_tab_bar.setCurrentIndex(0)
    def set_scale_combo_box_enabled(self, enabled):
        self.scale_combo_box.setEnabled(enabled)
def get_axis(self):
return "xyz"[self.axis_tab_bar.currentIndex()]
| null |
5,090 |
"""
ID list class for Jaseci
Generalized functions for managing '_ids' convention for lists of Jaseci
objects
parent_obj is the instance that the list belongs to
"""
from jaseci.utils.utils import logger
class IdList(list):
"""
ID list class for tracking lists of objects in Jaseci
ingest_list is a list of hex strings to convert to UUID and append.
"""
def __init__(self, parent_obj, auto_save=True, in_list=None):
self.parent_obj = parent_obj
self.cached_objects = []
self.heal_list = []
self.auto_save = auto_save
if in_list:
self.extend(in_list)
def cache_reset(self):
self.cached_objects = []
def add_obj(
self, obj, push_front=False, allow_dups=False, silent=False, bypass=False
):
"""Adds a obj obj to Jaseci object"""
self.parent_obj.check_hooks_match(obj)
if not allow_dups and obj.jid in self:
if not silent:
logger.warning(str(f"{obj} is already in {self.parent_obj}'s list"))
else:
self.cache_reset()
if push_front:
self.insert(0, obj.jid)
else:
self.append(obj.jid)
if not bypass:
if not obj.j_parent:
obj.j_parent = self.parent_obj.jid
self.save(obj)
self.save()
def add_obj_list(self, obj_list, push_front=False, allow_dups=False, silent=False):
self.cache_reset()
if push_front:
obj_list.reverse()
for i in obj_list:
self.add_obj(i, push_front=push_front, allow_dups=allow_dups, silent=silent)
def remove_obj(self, obj):
"""Remove a Jaseci obj from list"""
self.cache_reset()
self.remove(obj.jid)
self.save()
def heal(self):
for i in self.heal_list:
self.remove(i)
if len(self.heal_list) and hasattr(self.parent_obj, "save"):
self.save()
self.heal_list = []
def METHOD_NAME(self, obj):
"""Completely destroys a Jaseci obj obj by it's name"""
self.remove_obj(obj)
obj.destroy()
def obj_for_id_not_exist_error(self, item_id):
self.heal_list.append(item_id)
my_name = "id_list"
for k, v in self.parent_obj.__dict__.items():
if id(v) == id(self):
my_name = k
return f"{item_id} not found in {my_name} of {self.parent_obj}!"
def get_obj_by_name(self, name, kind=None, silent=False):
"""Returns a Jaseci obj obj by it's name"""
ret = None
for i in self:
obj = self.parent_obj._h.get_obj(self.parent_obj._m_id, i)
if not obj:
logger.critical(self.obj_for_id_not_exist_error(i))
continue
if obj.name == name:
if kind and obj.kind != kind:
continue
ret = obj
break
if not ret and not silent:
logger.error(str(f"object for '{name}' not found in '{self.parent_obj}'!"))
self.heal()
return ret
def has_obj_by_name(self, name, kind=None):
"""Returns whether a Jaseci obj exists by it's name"""
return self.get_obj_by_name(name, kind, silent=True) is not None
def remove_obj_by_name(self, name, kind=None):
"""Remove a Jaseci obj by it's name"""
self.remove_obj(self.get_obj_by_name(name, kind))
def destroy_obj_by_name(self, name, kind=None):
"""Destroy a Jaseci obj by it's name"""
self.METHOD_NAME(self.get_obj_by_name(name, kind))
def obj_list(self):
"""Return list of objects from ids"""
if not len(self.cached_objects):
for i in self:
obj = self.parent_obj._h.get_obj(self.parent_obj._m_id, i)
if not obj:
logger.critical(self.obj_for_id_not_exist_error(i))
else:
self.cached_objects.append(obj)
self.heal()
return self.cached_objects.copy()
def remove_all(self):
"""Remove a Jaseci obj obj by it's name"""
for i in self.obj_list():
self.remove_obj(i)
if len(self):
logger.critical(
str(
f"Remove all failed in id_list of {self.parent_obj} - "
+ f"still has {self}!"
)
)
def destroy_all(self):
"""Remove a Jaseci obj obj by it's name"""
for i in self.obj_list():
self.METHOD_NAME(i)
if len(self):
logger.critical(
str(
f"Destroy all failed in id_list of {self.parent_obj} - "
+ f"still has {self}!"
)
)
def first_obj(self):
"""Get first object in list"""
if not self:
logger.error(str(f"List in '{self.parent_obj}' is empty!"))
return None
return self.parent_obj._h.get_obj(self.parent_obj._m_id, self[0])
def pop_first_obj(self):
"""Get first object in list"""
ret = self.first_obj()
if ret:
self.remove_obj(ret)
return ret
def save(self, obj=None):
if self.auto_save:
self.parent_obj.save()
if obj:
obj.save()
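# Illustrative usage sketch (the attribute names are assumptions; any Jaseci object
# exposing jid, _m_id, _h, check_hooks_match() and save() can act as parent_obj):
# node.edge_ids = IdList(node)
# node.edge_ids.add_obj(edge)
# edges = node.edge_ids.obj_list()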
| null |
5,091 |
import inspect
import shutil
from pathlib import Path
import pytest
from lektor.assets import Directory
from lektor.assets import File
from lektor.assets import get_asset
from lektor.assets import get_asset_root
from lektor.project import Project
def write_text(path, text):
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(inspect.cleandoc(text))
@pytest.fixture(scope="module")
def project_path(tmp_path_factory, data_path):
"""Make our own private copy of the demo-project"""
demo = data_path / "demo-project"
path = tmp_path_factory.mktemp("test_serve") / "demo-project"
shutil.copytree(demo, path)
write_text(path / "assets/TEST.TXT", "Text file.\n")
return path
@pytest.fixture
def pad(project_path, save_sys_path):
return Project.from_path(project_path).make_env().new_pad()
@pytest.fixture(params=["/", "/static", "/static/demo.css", "/TEST.TXT"])
def asset_path(request):
return request.param
@pytest.fixture
def asset(pad, asset_path):
return pad.get_asset(asset_path)
@pytest.mark.parametrize(
"parent_path, child_name",
[
(None, "static"),
("/static", "demo.css"),
],
)
def test_get_asset(pad, parent_path, child_name):
parent = pad.get_asset(parent_path) if parent_path is not None else None
with pytest.deprecated_call(match=r"\bget_asset\b.*\bdeprecated\b") as warnings:
assert get_asset(pad, child_name, parent=parent).name == child_name
assert all(warning.filename == __file__ for warning in warnings)
def test_asset_source_filename(asset, pad, asset_path):
expected = Path(pad.env.root_path, "assets", asset_path.lstrip("/"))
assert asset.source_filename == str(expected)
@pytest.mark.parametrize(
"asset_path, url_path",
[
("/", "/"),
("/static", "/static/"),
("/static/demo.css", "/static/demo.css"),
("/TEST.TXT", "/TEST.txt"),
],
)
def test_asset_url_path(asset, url_path):
assert asset.url_path == url_path
@pytest.mark.parametrize(
"asset_path, expected",
[
("/", "/"),
("/static", "/static/"),
("/static/demo.css", None),
("/TEST.TXT", None),
],
)
def test_asset_url_content_path(asset, expected):
assert asset.url_content_path == expected
@pytest.mark.parametrize(
"asset_path, artifact_name",
[
("/", "/"),
("/static", "/static"),
("/static/demo.css", "/static/demo.css"),
("/TEST.TXT", "/TEST.txt"),
],
)
def test_asset_artifact_name(asset, artifact_name):
assert asset.artifact_name == artifact_name
@pytest.mark.parametrize(
"asset_path, child_names",
[
("/dir_with_index_html", {"index.html"}),
("/static", {"demo.css"}),
("/static/demo.css", set()),
],
)
def test_asset_children(asset, child_names):
assert set(child.name for child in asset.children) == child_names
@pytest.mark.parametrize("asset_path", ["/static"])
def test_asset_children_no_children_if_dir_unreadable(asset):
asset._paths = tuple(
path.with_name(path.name + "-missing") for path in asset._paths
)
assert len(set(asset.children)) == 0
@pytest.mark.parametrize(
"asset_path, name, child_name",
[
("/", "empty", "empty"),
("/", "missing", None),
("/", "foo-prefix-makes-me-excluded", None),
(
"/",
"_include_me_despite_underscore",
"_include_me_despite_underscore",
),
("/static", "demo.css", "demo.css"),
("/static/demo.css", "x", None),
("/empty", "demo.css", None),
# Invalid child names
("/static", ".", None),
("/", "", None),
],
)
def METHOD_NAME(asset, name, child_name):
if child_name is None:
assert asset.get_child(name) is None
else:
assert asset.get_child(name).name == child_name
def test_asset_get_child_from_url_param_deprecated(asset):
with pytest.deprecated_call(match=r"\bfrom_url\b.*\bignored\b") as warnings:
asset.get_child("name", from_url=True)
assert all(warning.filename == __file__ for warning in warnings)
@pytest.mark.parametrize(
"asset_path, url_path, expected",
[
("/", ("static",), "/static"),
("/", ("static", "demo.css"), "/static/demo.css"),
("/", ("static", "demo.css", "parent-not-a-dir"), None),
("/", ("missing", "demo.css"), None),
("/", ("foo-prefix-makes-me-excluded",), None),
("/", ("foo-prefix-makes-me-excluded", "static"), None),
("/static", ("demo.css",), "/static/demo.css"),
("/", ("TEST.txt",), "/TEST.txt"),
],
)
def test_resolve_url_path(asset, url_path, expected):
if expected is None:
assert asset.resolve_url_path(url_path) is None
else:
assert asset.resolve_url_path(url_path).artifact_name == expected
@pytest.mark.parametrize(
"asset_path, expected",
[
("/", "<Directory '/'>"),
("/static", "<Directory '/static'>"),
("/static/demo.css", "<File '/static/demo.css'>"),
("/TEST.TXT", "<File '/TEST.txt'>"),
],
)
def test_asset_repr(asset, expected):
assert repr(asset) == expected
@pytest.fixture
def asset_paths(tmp_path):
paths = tmp_path / "assets1", tmp_path / "assets2"
for path in paths:
path.mkdir()
return paths
@pytest.fixture
def asset_root(pad, asset_paths):
return get_asset_root(pad, asset_paths)
def test_directory_merges_subdirectories(asset_root, asset_paths):
for n, path in enumerate(asset_paths):
subdir = path / "subdir"
subdir.mkdir()
subdir.joinpath(f"file{n}").touch()
subdir_asset = asset_root.get_child("subdir")
child_names = [child.name for child in subdir_asset.children]
child_names.sort()
assert child_names == ["file0", "file1"]
def test_directory_file_shadows_directory(asset_root, asset_paths):
for n, path in enumerate(asset_paths):
child_path = path / "child"
if n == 0:
child_path.touch()
else:
child_path.mkdir()
children = list(asset_root.children)
assert len(children) == 1
assert isinstance(children[0], File)
assert children[0].name == "child"
def test_directory_directory_conflicts_with_file(asset_root, asset_paths):
for n, path in enumerate(asset_paths):
child_path = path / "child"
if n == 0:
child_path.mkdir()
else:
child_path.touch()
children = list(asset_root.children)
assert len(children) == 1
assert all(isinstance(child, Directory) for child in children)
| null |
5,092 |
import binascii
import json
import os
from .gabi.attributes import make_attribute_list
from .gabi.keys import DefaultSystemParameters
from .gabi.proofs import createChallenge
from .wrappers import challenge_response, serialize_proof_d, unserialize_proof_d
from ..primitives.structs import ipack, iunpack
from ...identity_formats import Attestation, IdentityAlgorithm
class IRMAAttestation(Attestation):
def __init__(self, sign_date, proofd, z=None):
self.sign_date = sign_date
self.proofd = proofd
self.z = z
def serialize(self):
return ipack(self.sign_date) + serialize_proof_d(self.proofd)
def serialize_private(self, PK):
return ipack(self.z) + ipack(self.sign_date) + serialize_proof_d(self.proofd)
@classmethod
def unserialize(cls, s, id_format):
sign_date, rem = iunpack(s)
return IRMAAttestation(sign_date, unserialize_proof_d(rem))
@classmethod
def METHOD_NAME(cls, SK, s, id_format):
z, rem = iunpack(s)
sign_date, rem = iunpack(rem)
return IRMAAttestation(sign_date, unserialize_proof_d(rem), z)
class KeyStub:
def public_key(self):
return self
def serialize(self):
return b''
@classmethod
def unserialize(cls, s):
return KeyStub()
class IRMAExactAlgorithm(IdentityAlgorithm):
def __init__(self, id_format, formats):
super().__init__(id_format, formats)
# Check algorithm match
if formats[id_format]["algorithm"] != "irmaexact":
raise RuntimeError("Identity format linked to wrong algorithm")
self.issuer_pk = formats[self.id_format]["issuer_pk"]
self.attribute_order = formats[self.id_format]["order"]
self.validity = formats[self.id_format]["validity"]
self.base_meta = {
"credential": formats[self.id_format]["credential"],
"keyCounter": formats[self.id_format]["keyCounter"],
"validity": formats[self.id_format]["validity"]
}
self.system_parameters = DefaultSystemParameters[1024]
self.challenge_count = 8
def generate_secret_key(self):
return KeyStub()
def load_secret_key(self, serialized):
return KeyStub()
def load_public_key(self, serialized):
return KeyStub()
def get_attestation_class(self):
return IRMAAttestation
def attest(self, PK, value):
raise NotImplementedError("Only import_blob is supported (now) for IRMA.")
def certainty(self, value, aggregate):
value_json = {"attributes": json.loads(value)}
value_json.update(self.base_meta)
attestation = aggregate['attestation']
attr_ints, sign_date = make_attribute_list(value_json, self.attribute_order,
(self.validity, attestation.sign_date))
reconstructed_attr_map = {}
for i in range(len(attr_ints)):
reconstructed_attr_map[i + 1] = attr_ints[i]
verified = 0.0
failure = False
for k, v in aggregate.items():
if k != 'attestation' and v:
challenge_verif, _ = iunpack(k)
p = attestation.proofd.Copy()
p.ADisclosed = reconstructed_attr_map
Ap, Zp = p.ChallengeContribution(self.issuer_pk)
p.C, _ = iunpack(v)
reconstructed_challenge = createChallenge(challenge_verif, challenge_verif, [Ap, Zp], False)
if p.VerifyWithChallenge(self.issuer_pk, reconstructed_challenge):
verified += 1.0
else:
failure = True
return 0.0 if failure else (verified / self.challenge_count)
def create_challenges(self, PK, attestation):
return [ipack(int(binascii.hexlify(os.urandom(32)), 16) % self.issuer_pk.N)
for _ in range(self.challenge_count)]
def create_challenge_response(self, SK, attestation, challenge):
return challenge_response(attestation.proofd, attestation.z, challenge)
def create_certainty_aggregate(self, attestation):
return {'attestation': attestation}
def create_honesty_challenge(self, PK, value):
raise NotImplementedError()
def process_honesty_challenge(self, value, response):
raise NotImplementedError()
def process_challenge_response(self, aggregate, challenge, response):
aggregate[challenge] = response
def import_blob(self, blob):
blob_json = json.loads(blob)
sign_date = blob_json["sign_date"]
proofd = unserialize_proof_d(binascii.unhexlify(blob_json["proofd"]))
z = blob_json["z"]
inst = self.get_attestation_class()(sign_date, proofd, z)
return inst.serialize_private(None), None
| null |
5,093 |
#!/usr/bin/env python3
import rospy
import json
from lg_msg_defs.srv import USCSMessage
from lg_msg_defs.srv import DesiredState
from interactivespaces_msgs.msg import GenericMessage
from std_msgs.msg import String
from appctl_msg_defs.msg import Mode
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'state_setter'
from lg_common.logger import get_logger
logger = get_logger(NODE_NAME)
class StateSetter(object):
def __init__(self, state_pub, display_url_pub, kiosk_url_pub, runway_pub, last_uscs_service):
self.state_pub = state_pub
self.display_url_pub = display_url_pub
self.kiosk_url_pub = kiosk_url_pub
self.runway_pub = runway_pub
self.last_uscs_service = last_uscs_service
self.state_display = None
self.state_kiosk = None
self.state = None
def get_current_state(self):
state = self.last_uscs_service().message
try:
return json.loads(state)
except Exception:
logger.error("Last state from /uscs/message service returned non-json parsable (%s)" % state)
return {}
def handle_state_setting(self, msg):
self.state = None
try:
state = json.loads(msg.data)
except Exception:
logger.error('Error with the state message, non json format:\n%s' % msg.data)
return
self.state = state
# if the current state is tactile and the new state is tactile, then
# we follow a special path, also if just the new state is tactile we
# follow another but different special path
#if self.handle_current_and_new_tactile(state):
# self._clear_state()
# return
#if self.handle_new_state_tactile(state):
# return
self.publish_uscs(state)
self.METHOD_NAME()
def METHOD_NAME(self):
# set state to none since we don't need to store it when
# we change the url / handle tactile ourself
self.state = self.state_display = self.state_kiosk = None
def handle_current_and_new_tactile(self, new_state):
# if the current and new state are tactile, only urls / runway
# cards need to be changed / emitted
current_state = self.get_current_state()
if not self.is_tactile(current_state) or not self.is_tactile(new_state):
return False
if self.valid_runway_card(new_state.get('runway_card', None)):
self.runway_pub.publish(new_state['runway_card'])
else:
# if the runway card isn't valid we want to just set the urls
self.publish_urls(new_state['kiosk_url'], new_state['display_url'])
return True
def handle_new_state_tactile(self, new_state):
self.publish_uscs(new_state)
# store state so when the kiosk and display are finished
# loading they can query their own runway cards
self.state = self.state_kiosk = self.state_display = new_state
def publish_uscs(self, state):
self.state_pub.publish(self.make_director(state))
def desired_state(self, req):
state = self.state
if state is None:
return ''
if req.node == '42-a' or req.node == 'display':
if self.state_display is None:
return ''
self.state_display = None
return json.dumps(state)
if req.node == '42-b' or req.node == 'kiosk':
if self.state_kiosk is None:
return ''
self.state_kiosk = None
return json.dumps(state)
return ''
def make_director(self, uscs_message):
# makes a generic message and returns it
ret = GenericMessage()
ret.type = 'json'
try:
ret.message = json.dumps(uscs_message)
except Exception:
logger.error('Could not dump state message into json...')
ret.message = ''
return ret
def valid_runway_card(self, runway_card):
# runway cards can sometimes be "None" as a string
if runway_card is None or runway_card == 'None':
return False
if runway_card[11] == '3':
return False
return True
def handle_tactile(self, new_state):
if new_state.get('runway_card', 'None') != 'None' and \
new_state.get('runway_card') is not None and \
new_state.get('runway_card')[11] != '3':
self.runway_pub.publish(new_state['runway_card'])
return
self.publish_urls(new_state['kiosk_url'], new_state['display_url'])
def publish_urls(self, kiosk_url, display_url):
self.kiosk_url_pub.publish(kiosk_url)
self.display_url_pub.publish(display_url)
def grab_urls(self, state):
# grabs urls which are the only asset when the
# activity is "browser" If there are more assets
# then we ignore the window
urls = []
for window in state.get('windows', []):
if window.get('activity') != 'browser':
continue
if len(window.get('assets', [])) == 1:
urls.append(window['assets'][0])
return urls
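# For illustration only (hypothetical URLs): a state such as
# {'windows': [{'activity': 'browser', 'assets': ['http://example.com']},
#              {'activity': 'video', 'assets': ['a.mp4']}]}
# would yield ['http://example.com'], since only single-asset browser windows count.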
def _is_tactile_url(self, urls):
# checking that the length of the filter is not zero; if it is, then no urls
# matched those that should be tactile
return len([url for url in urls if 'maps.google.com' in url or 'google.com/maps' in url]) != 0
def is_tactile(self, state):
return self._is_tactile_url(self.grab_urls(state))
def main():
rospy.init_node(NODE_NAME)
state_pub = rospy.Publisher('/director/scene', GenericMessage, queue_size=10)
runway_pub = rospy.Publisher('/portal_kiosk/runway_change', String, queue_size=10)
display_pub = rospy.Publisher('/display/switch', String, queue_size=10)
kiosk_pub = rospy.Publisher('/kiosk/switch', String, queue_size=10)
last_uscs_service = rospy.ServiceProxy('/uscs/message', USCSMessage, persistent=False)
state_setter = StateSetter(state_pub, display_pub, kiosk_pub, runway_pub, last_uscs_service)
rospy.Service('/state_setter/desired_state', DesiredState, state_setter.desired_state)
rospy.Subscriber('/state_setter/set_state', String, state_setter.handle_state_setting)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| null |
5,094 |
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import json
import os
import sys
import codecs
import requests
# Python 2/3 compatibility
if sys.version_info.major > 2:
xrange = range
# install path to repository mapping
# if path mapped to None, it means that the file should be ignored (i.e. test file/helper)
# first matched path counts.
# terminating slash should be added for directories
path_mapping = [
("${install-dir}/share/rspamd/lib/fun.lua", None),
("${install-dir}/share/rspamd/lib/", "lualib/"),
("${install-dir}/share/rspamd/rules/" , "rules/"),
("${install-dir}/share/rspamd/lib/torch/" , None),
("${build-dir}/CMakeFiles/", None),
("${build-dir}/contrib/", None),
("${build-dir}/test", None),
("${project-root}/test/lua/", None),
("${project-root}/test/", None),
("${project-root}/clang-plugin/", None),
("${project-root}/CMakeFiles/", None),
("${project-root}/contrib/", None),
("${project-root}/", ""),
("contrib/", None),
("CMakeFiles/", None),
]
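# For illustration only (hypothetical file name, assuming the default --install-dir):
# "${install-dir}/share/rspamd/lib/foo.lua" is rewritten to "lualib/foo.lua", while
# anything under "${build-dir}/CMakeFiles/" maps to None and is ignored; the first
# matching prefix wins.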
parser = argparse.ArgumentParser(description='')
parser.add_argument('--input', required=True, nargs='+', help='input files')
parser.add_argument('--output', help='output file')
parser.add_argument('--root', default="/rspamd/src/github.com/rspamd/rspamd", help='repository root')
parser.add_argument('--install-dir', default="/rspamd/install", help='install root')
parser.add_argument('--build-dir', default="/rspamd/build", help='build root')
parser.add_argument('--token', help='If present, the file will be uploaded to coveralls')
def merge_coverage_vectors(c1, c2):
assert(len(c1) == len(c2))
for i in range(0, len(c1)):
if c1[i] is None and c2[i] is None:
pass
elif type(c1[i]) is int and c2[i] is None:
pass
elif c1[i] is None and type(c2[i]) is int:
c1[i] = c2[i]
elif type(c1[i]) is int and type(c2[i]) is int:
c1[i] += c2[i]
else:
raise RuntimeError("bad element types at %d: %s, %s" % (i, type(c1[i]), type(c2[i])))
return c1
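# A hedged example of the merge semantics above (made-up vectors):
# merge_coverage_vectors([None, 1, 0, None], [None, None, 2, 3]) -> [None, 1, 2, 3];
# matching ints are summed, while None means "no data" and is overridden by an int.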
def METHOD_NAME(name):
name = os.path.normpath(name)
if not os.path.isabs(name):
name = os.path.abspath(repository_root + "/" + name)
for k in path_mapping:
if name.startswith(k[0]):
if k[1] is None:
return None
else:
name = k[1] + name[len(k[0]):]
break
return name
def merge(files, j1):
for sf in j1['source_files']:
name = METHOD_NAME(sf['name'])
if name is None:
continue
if name in files:
files[name]['coverage'] = merge_coverage_vectors(files[name]['coverage'], sf['coverage'])
else:
sf['name'] = name
files[name] = sf
return files
def prepare_path_mapping():
for i in range(0, len(path_mapping)):
new_key = path_mapping[i][0].replace("${install-dir}", install_dir)
new_key = new_key.replace("${project-root}", repository_root)
new_key = new_key.replace("${build-dir}", build_dir)
path_mapping[i] = (new_key, path_mapping[i][1])
if __name__ == '__main__':
args = parser.parse_args()
repository_root = os.path.abspath(os.path.expanduser(args.root))
install_dir = os.path.normpath(os.path.expanduser(args.install_dir))
build_dir = os.path.normpath(os.path.expanduser(args.build_dir))
prepare_path_mapping()
with codecs.open(args.input[0], 'r', encoding='utf-8') as fh:
j1 = json.load(fh)
files = merge({}, j1)
for i in range(1, len(args.input)):
with codecs.open(args.input[i], 'r', encoding='utf-8') as fh:
j2 = json.load(fh)
files = merge(files, j2)
if 'git' not in j1 and 'git' in j2:
j1['git'] = j2['git']
if 'service_name' not in j1 and 'service_name' in j2:
j1['service_name'] = j2['service_name']
if 'service_job_id' not in j1 and 'service_job_id' in j2:
j1['service_job_id'] = j2['service_job_id']
if os.getenv('CIRCLECI'):
j1['service_name'] = 'circleci'
j1['service_job_id'] = os.getenv('CIRCLE_BUILD_NUM')
elif os.getenv('DRONE') == 'true':
j1['service_name'] = 'drone'
j1['service_branch'] = os.getenv('DRONE_COMMIT_BRANCH')
j1['service_build_url'] = os.getenv('DRONE_BUILD_LINK')
j1['service_number'] = os.getenv('DRONE_BUILD_NUMBER')
j1['commit_sha'] = os.getenv('DRONE_COMMIT_SHA')
if os.getenv('DRONE_BUILD_EVENT') == 'pull_request':
j1['service_pull_request'] = os.getenv('DRONE_PULL_REQUEST')
# git data can be filled by cpp-coveralls, but in our layout it can't find repo
# so we can override git info without merging
j1['git'] = {
'head': {
'id': j1['commit_sha'],
'author_email': os.getenv('DRONE_COMMIT_AUTHOR_EMAIL'),
'message': os.getenv('DRONE_COMMIT_MESSAGE')
},
'branch': j1['service_branch'],
'remotes': [{
'name': 'origin',
'url': os.getenv('DRONE_GIT_HTTP_URL')
}]
}
j1['source_files'] = list(files.values())
if args.output:
with open(args.output, 'w') as f:
f.write(json.dumps(j1))
if args.token:
j1['repo_token'] = args.token
try:
r = requests.post('https://coveralls.io/api/v1/jobs', files={"json_file": json.dumps(j1)})
r.raise_for_status()
except requests.exceptions.RequestException as e:
print("Failed to send data to coveralls: %s" % e)
sys.exit()
try:
response = r.json()
print("[coveralls] %s" % response['message'])
if 'url' in response:
print("[coveralls] Uploaded to %s" % response['url'])
except json.decoder.JSONDecodeError:
print("Bad response: '%s'" % r.text)
| null |
5,095 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from testhelpers import run_algorithm
from mantid.api import mtd, WorkspaceGroup, MatrixWorkspace, AnalysisDataService, WorkspaceFactory
class WorkspaceGroupTest(unittest.TestCase):
def create_matrix_workspace_in_ADS(self, name):
run_algorithm("CreateWorkspace", OutputWorkspace=name, DataX=[1.0, 2.0, 3.0], DataY=[2.0, 3.0], DataE=[2.0, 3.0], UnitX="TOF")
def create_group_via_GroupWorkspace_algorithm(self):
self.create_matrix_workspace_in_ADS("First")
self.create_matrix_workspace_in_ADS("Second")
run_algorithm("GroupWorkspaces", InputWorkspaces="First,Second", OutputWorkspace="grouped")
return mtd["grouped"]
def tearDown(self):
AnalysisDataService.clear()
# ------------------------------------------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------------------------------------------
def test_WorkspaceGroup_can_be_instantiated(self):
ws_group = WorkspaceGroup()
self.assertIsNotNone(ws_group)
self.assertIsInstance(ws_group, WorkspaceGroup)
def test_that_instantiated_WorkspaceGroup_is_not_added_to_the_ADS(self):
_ = WorkspaceGroup()
self.assertEqual(len(AnalysisDataService.getObjectNames()), 0)
class _HoldWsGroup:
def __init__(self):
self.group = WorkspaceGroup()
def __enter__(self):
return self.group
def __exit__(self, exc_type, exc_val, exc_tb):
del self.group # Make our intention explicit for future readers
def test_that_get_item_scope_works_not_in_ads(self):
with self._HoldWsGroup() as group:
# Note we want to create this inline so our ref is through getItem
group.addWorkspace(WorkspaceFactory.create("Workspace2D", 1, 1, 1))
ws = group.getItem(0)
self.assertFalse(ws.name()) # Not in ADS so should be no name
self.assertEqual(len(AnalysisDataService.getObjectNames()), 0)
# Now the group should be deleted, this should continue to work now
self.assertFalse(ws.name())
def test_that_instantiated_WorkspaceGroup_can_be_added_to_the_ADS(self):
ws_group = WorkspaceGroup()
mtd.add("group1", ws_group)
self.assertEqual(AnalysisDataService.getObjectNames(), ["group1"])
self.assertIsInstance(mtd["group1"], WorkspaceGroup)
def test_that_can_add_workspaces_to_WorkspaceGroup_when_in_ADS(self):
self.create_matrix_workspace_in_ADS("ws1")
self.create_matrix_workspace_in_ADS("ws2")
ws_group = WorkspaceGroup()
mtd.add("group1", ws_group)
ws_group.add("ws1")
ws_group.add("ws2")
self.assertTrue("ws1" in mtd["group1"])
self.assertTrue("ws2" in mtd["group1"])
def test_that_can_add_workspaces_to_WorkspaceGroup_when_not_in_ADS(self):
ws1 = WorkspaceFactory.create("Workspace2D", 2, 2, 2)
ws2 = WorkspaceFactory.create("Workspace2D", 2, 2, 2)
ws_group = WorkspaceGroup()
ws_group.addWorkspace(ws1)
ws_group.addWorkspace(ws2)
self.assertEqual(ws_group.size(), 2)
def test_that_GroupWorkspaces_algorithm_creates_group_of_the_correct_size(self):
group = self.create_group_via_GroupWorkspace_algorithm()
self.assertEqual(type(group), WorkspaceGroup)
self.assertEqual(2, group.size())
self.assertEqual(2, group.getNumberOfEntries())
def test_that_python__len__method_works_correctly_on_group(self):
group = self.create_group_via_GroupWorkspace_algorithm()
self.assertEqual(len(group), group.getNumberOfEntries())
def test_that_getName_method_returns_correct_names(self):
group = self.create_group_via_GroupWorkspace_algorithm()
names = group.getNames()
self.assertCountEqual(names, ["First", "Second"])
def test_that_a_group_is_invalidated_if_ADS_is_cleared_and_RuntimeError_raised(self):
group = self.create_group_via_GroupWorkspace_algorithm()
mtd.clear()
with self.assertRaises(RuntimeError):
group.getNames()
def test_that_IndexError_raised_when_attempting_to_access_an_index_which_doesnt_exist(self):
group = self.create_group_via_GroupWorkspace_algorithm()
with self.assertRaises(IndexError):
group.__getitem__(2)
def METHOD_NAME(self):
group = self.create_group_via_GroupWorkspace_algorithm()
for i in range(2):
member = group[i]
self.assertTrue(isinstance(member, MatrixWorkspace))
def test_that_sortByName_sorts_names_alphabetically_when_using_getNames(self):
group = self.create_group_via_GroupWorkspace_algorithm()
group.sortByName()
names = group.getNames()
self.assertEqual(names[0], "First")
self.assertEqual(names[1], "Second")
def test_SimpleAlgorithm_Accepts_Group_Handle(self):
from mantid.simpleapi import Scale
self.create_matrix_workspace_in_ADS("First")
self.create_matrix_workspace_in_ADS("Second")
run_algorithm("GroupWorkspaces", InputWorkspaces="First,Second", OutputWorkspace="grouped")
group = mtd["grouped"]
try:
w = Scale(group, 1.5)
mtd.remove(str(w))
except Exception as exc:
self.fail("Algorithm raised an exception with input as WorkspaceGroup: '" + str(exc) + "'")
mtd.remove(str(group))
def test_complex_binary_operations_with_group_do_not_leave_temporary_workspaces_in_ADS(self):
run_algorithm(
"CreateWorkspace", OutputWorkspace="grouped_1", DataX=[1.0, 2.0, 3.0], DataY=[2.0, 3.0], DataE=[2.0, 3.0], UnitX="TOF"
)
run_algorithm(
"CreateWorkspace", OutputWorkspace="grouped_2", DataX=[1.0, 2.0, 3.0], DataY=[2.0, 3.0], DataE=[2.0, 3.0], UnitX="TOF"
)
run_algorithm("GroupWorkspaces", InputWorkspaces="grouped_1,grouped_2", OutputWorkspace="grouped")
w1 = (mtd["grouped"] * 0.0) + 1.0
self.assertTrue("w1" in mtd)
self.assertTrue("grouped" in mtd)
self.assertTrue("grouped_1" in mtd)
self.assertTrue("grouped_2" in mtd)
self.assertTrue("__python_op_tmp0" not in mtd)
self.assertTrue("__python_op_tmp0_1" not in mtd)
self.assertTrue("__python_op_tmp0_2" not in mtd)
mtd.remove("w1")
mtd.remove("grouped")
mtd.remove("grouped_1")
mtd.remove("grouped_2")
def test_negative_indices_return_correct_ws_from_group(self):
group = self.create_group_via_GroupWorkspace_algorithm()
self.assertEqual(group[-1].name(), "Second")
self.assertEqual(group[-2].name(), "First")
def test_out_of_bounds_negative_index_returns_IndexError(self):
group = self.create_group_via_GroupWorkspace_algorithm()
with self.assertRaises(IndexError):
group[-400]
def test_getItem_negative_int_index_values(self):
group = self.create_group_via_GroupWorkspace_algorithm()
self.assertEqual(group.getItem(-1).name(), "Second")
self.assertEqual(group.getItem(-2).name(), "First")
with self.assertRaises(IndexError):
group.getItem(-400)
def test_isGroup(self):
group = self.create_group_via_GroupWorkspace_algorithm()
self.assertEqual(group.isGroup(), True)
if __name__ == "__main__":
unittest.main()
| null |
5,096 |
# Copyright Kevin Deldycke <[email protected]> and contributors.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import annotations
import re
from typing import Iterator
from click_extra.platforms import MACOS
from meta_package_manager.base import Package, PackageManager
from meta_package_manager.capabilities import (
search_capabilities,
version_not_implemented,
)
class MAS(PackageManager):
name = "Mac AppStore"
homepage_url = "https://github.com/argon/mas"
platforms = MACOS
# 'mas search' output has been fixed in 1.6.1:
# https://github.com/mas-cli/mas/pull/205
requirement = "1.6.1"
version_cli_options = ("version",)
"""
.. code-block:: shell-session
► mas version
1.8.3
"""
@property
def installed(self) -> Iterator[Package]:
"""Fetch installed packages.
.. code-block:: shell-session
► mas list
1569813296 1Password for Safari (2.3.5)
1295203466 Microsoft Remote Desktop (10.7.6)
409183694 Keynote (12.0)
1408727408 com.adriangranados.wifiexplorerlite (1.5.5)
409203825 Numbers (12.0)
"""
output = self.run_cli("list")
regexp = re.compile(
r"""
(?P<package_id>\d+)
\s+
(?P<package_name>.+?)
\s+
\(
(?P<version>\S+)
\)
""",
re.MULTILINE | re.VERBOSE,
)
for package_id, package_name, version in regexp.findall(output):
yield self.package(
id=package_id,
name=package_name,
installed_version=version,
)
@property
def outdated(self) -> Iterator[Package]:
"""Fetch outdated packages.
.. code-block:: shell-session
► mas outdated
409183694 Keynote (11.0 -> 12.0)
1176895641 Spark (2.11.20 -> 2.11.21)
"""
output = self.run_cli("outdated")
regexp = re.compile(
r"""
(?P<package_id>\d+)
\s+
(?P<package_name>.+?)
\s+
\(
(?P<installed_version>\S+)
\s+->\s+
(?P<latest_version>\S+)
\)
""",
re.MULTILINE | re.VERBOSE,
)
for (
package_id,
package_name,
installed_version,
latest_version,
) in regexp.findall(output):
yield self.package(
id=package_id,
name=package_name,
installed_version=installed_version,
latest_version=latest_version,
)
@search_capabilities(extended_support=False, exact_support=False)
def search(self, query: str, extended: bool, exact: bool) -> Iterator[Package]:
"""Fetch matching packages.
.. caution::
Search does not support extended or exact matching. So we return the best
subset of results and let
:py:meth:`meta_package_manager.base.PackageManager.refiltered_search` refine
them.
.. code-block:: shell-session
► mas search python
689176796 Python Runner (1.3)
630736088 Learning Python (1.0)
945397020 Run Python (1.0)
1164498373 PythonGames (1.0)
1400050251 Pythonic (1.0.0)
"""
output = self.run_cli("search", query)
regexp = re.compile(
r"""
(?P<package_id>\d+)
\s+
(?P<package_name>.+?)
\s+
\(
(?P<version>\S+)
\)
""",
re.MULTILINE | re.VERBOSE,
)
for package_id, package_name, version in regexp.findall(output):
yield self.package(id=package_id, name=package_name, latest_version=version)
@version_not_implemented
def METHOD_NAME(self, package_id: str, version: str | None = None) -> str:
"""Install one package.
.. code-block:: shell-session
► mas install 945397020
"""
return self.run_cli("install", package_id)
def upgrade_all_cli(self) -> tuple[str, ...]:
"""Generates the CLI to upgrade all packages (default) or only the one provided
as parameter.
.. code-block:: shell-session
► mas upgrade
"""
return self.build_cli("upgrade")
@version_not_implemented
def upgrade_one_cli(
self,
package_id: str,
version: str | None = None,
) -> tuple[str, ...]:
"""Generates the CLI to upgrade all packages (default) or only the one provided
as parameter.
.. code-block:: shell-session
► mas upgrade 945397020
"""
return self.build_cli("upgrade", package_id)
| null |
5,097 |
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
try:
CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
def wrap(self, param):
return param
@need_symbol('c_wchar')
def test_wchar_parm(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap("x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.assertEqual(result, 139)
self.assertIs(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(self.wrap(pointer(v)))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
# This one works...
result = f(self.wrap(pointer(v)))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(self.wrap(p))
self.assertEqual(result.contents.value, 99)
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(self.wrap(2**18), self.wrap(cb))
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
f.argtypes = None
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
@need_symbol('c_longlong')
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertIsInstance(value, int)
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(self.wrap(inp))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
# Test also that the original struct was unmodified (i.e. was passed by
# value)
self.assertEqual((inp.x, inp.y), (99, 88))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(self.wrap(inp))
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_recursive_as_param(self):
from ctypes import c_int
class A(object):
pass
a = A()
a._as_parameter_ = a
with self.assertRaises(RecursionError):
c_int.from_param(a)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamWrapper(object):
def __init__(self, param):
self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
def __init__(self, param):
self._param = param
def METHOD_NAME(self):
return self._param
_as_parameter_ = property(METHOD_NAME)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
unittest.main()
| null |
5,098 |
#!/usr/bin/env python3
# Copyright (c) 2017-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import PocketcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(PocketcoinTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def METHOD_NAME(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.METHOD_NAME()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.METHOD_NAME()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| null |
5,099 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for gathering telemetry for TFX components and pipelines."""
import contextlib
import re
import sys
import threading
from typing import Dict, List
from absl import logging
from googleapiclient import http
from tfx import version
# Common label names used.
#
# TODO(b/190444391): Add component label.
LABEL_TFX_RUNNER = 'tfx_runner'
LABEL_TFX_EXECUTOR = 'tfx_executor'
_LABEL_TFX_VERSION = 'tfx_version'
_LABEL_TFX_PY_VERSION = 'tfx_py_version'
# The GKE pod label indicating the SDK environment.
LABEL_KFP_SDK_ENV = 'pipelines.kubeflow.org/pipeline-sdk-type'
# Thread local labels registered so far.
_thread_local_labels_state = threading.local()
_thread_local_labels_state.dictionary = {}
@contextlib.contextmanager
def scoped_labels(labels: Dict[str, str]):
"""Register thread local labels used in current context."""
if getattr(_thread_local_labels_state, 'dictionary', None) is None:
_thread_local_labels_state.dictionary = {}
for key, value in labels.items():
_thread_local_labels_state.dictionary[key] = value
try:
yield
finally:
for key in labels:
_thread_local_labels_state.dictionary.pop(key)
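# A minimal usage sketch (illustrative runner value only): labels registered here are
# picked up by the label getter below for the duration of the block, e.g.
#   with scoped_labels({LABEL_TFX_RUNNER: 'local_dag_runner'}):
#     beam_args = make_beam_labels_args()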
def _normalize_label(value: str) -> str:
"""Lowercase and replace illegal characters in labels."""
# See https://cloud.google.com/compute/docs/labeling-resources.
result = re.sub(r'[^a-z0-9\_\-]', '-', value.lower())
if len(result) > 63:
logging.warning('Length of label `%s` exceeds maximum length(63), trimmed.',
result)
return result[:63]
return result
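# For example (illustrative input): _normalize_label('My.Executor') returns
# 'my-executor' -- the value is lowercased, '.' is replaced with '-', and anything
# longer than 63 characters is trimmed.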
def METHOD_NAME() -> Dict[str, str]:
"""Get all registered and system generated labels as a dict.
Returns:
All registered and system generated labels as a dict.
"""
result = dict(
{
_LABEL_TFX_VERSION:
version.__version__,
_LABEL_TFX_PY_VERSION:
'%d.%d' % (sys.version_info.major, sys.version_info.minor),
}, **getattr(_thread_local_labels_state, 'dictionary', {}))
# Only first-party tfx component's executor telemetry will be collected.
# All other executors will be recorded as `third_party_executor`.
if (result.get(LABEL_TFX_EXECUTOR) and
not result[LABEL_TFX_EXECUTOR].startswith('tfx.')):
result[LABEL_TFX_EXECUTOR] = 'third_party_executor'
for k, v in result.items():
result[k] = _normalize_label(v)
return result
def make_beam_labels_args() -> List[str]:
"""Make Beam arguments for common labels used in TFX pipelines.
Returns:
New Beam pipeline args with labels.
"""
labels = METHOD_NAME()
# See following file for reference to the '--labels ' flag.
# https://github.com/apache/beam/blob/master/sdks/python/apache_beam/options/pipeline_options.py
result = []
for k in sorted(labels):
result.extend(['--labels', '%s=%s' % (k, labels[k])])
return result
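# A hedged example of the resulting argument list (hypothetical versions):
# ['--labels', 'tfx_py_version=3-8', '--labels', 'tfx_version=1-0-0', ...],
# where '.' has already been replaced by '-' during label normalization.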
def noop_telemetry(*args, **kwargs) -> None:
"""This function is a no-op in OSS."""
del args
del kwargs
return None
class TFXHttpRequest(http.HttpRequest):
"""HttpRequest builder that sets a customized useragent header for TFX.
This is used to track the usage of the TFX on Cloud AI.
"""
def __init__(self, *args, **kwargs):
"""Construct a HttpRequest.
Args:
*args: Positional arguments to pass to the base class constructor.
**kwargs: Keyword arguments to pass to the base class constructor.
"""
headers = kwargs.setdefault('headers', {})
# See Mozilla standard User Agent header Syntax:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent
# TODO(b/193915978): Stop relying on '-tfxpipeline-' suffix and use
# tfx/version instead. More labels set to scoped_labels can also be added
# the comments variable below, upon needed.
comments = '(client_context:tfxpipeline;)'
user_agent = f'tfx/{version.__version__} {comments}'
headers['user-agent'] = user_agent
super().__init__(*args, **kwargs)
| null |