id (int64, 0-6k) | code (string, lengths 4k-8k) | code_compressed (null)
---|---|---|
4,800 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2019 Benedikt Otto <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'ir_rc6'
name = 'IR RC-6'
longname = 'IR RC-6'
desc = 'RC-6 infrared remote control protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['IR']
channels = (
{'id': 'ir', 'name': 'IR', 'desc': 'IR data line', 'idn':'dec_ir_rc6_chan_ir'},
)
options = (
{'id': 'polarity', 'desc': 'Polarity', 'default': 'auto',
'values': ('auto', 'active-low', 'active-high'), 'idn':'dec_ir_rc6_opt_polarity'},
)
annotations = (
('bit', 'Bit'),
('sync', 'Sync'),
('startbit', 'Startbit'),
('field', 'Field'),
('togglebit', 'Togglebit'),
('address', 'Address'),
('command', 'Command'),
)
annotation_rows = (
('bits', 'Bits', (0,)),
('fields', 'Fields', (1, 2, 3, 4, 5, 6)),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.edges, self.deltas, self.bits = [], [], []
self.state = 'IDLE'
self.mode = 0
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
# One bit: 0.889ms (one half low, one half high).
self.halfbit = int((self.samplerate * 0.000889) / 2.0)
def putb(self, bit, data):
self.put(bit[0], bit[1], self.out_ann, data)
def putbits(self, bit1, bit2, data):
self.put(bit1[0], bit2[1], self.out_ann, data)
def METHOD_NAME(self, ss, es, data):
self.put(ss, es, self.out_ann, data)
def handle_bit(self):
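# The RC-6 header consists of six symbols: a sync symbol spanning 8 half-bit
# periods (value 1), a start bit (1), three mode/field bits and a toggle bit.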
if len(self.bits) != 6:
return
if self.bits[0][2] == 8 and self.bits[0][3] == 1:
self.putb(self.bits[0], [1, ['Synchronisation', 'Sync']])
else:
return
if self.bits[1][3] == 1:
self.putb(self.bits[1], [2, ['Startbit', 'Start']])
else:
return
self.mode = sum([self.bits[2 + i][3] << (2 - i) for i in range(3)])
self.putbits(self.bits[2], self.bits[4], [3, ['Field: %d' % self.mode]])
self.putb(self.bits[5], [4, ['Toggle: %d' % self.bits[5][3]]])
def handle_package(self):
# Sync and start bits have to be 1.
if self.bits[0][3] == 0 or self.bits[1][3] == 0:
return
if len(self.bits) <= 6:
return
if self.mode == 0 and len(self.bits) == 22: # Mode 0 standard
value = sum([self.bits[6 + i][3] << (7 - i) for i in range(8)])
self.putbits(self.bits[6], self.bits[13], [5, ['Address: %0.2X' % value]])
value = sum([self.bits[14 + i][3] << (7 - i) for i in range(8)])
self.putbits(self.bits[14], self.bits[21], [6, ['Data: %0.2X' % value]])
self.bits = []
if self.mode == 6 and len(self.bits) >= 15: # Mode 6
if self.bits[6][3] == 0: # Short addr, Mode 6A
value = sum([self.bits[6 + i][3] << (7 - i) for i in range(8)])
self.putbits(self.bits[6], self.bits[13], [5, ['Address: %0.2X' % value]])
num_data_bits = len(self.bits) - 14
value = sum([self.bits[14 + i][3] << (num_data_bits - 1 - i) for i in range(num_data_bits)])
self.putbits(self.bits[14], self.bits[-1], [6, ['Data: %X' % value]])
self.bits = []
elif len(self.bits) >= 23: # Long addr, Mode 6B
value = sum([self.bits[6 + i][3] << (15 - i) for i in range(16)])
self.putbits(self.bits[6], self.bits[21], [5, ['Address: %0.2X' % value]])
num_data_bits = len(self.bits) - 22
value = sum([self.bits[22 + i][3] << (num_data_bits - 1 - i) for i in range(num_data_bits)])
self.putbits(self.bits[22], self.bits[-1], [6, ['Data: %X' % value]])
self.bits = []
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
value = 0
num_edges = -1
self.invert = False
while True:
conditions = [{0: 'e'}]
if self.state == 'DATA':
conditions.append({'skip': self.halfbit * 6})
(self.ir,) = self.wait(conditions)
if len(conditions) == 2:
if self.matched & 0b10:
self.state = 'IDLE'
self.edges.append(self.samplenum)
if len(self.edges) < 2:
continue
delta = (self.edges[-1] - self.edges[-2]) / self.halfbit
delta = int(delta + 0.5)
self.deltas.append(delta)
if len(self.deltas) < 2:
continue
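# The leader shows up as a pulse of 6 half-bit periods followed by one of
# 2 half-bit periods; this marks the start of a new frame.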
if self.deltas[-2:] == [6, 2]:
self.state = 'SYNC'
num_edges = 0
self.bits = []
if self.options['polarity'] == 'auto':
value = 1
else:
value = self.ir if self.options['polarity'] == 'active-high' else 1 - self.ir
self.bits.append((self.edges[-3], self.edges[-1], 8, value))
self.invert = self.ir == 0
self.putb(self.bits[-1], [0, ['%d' % value]]) # Add bit.
if (num_edges % 2) == 0: # Only count every second edge.
if self.deltas[-2] in [1, 2, 3] and self.deltas[-1] in [1, 2, 3, 6]:
self.state = 'DATA'
if self.deltas[-2] != self.deltas[-1]:
# Insert border between 2 bits.
self.edges.insert(-1, self.edges[-2] + self.deltas[-2] * self.halfbit)
total = self.deltas[-1]
self.deltas[-1] = self.deltas[-2]
self.deltas.append(total - self.deltas[-1])
self.bits.append((self.edges[-4], self.edges[-2], self.deltas[-2] * 2, value))
num_edges += 1
else:
self.bits.append((self.edges[-3], self.edges[-1], self.deltas[-1] * 2, value))
self.putb(self.bits[-1], [0, ['%d' % value]]) # Add bit.
if len(self.bits) > 0:
self.handle_bit()
if self.state == 'IDLE':
self.handle_package()
if self.options['polarity'] == 'auto':
value = self.ir if self.invert else 1 - self.ir
else:
value = self.ir if self.options['polarity'] == 'active-low' else 1 - self.ir
num_edges += 1
| null |
4,801 |
"""
Pure Python implementations of a Fixed Priority Queue and an Element Priority Queue
using Python lists.
"""
class OverFlowError(Exception):
pass
class UnderFlowError(Exception):
pass
class FixedPriorityQueue:
"""
Tasks can be added to a Priority Queue at any time and in any order, but when Tasks
are removed, the Task with the highest priority is removed in FIFO order. This
implementation uses three levels of priority, with priority zero Tasks being the most
urgent (highest priority) and priority 2 Tasks being the least urgent.
Examples
>>> fpq = FixedPriorityQueue()
>>> fpq.enqueue(0, 10)
>>> fpq.enqueue(1, 70)
>>> fpq.enqueue(0, 100)
>>> fpq.enqueue(2, 1)
>>> fpq.enqueue(2, 5)
>>> fpq.enqueue(1, 7)
>>> fpq.enqueue(2, 4)
>>> fpq.enqueue(1, 64)
>>> fpq.enqueue(0, 128)
>>> print(fpq)
Priority 0: [10, 100, 128]
Priority 1: [70, 7, 64]
Priority 2: [1, 5, 4]
>>> fpq.dequeue()
10
>>> fpq.dequeue()
100
>>> fpq.dequeue()
128
>>> fpq.dequeue()
70
>>> fpq.dequeue()
7
>>> print(fpq)
Priority 0: []
Priority 1: [64]
Priority 2: [1, 5, 4]
>>> fpq.dequeue()
64
>>> fpq.dequeue()
1
>>> fpq.dequeue()
5
>>> fpq.dequeue()
4
>>> fpq.dequeue()
Traceback (most recent call last):
...
data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty
>>> print(fpq)
Priority 0: []
Priority 1: []
Priority 2: []
"""
def __init__(self):
self.queues = [
[],
[],
[],
]
def enqueue(self, priority: int, data: int) -> None:
"""
Add an element to a queue based on its priority.
If the priority is invalid, a ValueError is raised.
If the queue is full, an OverFlowError is raised.
"""
try:
if len(self.queues[priority]) >= 100:
raise OverFlowError("Maximum queue size is 100")
self.queues[priority].append(data)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def dequeue(self) -> int:
"""
Return the highest priority element in FIFO order.
If the queue is empty, an UnderFlowError is raised.
"""
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__(self) -> str:
return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
"""
Element Priority Queue is the same as Fixed Priority Queue except that the value of
the element itself is the priority. The rules for priorities are the same as for the
Fixed Priority Queue.
>>> epq = ElementPriorityQueue()
>>> epq.enqueue(10)
>>> epq.enqueue(70)
>>> epq.enqueue(4)
>>> epq.enqueue(1)
>>> epq.enqueue(5)
>>> epq.enqueue(7)
>>> epq.enqueue(4)
>>> epq.enqueue(64)
>>> epq.enqueue(128)
>>> print(epq)
[10, 70, 4, 1, 5, 7, 4, 64, 128]
>>> epq.dequeue()
1
>>> epq.dequeue()
4
>>> epq.dequeue()
4
>>> epq.dequeue()
5
>>> epq.dequeue()
7
>>> epq.dequeue()
10
>>> print(epq)
[70, 64, 128]
>>> epq.dequeue()
64
>>> epq.dequeue()
70
>>> epq.dequeue()
128
>>> epq.dequeue()
Traceback (most recent call last):
...
data_structures.queue.priority_queue_using_list.UnderFlowError: The queue is empty
>>> print(epq)
[]
"""
def __init__(self):
self.queue = []
def enqueue(self, data: int) -> None:
"""
Add an element to the queue.
If the queue is full, an OverFlowError is raised.
"""
if len(self.queue) == 100:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(data)
def dequeue(self) -> int:
"""
Return the highest priority element in FIFO order.
If the queue is empty, an UnderFlowError is raised.
"""
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
data = min(self.queue)
self.queue.remove(data)
return data
def __str__(self) -> str:
"""
Return a string showing all the elements within the Element Priority Queue.
"""
return str(self.queue)
def fixed_priority_queue():
fpq = FixedPriorityQueue()
fpq.enqueue(0, 10)
fpq.enqueue(1, 70)
fpq.enqueue(0, 100)
fpq.enqueue(2, 1)
fpq.enqueue(2, 5)
fpq.enqueue(1, 7)
fpq.enqueue(2, 4)
fpq.enqueue(1, 64)
fpq.enqueue(0, 128)
print(fpq)
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq)
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
def METHOD_NAME():
epq = ElementPriorityQueue()
epq.enqueue(10)
epq.enqueue(70)
epq.enqueue(100)
epq.enqueue(1)
epq.enqueue(5)
epq.enqueue(7)
epq.enqueue(4)
epq.enqueue(64)
epq.enqueue(128)
print(epq)
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq)
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
METHOD_NAME()
| null |
4,802 |
from mpf.plugins.auditor import Auditor
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
class TestAuditor(MpfFakeGameTestCase):
def get_config_file(self):
return 'config.yaml'
def get_platform(self):
return 'smart_virtual'
def METHOD_NAME(self):
return 'tests/machine_files/auditor/'
def setUp(self):
self.machine_config_patches['mpf']['plugins'] = ['mpf.plugins.auditor.Auditor']
super().setUp()
def test_auditor_player_vars(self):
auditor = self.machine.plugins[0]
self.assertIsInstance(auditor, Auditor)
data_manager = auditor.data_manager
# start a game
self.start_game()
self.post_event("add_score")
self.post_event("add_custom")
self.post_event("add_custom")
self.post_event("add_not_audited")
self.assertPlayerVarEqual(100, "score")
self.drain_all_balls()
self.assertGameIsNotRunning()
self.assertEqual(100, auditor.current_audits['player']['score']['average'])
self.assertEqual([100], auditor.current_audits['player']['score']['top'])
self.assertEqual(1, auditor.current_audits['player']['score']['total'])
self.assertEqual(200, auditor.current_audits['player']['my_var']['average'])
self.assertEqual([200], auditor.current_audits['player']['my_var']['top'])
self.assertEqual(1, auditor.current_audits['player']['my_var']['total'])
self.assertNotIn("not_audited", auditor.current_audits['player'])
# start a game
self.start_game()
self.post_event("add_score")
self.post_event("add_score")
self.assertPlayerVarEqual(200, "score")
self.drain_all_balls()
self.assertGameIsNotRunning()
self.assertEqual(150, auditor.current_audits['player']['score']['average'])
self.assertEqual([200, 100], auditor.current_audits['player']['score']['top'])
self.assertEqual(2, auditor.current_audits['player']['score']['total'])
self.assertEqual(100, auditor.current_audits['player']['my_var']['average'])
self.assertEqual([200, 0], auditor.current_audits['player']['my_var']['top'])
self.assertEqual(2, auditor.current_audits['player']['my_var']['total'])
self.assertNotIn("not_audited", auditor.current_audits['player'])
self.assertEqual({'score': {'top': [200, 100], 'average': 150.0, 'total': 2},
'my_var': {'top': [200, 0], 'average': 100.0, 'total': 2}},
auditor.data_manager.written_data["player"])
def test_auditor_switches_events(self):
auditor = self.machine.plugins[0]
self.assertIsInstance(auditor, Auditor)
data_manager = auditor.data_manager
self.machine.switch_controller.process_switch("s_ball", 1)
self.machine.switch_controller.process_switch("s_test", 1)
self.advance_time_and_run(.1)
self.machine.switch_controller.process_switch("s_test", 0)
self.advance_time_and_run(2)
self.assertEqual(0, auditor.current_audits['switches']['s_test'])
self.assertMachineVarEqual(0, "audits_switches_s_test")
# start a game
self.start_game()
self.machine.switch_controller.process_switch("s_test", 1)
self.advance_time_and_run(1)
self.machine.switch_controller.process_switch("s_test", 0)
self.advance_time_and_run(1)
self.drain_all_balls()
self.assertGameIsNotRunning()
self.assertEqual(1, auditor.current_audits['switches']['s_test'])
self.assertMachineVarEqual(1, "audits_switches_s_test")
self.machine.switch_controller.process_switch("s_test", 1)
self.advance_time_and_run(.1)
self.machine.switch_controller.process_switch("s_test", 0)
self.advance_time_and_run(.1)
self.assertEqual(1, auditor.current_audits['switches']['s_test'])
self.assertMachineVarEqual(1, "audits_switches_s_test")
self.assertEqual(1, data_manager.written_data['switches']['s_test'])
# start a game
self.start_game()
self.machine.switch_controller.process_switch("s_test", 1)
self.advance_time_and_run(.1)
self.machine.switch_controller.process_switch("s_test", 0)
self.advance_time_and_run(.1)
self.assertEqual(2, auditor.current_audits['switches']['s_test'])
self.assertMachineVarEqual(2, "audits_switches_s_test")
self.assertEqual(2, data_manager.written_data['switches']['s_test'])
self.post_event("test_event1")
self.post_event("test_event2")
self.post_event("test_event3")
self.advance_time_and_run(.1)
self.assertMachineVarEqual(2, "audits_switches_s_test")
self.assertEqual(1, data_manager.written_data['events']['test_event1'])
self.assertEqual(1, data_manager.written_data['events']['test_event2'])
self.assertNotIn("test_event3", data_manager.written_data['events'])
self.post_event("test_event1")
self.advance_time_and_run(.1)
self.assertEqual(2, data_manager.written_data['events']['test_event1'])
self.assertEqual(1, data_manager.written_data['events']['test_event2'])
# should not crash on unknown switch
self.machine.switch_controller.process_switch_by_num(123123123123, 1, self.machine.default_platform)
self.advance_time_and_run(.1)
self.drain_all_balls()
self.assertGameIsNotRunning()
self.machine.switch_controller.process_switch("s_test", 1)
self.advance_time_and_run(.1)
self.machine.switch_controller.process_switch("s_test", 0)
self.advance_time_and_run(.1)
self.assertEqual(2, auditor.current_audits['switches']['s_test'])
self.assertEqual(2, data_manager.written_data['switches']['s_test'])
self.post_event("auditor_reset")
self.advance_time_and_run(.1)
self.assertEqual(0, auditor.current_audits['switches']['s_test'])
self.assertEqual(0, data_manager.written_data['switches']['s_test'])
self.assertEqual(0, auditor.current_audits['events']['game_started'])
self.assertEqual(0, data_manager.written_data['events']['game_started'])
| null |
4,803 |
"""Abstract Protocol class."""
__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
'SubprocessProtocol']
class BaseProtocol:
"""Common base class for protocol interfaces.
Usually the user implements protocols that derive from BaseProtocol,
like Protocol or ProcessProtocol.
The only case when BaseProtocol should be implemented directly is a
write-only transport, like a write pipe.
"""
def connection_made(self, transport):
"""Called when a connection is made.
The argument is the transport representing the pipe connection.
To receive data, wait for data_received() calls.
When the connection is closed, connection_lost() is called.
"""
def connection_lost(self, exc):
"""Called when the connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
def METHOD_NAME(self):
"""Called when the transport's buffer goes over the high-water mark.
Pause and resume calls are paired -- pause_writing() is called
once when the buffer goes strictly over the high-water mark
(even if subsequent writes increase the buffer size even
more), and eventually resume_writing() is called once when the
buffer size reaches the low-water mark.
Note that if the buffer size equals the high-water mark,
pause_writing() is not called -- it must go strictly over.
Conversely, resume_writing() is called when the buffer size is
equal or lower than the low-water mark. These end conditions
are important to ensure that things go as expected when either
mark is zero.
NOTE: This is the only Protocol callback that is not called
through EventLoop.call_soon() -- if it were, it would have no
effect when it's most needed (when the app keeps writing
without yielding until pause_writing() is called).
"""
def resume_writing(self):
"""Called when the transport's buffer drains below the low-water mark.
See pause_writing() for details.
"""
class Protocol(BaseProtocol):
"""Interface for stream protocol.
The user should implement this interface. They can inherit from
this class but don't need to. The implementations here do
nothing (they don't raise exceptions).
When the user wants to request a transport, they pass a protocol
factory to a utility function (e.g., EventLoop.create_connection()).
When the connection is made successfully, connection_made() is
called with a suitable transport object. Then data_received()
will be called 0 or more times with data (bytes) received from the
transport; finally, connection_lost() will be called exactly once
with either an exception object or None as an argument.
State machine of calls:
start -> CM [-> DR*] [-> ER?] -> CL -> end
* CM: connection_made()
* DR: data_received()
* ER: eof_received()
* CL: connection_lost()
"""
def data_received(self, data):
"""Called when some data is received.
The argument is a bytes object.
"""
def eof_received(self):
"""Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
class DatagramProtocol(BaseProtocol):
"""Interface for datagram protocol."""
def datagram_received(self, data, addr):
"""Called when some datagram is received."""
def error_received(self, exc):
"""Called when a send or receive operation raises an OSError.
(Other than BlockingIOError or InterruptedError.)
"""
class SubprocessProtocol(BaseProtocol):
"""Interface for protocol for subprocess calls."""
def pipe_data_received(self, fd, data):
"""Called when the subprocess writes data into stdout/stderr pipe.
fd is int file descriptor.
data is bytes object.
"""
def pipe_connection_lost(self, fd, exc):
"""Called when a file descriptor associated with the child process is
closed.
fd is the int file descriptor that was closed.
"""
def process_exited(self):
"""Called when subprocess has exited."""
| null |
4,804 |
#!/usr/bin/env python3
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
from pathlib import Path
import os
import shlex
import subprocess
import signal
import tempfile
import time
import psutil
import multiprocessing
renderer_cmd_file = Path(__file__).parent / 'linux-perf-renderer-cmd.sh'
assert renderer_cmd_file.is_file()
renderer_cmd_prefix = f"{renderer_cmd_file} --perf-data-prefix=chrome_renderer"
# ==============================================================================
usage = """Usage: %prog $CHROME_BIN [OPTION]... -- [CHROME_OPTION]... [URL]
This script runs linux-perf on all renderer processes with custom V8 logging to
get support for resolving JS function names.
The perf data is written to OUT_DIR, separated by renderer process.
See http://v8.dev//linux-perf for more detailed instructions.
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--perf-data-dir',
default=None,
metavar="OUT_DIR",
help="Output directory for linux perf profile files")
parser.add_option(
"--profile-browser-process",
action="store_true",
default=False,
help="Also start linux-perf for the browser process. "
"By default only renderer processes are sampled. "
"Outputs 'browser_*.perf.data' in the CDW")
parser.add_option("--timeout", type=int, help="Stop chrome after N seconds")
chrome_options = optparse.OptionGroup(
parser, "Chrome-forwarded Options",
"These convenience for a better script experience that are forward directly"
"to chrome. Any other chrome option can be passed after the '--' arguments"
"separator.")
chrome_options.add_option("--user-data-dir", dest="user_data_dir", default=None)
chrome_options.add_option("--js-flags", dest="js_flags")
chrome_options.add_option(
"--renderer-cmd-prefix",
default=None,
help=f"Set command prefix, used for each new chrome renderer process."
"Default: {renderer_cmd_prefix}")
FEATURES_DOC = "See chrome's base/feature_list.h source file for more dertails"
chrome_options.add_option(
"--enable-features",
help="Comma-separated list of enabled chrome features. " + FEATURES_DOC)
chrome_options.add_option(
"--disable-features",
help="Command-separated list of disabled chrome features. " + FEATURES_DOC)
parser.add_option_group(chrome_options)
# ==============================================================================
def METHOD_NAME(*args):
print("")
print("=" * 80)
print(*args)
print("=" * 80)
# ==============================================================================
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("No chrome binary provided")
chrome_bin = Path(args.pop(0))
if not chrome_bin.exists():
parser.error(f"Chrome '{chrome_bin}' does not exist")
if options.renderer_cmd_prefix is not None:
if options.perf_data_dir is not None:
parser.error("Cannot specify --perf-data-dir "
"if a custom --renderer-cmd-prefix is provided")
else:
options.renderer_cmd_prefix = str(renderer_cmd_file)
if options.perf_data_dir is None:
options.perf_data_dir = Path.cwd()
else:
options.perf_data_dir = Path(options.perf_data_dir).absolute()
if not options.perf_data_dir.is_dir():
parser.error(f"--perf-data-dir={options.perf_data_dir} "
"is not an directory or does not exist.")
if options.timeout and options.timeout < 2:
parser.error("--timeout should be more than 2 seconds")
# ==============================================================================
old_cwd = Path.cwd()
os.chdir(options.perf_data_dir)
# ==============================================================================
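# V8 flags that expose JIT-compiled and interpreted frames to linux-perf so
# that JS function names can be resolved later.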
JS_FLAGS_PERF = ("--perf-prof --no-write-protect-code-memory "
"--interpreted-frames-native-stack")
with tempfile.TemporaryDirectory(prefix="chrome-") as tmp_dir_path:
tempdir = Path(tmp_dir_path)
cmd = [
str(chrome_bin),
]
if options.user_data_dir is None:
cmd.append(f"--user-data-dir={tempdir}")
cmd += [
"--no-sandbox", "--incognito", "--enable-benchmarking", "--no-first-run",
"--no-default-browser-check",
f"--renderer-cmd-prefix={options.renderer_cmd_prefix}",
f"--js-flags={JS_FLAGS_PERF}"
]
if options.js_flags:
cmd += [f"--js-flags={options.js_flags}"]
if options.enable_features:
cmd += [f"--enable-features={options.enable_features}"]
if options.disable_features:
cmd += [f"--disable-features={options.disable_features}"]
cmd += args
METHOD_NAME("CHROME CMD: ", shlex.join(cmd))
if options.profile_browser_process:
perf_data_file = f"{tempdir.name}_browser.perf.data"
perf_cmd = [
"perf", "record", "--call-graph=fp", "--freq=max", "--clockid=mono",
f"--output={perf_data_file}", "--"
]
cmd = perf_cmd + cmd
METHOD_NAME("LINUX PERF CMD: ", shlex.join(cmd))
if options.timeout is None:
subprocess.run(cmd)
else:
process = subprocess.Popen(cmd)
time.sleep(options.timeout)
METHOD_NAME(f"QUITING chrome child processes after {options.timeout}s timeout")
current_process = psutil.Process()
children = current_process.children(recursive=True)
for child in children:
if "chrome" in child.name() or "content_shell" in child.name():
print(f" quitting PID={child.pid}")
child.send_signal(signal.SIGQUIT)
# Wait for linux-perf to write out files
time.sleep(1)
process.send_signal(signal.SIGQUIT)
process.wait()
# ==============================================================================
METHOD_NAME("PARALLEL POST PROCESSING: Injecting JS symbols")
def inject_v8_symbols(perf_dat_file):
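# 'perf inject --jit' folds the JIT symbol information emitted by V8
# (--perf-prof) into a new perf data file so JS symbols resolve in reports.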
output_file = perf_dat_file.with_suffix(".data.jitted")
cmd = [
"perf", "inject", "--jit", f"--input={perf_dat_file}",
f"--output={output_file}"
]
try:
subprocess.run(cmd)
print(f"Processed: {output_file}")
except:
print(shlex.join(cmd))
return None
return output_file
results = []
with multiprocessing.Pool() as pool:
results = list(
pool.imap_unordered(inject_v8_symbols,
options.perf_data_dir.glob("*perf.data")))
results = list(filter(lambda x: x is not None, results))
if len(results) == 0:
print("No perf files were successfully processed"
" Check for errors or partial results in '{options.perf_data_dir}'")
exit(1)
METHOD_NAME(f"RESULTS in '{options.perf_data_dir}'")
results.sort(key=lambda x: x.stat().st_size)
BYTES_TO_MIB = 1 / 1024 / 1024
for output_file in reversed(results):
print(
f"{output_file.name:67}{(output_file.stat().st_size*BYTES_TO_MIB):10.2f}MiB"
)
METHOD_NAME("PPROF EXAMPLE")
path_strings = map(lambda f: str(f.relative_to(old_cwd)), results)
print(f"pprof -flame { ' '.join(path_strings)}")
| null |
4,805 |
# SPDX-FileCopyrightText: 2019 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
CircuitPython library to handle the input and calculations
* Author(s): Melissa LeBlanc-Williams
"""
# pylint: disable=eval-used
def calculate(number_one, operator, number_two):
result = eval(number_one + operator + number_two)
if int(result) == result:
result = int(result)
return str(result)
class Calculator:
def __init__(self, calc_display, clear_button, label_offset):
self._error = False
self._calc_display = calc_display
self._clear_button = clear_button
self._label_offset = label_offset
self._accumulator = "0"
self._operator = None
self._equal_pressed = False
self._operand = None
self._all_clear()
def get_current_operator(self):
operator = self._operator
if operator == "*":
operator = "x"
return operator
def _all_clear(self):
self._accumulator = "0"
self._operator = None
self._equal_pressed = False
self._clear_entry()
def _clear_entry(self):
self._operand = None
self._error = False
self._set_button_ce(False)
self._set_text("0")
def _set_button_ce(self, entry_only):
self._clear_button.selected = False
if entry_only:
self._clear_button.label = "CE"
else:
self._clear_button.label = "AC"
def _set_text(self, text):
self._calc_display.text = text
_, _, screen_w, _ = self._calc_display.bounding_box
self._calc_display.x = self._label_offset - screen_w
def _get_text(self):
return self._calc_display.text
def METHOD_NAME(self, input_key):
display_text = self._get_text()
if self._operand is None and self._operator is not None:
display_text = ""
elif self._operand is not None and self._operator is not None and self._equal_pressed:
self._accumulator = self._operand
self._operator = None
self._operand = None
display_text = ""
elif display_text == "0":
display_text = ""
display_text += input_key
self._set_text(display_text)
if self._operator is not None:
self._operand = display_text
self._set_button_ce(True)
self._equal_pressed = False
def _handle_operator(self, input_key):
if input_key == "x":
input_key = "*"
if self._equal_pressed:
self._operand = None
if self._operator is None:
self._operator = input_key
else:
# Perform the pending calculation before switching to the new operator
if self._operand is not None:
self._accumulator = calculate(self._accumulator, self._operator, self._operand)
self._set_text(self._accumulator)
self._operand = None
self._operator = input_key
self._accumulator = self._get_text()
self._equal_pressed = False
def _handle_equal(self):
if self._operator is not None:
if self._operand is None:
self._operand = self._get_text()
self._accumulator = calculate(self._accumulator, self._operator, self._operand)
self._set_text(self._accumulator)
self._equal_pressed = True
def _update_operand(self):
if self._operand is not None:
self._operand = self._get_text()
def add_input(self, input_key):
try:
if self._error:
self._clear_entry()
elif input_key == "AC":
self._all_clear()
elif input_key == "CE":
self._clear_entry()
elif self._operator is None and input_key == "0":
pass
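# ord('0') == 48 and ord('9') == 57, so this branch handles single digit keys.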
elif len(input_key) == 1 and 48 <= ord(input_key) <= 57:
self.METHOD_NAME(input_key)
elif input_key in ('+', '-', '/', 'x'):
self._handle_operator(input_key)
elif input_key == ".":
if not input_key in self._get_text():
self._set_text(self._get_text() + input_key)
self._set_button_ce(True)
self._equal_pressed = False
elif input_key == "+/-":
self._set_text(calculate(self._get_text(), "*", "-1"))
self._update_operand()
elif input_key == "%":
self._set_text(calculate(self._get_text(), "/", "100"))
self._update_operand()
elif input_key == "=":
self._handle_equal()
except (ZeroDivisionError, RuntimeError):
self._all_clear()
self._error = True
self._set_text("Error")
| null |
4,806 |
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
try:
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap'] = _bootstrap
try:
import _frozen_importlib_external as _bootstrap_external
except ImportError:
from . import _bootstrap_external
_bootstrap_external._set_bootstrap_module(_bootstrap)
_bootstrap._bootstrap_external = _bootstrap_external
else:
_bootstrap_external.__name__ = 'importlib._bootstrap_external'
_bootstrap_external.__package__ = 'importlib'
try:
_bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap_external'] = _bootstrap_external
# To simplify imports in test code
_pack_uint32 = _bootstrap_external._pack_uint32
_unpack_uint32 = _bootstrap_external._unpack_uint32
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
import warnings
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Return the loader for the specified module.
This is a backward-compatible wrapper around find_spec().
This function is deprecated in favor of importlib.util.find_spec().
"""
warnings.warn('Deprecated since Python 3.4 and slated for removal in '
'Python 3.12; use importlib.util.find_spec() instead',
DeprecationWarning, stacklevel=2)
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
except AttributeError:
raise ValueError('{}.__loader__ is not set'.format(name)) from None
spec = _bootstrap._find_spec(name, path)
# We won't worry about malformed specs (missing attributes).
if spec is None:
return None
if spec.loader is None:
if spec.submodule_search_locations is None:
raise ImportError('spec for {} missing loader'.format(name),
name=name)
raise ImportError('namespace packages do not have loaders',
name=name)
return spec.loader
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
msg = ("the 'package' argument is required to perform a relative "
"import for {!r}")
raise TypeError(msg.format(name))
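# Count the leading dots; each dot raises the relative import level by one.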
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
_RELOADING = {}
def METHOD_NAME(module):
"""Reload the module and return it.
The module must have been successfully imported before.
"""
try:
name = module.__spec__.name
except AttributeError:
try:
name = module.__name__
except AttributeError:
raise TypeError("reload() argument must be a module")
if sys.modules.get(name) is not module:
msg = "module {} not in sys.modules"
raise ImportError(msg.format(name), name=name)
if name in _RELOADING:
return _RELOADING[name]
_RELOADING[name] = module
try:
parent_name = name.rpartition('.')[0]
if parent_name:
try:
parent = sys.modules[parent_name]
except KeyError:
msg = "parent {!r} not in sys.modules"
raise ImportError(msg.format(parent_name),
name=parent_name) from None
else:
pkgpath = parent.__path__
else:
pkgpath = None
target = module
spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
if spec is None:
raise ModuleNotFoundError(f"spec not found for the module {name!r}", name=name)
_bootstrap._exec(spec, module)
# The module may have replaced itself in sys.modules!
return sys.modules[name]
finally:
try:
del _RELOADING[name]
except KeyError:
pass
| null |
4,807 |
import unittest
from django.contrib.contenttypes.models import ContentType
from devilry.devilry_account.models import PermissionGroup, SubjectPermissionGroup
from devilry.devilry_import_v2database.models import ImportedModel
from django import test
from django.conf import settings
from model_bakery import baker
from devilry.apps.core.models import Subject
from devilry.devilry_account import models as account_models
from devilry.devilry_import_v2database.modelimporters.subject_importer import SubjectImporter
from .importer_testcase_mixin import ImporterTestCaseMixin
@unittest.skip('Not relevant anymore, keep for history.')
class TestSubjectImporter(ImporterTestCaseMixin, test.TestCase):
def _create_model_meta(self):
return {
'model_class_name': 'Subject',
'max_id': 16,
'app_label': 'core'
}
def METHOD_NAME(self, test_admin_user=None):
return {
'pk': 1,
'model': 'core.subject',
'admin_user_ids': [test_admin_user.id] if test_admin_user else [],
'fields': {
'long_name': 'DUCK1010 - Programming for the natural sciences',
'admins': [test_admin_user.id] if test_admin_user else [],
'etag': '2017-05-15T11:04:46.567',
'short_name': 'duck1100',
'parentnode': 1
}
}
def test_importer(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user))
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
self.assertEqual(Subject.objects.count(), 1)
def test_importer_pk(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user))
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
subject = Subject.objects.first()
self.assertEqual(subject.pk, 1)
def test_importer_imported_model_with_admins(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user))
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
self.assertEqual(SubjectPermissionGroup.objects.count(), 1)
def test_importer_imported_model_without_admins(self):
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME())
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
self.assertEqual(SubjectPermissionGroup.objects.count(), 0)
def test_importer_short_name(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user))
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
subject = Subject.objects.first()
self.assertEqual(subject.short_name, 'duck1100')
def test_importer_long_name(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user))
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
subject = Subject.objects.first()
self.assertEqual(subject.long_name, 'DUCK1010 - Programming for the natural sciences')
def test_importer_permissiongroups_is_created(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user))
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
self.assertEqual(Subject.objects.count(), 1)
subject = Subject.objects.first()
self.assertEqual(account_models.PermissionGroup.objects.count(), 1)
self.assertEqual(account_models.SubjectPermissionGroup.objects.count(), 1)
subjects_for_admin_list = Subject.objects.filter_user_is_admin(test_admin_user)
self.assertEqual(len(subjects_for_admin_list), 1)
self.assertEqual(subjects_for_admin_list[0], subject)
def test_auto_sequence_numbered_objects_uses_meta_max_id(self):
test_admin_user = baker.make(settings.AUTH_USER_MODEL)
self.create_v2dump(model_name='core.subject',
data=self.METHOD_NAME(test_admin_user=test_admin_user),
model_meta=self._create_model_meta())
subjectimporter = SubjectImporter(input_root=self.temp_root_dir)
subjectimporter.import_models()
self.assertEqual(Subject.objects.count(), 1)
subject = Subject.objects.first()
self.assertEqual(subject.pk, 1)
self.assertEqual(subject.id, 1)
subject_with_auto_id = baker.make('core.Subject')
self.assertEqual(subject_with_auto_id.id, self._create_model_meta()['max_id']+1)
self.assertEqual(subject_with_auto_id.pk, self._create_model_meta()['max_id']+1)
| null |
4,808 |
import unittest
from django.contrib.contenttypes.models import ContentType
from devilry.devilry_import_v2database.models import ImportedModel
from django import test
from django.conf import settings
from django.utils.dateparse import parse_datetime
from model_bakery import baker
from devilry.apps.core.models import RelatedStudent, PeriodTag, Period
from devilry.devilry_import_v2database.modelimporters.relateduser_importer import RelatedStudentImporter
from .importer_testcase_mixin import ImporterTestCaseMixin
@unittest.skip('Not relevant anymore, keep for history.')
class TestRelatedStudentImporter(ImporterTestCaseMixin, test.TestCase):
def _create_model_meta(self):
return {
'model_class_name': 'RelatedStudent',
'max_id': 19,
'app_label': 'core'
}
def _create_related_student_dict(self, period, user):
return {
'pk': 19,
'model': 'core.relatedstudent',
'fields': {
'user': user.id,
'period': period.id,
'candidate_id': None,
'tags': 'group1'
}
}
def test_importer(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
self.assertEqual(RelatedStudent.objects.count(), 1)
self.assertEqual(PeriodTag.objects.count(), 1)
def test_importer_related_examiner_pk(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
self.assertEqual(related_examiner.pk, 19)
self.assertEqual(related_examiner.id, 19)
def test_importer_period_tag_period(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
period_tag = PeriodTag.objects.first()
self.assertEqual(period_tag.period, test_period)
def test_importer_period_tag_single_tag_created(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
period_tag = PeriodTag.objects.first()
self.assertEqual(period_tag.tag, 'group1')
def test_importer_period_tag_multiple_tags_created(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
relatedexaminer_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
relatedexaminer_data_dict['fields']['tags'] = 'group1,group2'
self.create_v2dump(model_name='core.relatedstudent',
data=relatedexaminer_data_dict)
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
period_tags_list = [period_tag.tag for period_tag in PeriodTag.objects.all()]
self.assertEqual(len(period_tags_list), 2)
self.assertIn('group1', period_tags_list)
self.assertIn('group2', period_tags_list)
def test_importer_single_period_tag_related_student_is_added(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user))
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
period_tag = PeriodTag.objects.first()
self.assertEqual(period_tag.relatedstudents.count(), 1)
self.assertIn(related_examiner, period_tag.relatedstudents.all())
def test_importer_multiple_period_tags_related_student_is_added(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
relatedexaminer_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
relatedexaminer_data_dict['fields']['tags'] = 'group1,group2'
self.create_v2dump(model_name='core.relatedstudent',
data=relatedexaminer_data_dict)
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
period_tags = PeriodTag.objects.all()
self.assertEqual(period_tags.count(), 2)
for period_tag in period_tags:
self.assertIn(related_examiner, period_tag.relatedstudents.all())
def test_importer_related_student_is_added_to_existing_tags_and_new_tags(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
baker.make('core.PeriodTag', period=test_period, tag='group1')
baker.make('core.PeriodTag', period=test_period, tag='group4')
relatedexaminer_data_dict = self._create_related_student_dict(period=test_period, user=test_user)
relatedexaminer_data_dict['fields']['tags'] = 'group1,group2,group3,group4'
self.create_v2dump(model_name='core.relatedstudent',
data=relatedexaminer_data_dict)
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
related_examiner = RelatedStudent.objects.first()
period_tags = PeriodTag.objects.all()
self.assertEqual(period_tags.count(), 4)
for period_tag in period_tags:
self.assertIn(related_examiner, period_tag.relatedstudents.all())
def METHOD_NAME(self):
test_user = baker.make(settings.AUTH_USER_MODEL)
test_period = baker.make('core.Period')
self.create_v2dump(model_name='core.relatedstudent',
data=self._create_related_student_dict(period=test_period, user=test_user),
model_meta=self._create_model_meta())
relatedstudent_importer = RelatedStudentImporter(input_root=self.temp_root_dir)
relatedstudent_importer.import_models()
self.assertEqual(RelatedStudent.objects.count(), 1)
related_student = RelatedStudent.objects.first()
self.assertEqual(related_student.pk, 19)
self.assertEqual(related_student.id, 19)
related_student_with_auto_id = baker.make('core.RelatedStudent')
self.assertEqual(related_student_with_auto_id.pk, self._create_model_meta()['max_id']+1)
self.assertEqual(related_student_with_auto_id.id, self._create_model_meta()['max_id']+1)
| null |
4,809 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import functools
from qtpy.QtWidgets import QMenu, QMessageBox
from os.path import join, exists
from mantid import ConfigService
from mantid.kernel import logger
from mantidqt.utils.qt import create_action
from workbench.config import CONF
RECENT_SCRIPT_MAX_NUMBER = 10
CACHE_FILE_NAME = "recent_script_file"
MENU_CACHE_LOCATION = join(ConfigService.getAppDataDirectory(), CACHE_FILE_NAME)
RECENT_SCRIPTS_KEY = "RecentScripts"
class RecentlyClosedScriptsMenu(QMenu):
def __init__(self, mainwindow):
super(RecentlyClosedScriptsMenu, self).__init__()
self.setTitle("Open Recently Closed Scripts")
self.aboutToShow.connect(self.repopulate_menu)
self.mainwindow = mainwindow
def repopulate_menu(self):
self.clear()
self.populate_menu()
def populate_menu(self):
# Check cache is present or don't do anything.
scripts = self._get_scripts_from_settings()
if len(scripts) > 0:
for script_path in scripts:
script_name = self.METHOD_NAME(script_path)
new_action = create_action(
parent=self.mainwindow, text=script_name, on_triggered=functools.partial(self.open_script, script_path)
)
self.addAction(new_action)
else:
self.addAction(create_action(parent=self.mainwindow, text="No recently closed scripts found"))
@staticmethod
def METHOD_NAME(path):
return path if len(path) < 33 else "..." + path[-30:]
def open_script(self, path):
# If the script still exists, hand it off to the editor to open in a new tab.
# Otherwise warn the user and remove the stale path from the stored list.
if exists(path):
self.mainwindow.editor.open_file_in_new_tab(path)
else:
# Remove path from script settings, then warn user.
self.remove_script_from_settings(path)
QMessageBox().warning(
None,
"That script no longer exists!",
"Are all network drives properly mounted? or are there any network connectivity " "problems?",
QMessageBox.Ok,
)
def remove_script_from_settings(self, path):
scripts = self._get_scripts_from_settings()
if path in scripts:
scripts.remove(path)
self._store_scripts_to_settings(scripts)
def add_script_to_settings(self, path):
if path is None or path == "":
return
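# The most recently closed script goes to the front of the list, which is
# capped at RECENT_SCRIPT_MAX_NUMBER entries.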
scripts = self._get_scripts_from_settings()
if path not in scripts:
scripts.insert(0, path)
if len(scripts) > RECENT_SCRIPT_MAX_NUMBER:
scripts.pop()
self._store_scripts_to_settings(scripts)
@staticmethod
def _get_scripts_from_settings():
scripts = []
try:
scripts = CONF.get(RECENT_SCRIPTS_KEY, type=list)
except KeyError:
# Happens quite often and should fail silently.
pass
except TypeError:
# Happens when garbage data is found in the QSettings .ini file
logger.error("Recently Opened Scripts were lost during save, and workbench has recovered from an error.")
CONF.set(RECENT_SCRIPTS_KEY, [])
def sort_key(sub_list):
return sub_list[0]
scripts.sort(key=sort_key)
# strip scripts of it's extra data and overwrite the list
for index, script in enumerate(scripts):
scripts[index] = script[1]
return scripts
@staticmethod
def _store_scripts_to_settings(scripts):
# Add an index to a tuple in the script
for index, script in enumerate(scripts):
scripts[index] = (index, script)
CONF.set(RECENT_SCRIPTS_KEY, scripts)
| null |
4,810 |
"""Signer implementation for project SPHINCS+ post-quantum signature support.
"""
import logging
import os
from typing import Any, Dict, Optional, Tuple
from securesystemslib.exceptions import (
UnsupportedLibraryError,
UnverifiedSignatureError,
VerificationError,
)
from securesystemslib.signer._key import Key
from securesystemslib.signer._signature import Signature
from securesystemslib.signer._signer import SecretsHandler, Signer
from securesystemslib.signer._utils import compute_default_keyid
SPX_IMPORT_ERROR = None
try:
from pyspx import shake_128s
except ImportError:
SPX_IMPORT_ERROR = "spinhcs+ key support requires the pyspx library"
_SHAKE_SEED_LEN = 48
logger = logging.getLogger(__name__)
def generate_spx_key_pair() -> Tuple[bytes, bytes]:
"""Generate SPHINCS+ key pair and return public and private bytes."""
if SPX_IMPORT_ERROR:
raise UnsupportedLibraryError(SPX_IMPORT_ERROR)
seed = os.urandom(_SHAKE_SEED_LEN)
public, private = shake_128s.generate_keypair(seed)
return public, private
class SpxKey(Key):
"""SPHINCS+ verifier.
NOTE: The SPHINCS+ key and signature serialization formats are not yet
considered stable in securesystemslib. They may change in future releases
and may not be supported by other implementations.
"""
DEFAULT_KEY_TYPE = "sphincs"
DEFAULT_SCHEME = "sphincs-shake-128s"
@classmethod
def from_dict(cls, keyid: str, key_dict: Dict[str, Any]) -> "SpxKey":
keytype, scheme, keyval = cls._from_dict(key_dict)
return cls(keyid, keytype, scheme, keyval, key_dict)
@classmethod
def from_bytes(cls, public: bytes) -> "SpxKey":
"""Create SpxKey instance from public key bytes."""
keytype = cls.DEFAULT_KEY_TYPE
scheme = cls.DEFAULT_SCHEME
keyval = {"public": public.hex()}
keyid = compute_default_keyid( # pylint: disable=protected-access
keytype, scheme, keyval
)
return cls(keyid, keytype, scheme, keyval)
def METHOD_NAME(self) -> Dict[str, Any]:
return self._to_dict()
def verify_signature(self, signature: Signature, data: bytes) -> None:
valid = None
try:
if SPX_IMPORT_ERROR:
raise UnsupportedLibraryError(SPX_IMPORT_ERROR)
key = bytes.fromhex(self.keyval["public"])
sig = bytes.fromhex(signature.signature)
valid = shake_128s.verify(data, sig, key)
except Exception as e:
logger.info("Key %s failed to verify sig: %s", self.keyid, str(e))
raise VerificationError(
f"Unknown failure to verify signature by {self.keyid}"
) from e
if not valid:
raise UnverifiedSignatureError(
f"Failed to verify signature by {self.keyid}"
)
class SpxSigner(Signer):
"""SPHINCS+ signer.
NOTE: The SPHINCS+ key and signature serialization formats are not yet
considered stable in securesystemslib. They may change in future releases
and may not be supported by other implementations.
Usage::
public_bytes, private_bytes = generate_spx_key_pair()
public_key = SpxKey.from_bytes(public_bytes)
signer = SpxSigner(private_bytes, public_key)
signature = signer.sign(b"payload")
# Use public_key.to_dict() / Key.from_dict() to transport public key data
public_key = signer.public_key
public_key.verify_signature(signature, b"payload")
"""
def __init__(self, private: bytes, public: SpxKey):
self.private_key = private
self.public_key = public
@classmethod
def from_priv_key_uri(
cls,
priv_key_uri: str,
public_key: Key,
secrets_handler: Optional[SecretsHandler] = None,
) -> "SpxSigner":
raise NotImplementedError
def sign(self, payload: bytes) -> Signature:
"""Signs payload with SPHINCS+ private key on the instance.
Arguments:
payload: bytes to be signed.
Raises:
UnsupportedLibraryError: PySPX is not available.
Returns:
Signature.
"""
if SPX_IMPORT_ERROR:
raise UnsupportedLibraryError(SPX_IMPORT_ERROR)
raw = shake_128s.sign(payload, self.private_key)
return Signature(self.public_key.keyid, raw.hex())
| null |
4,811 |
import asyncio
import binascii
import json
import logging
import time
import requests
from paradox.exceptions import ConnectToSiteFailed, StunSessionRefreshFailed
from paradox.lib import stun
logger = logging.getLogger("PAI").getChild(__name__)
class StunSession:
def __init__(self, site_id, email, panel_serial):
self.site_id = site_id
self.email = email
self.panel_serial = panel_serial
self.site_info = None
self.module = None
self.stun_control = None
self.stun_tunnel = None
self.connection_timestamp = 0
async def connect(self) -> None:
self.connection_timestamp = 0
logger.info("Connecting to Site: {}".format(self.site_id))
if self.site_info is None:
self.site_info = await self._get_site_info(
siteid=self.site_id, email=self.email
)
if self.site_info is None:
raise ConnectToSiteFailed("Unable to get site info")
logger.debug("Site Info: {}".format(json.dumps(self.site_info, indent=4)))
self.module = self._select_module()
if self.module is None:
self.site_info = None # Reset state
raise ConnectToSiteFailed("Unable to find module with desired panel serial")
xoraddr = binascii.unhexlify(self.module["xoraddr"])
await self._stun_tcp_change_request()
await self.METHOD_NAME()
stun_r = await self._stun_connect(xoraddr)
self.connection_timestamp = time.time()
connection_id = stun_r[0]["attr_body"]
raddr = self.stun_control.sock.getpeername()
logger.debug("STUN Connection Bind Request")
self.stun_tunnel = stun.StunClient(host=raddr[0], port=raddr[1])
self.stun_tunnel.send_connection_bind_request(binascii.unhexlify(connection_id))
stun_r = self.stun_tunnel.receive_response()
if stun.is_error(stun_r):
raise ConnectToSiteFailed(
f"STUN Connection Bind Request error: {stun.get_error(stun_r)}"
)
logger.info("Connected to Site: {}".format(self.site_id))
async def _stun_connect(self, xoraddr):
logger.debug("STUN Connect Request")
self.stun_control.send_connect_request(xoraddr=xoraddr)
stun_r = self.stun_control.receive_response()
if stun.is_error(stun_r):
raise ConnectToSiteFailed(
f"STUN Connect Request error: {stun.get_error(stun_r)}"
)
return stun_r
async def METHOD_NAME(self):
logger.debug("STUN TCP Binding Request")
self.stun_control.send_binding_request()
stun_r = self.stun_control.receive_response()
if stun.is_error(stun_r):
raise ConnectToSiteFailed(
f"STUN TCP Binding Request error: {stun.get_error(stun_r)}"
)
async def _stun_tcp_change_request(self):
stun_host = "turn.paradoxmyhome.com"
logger.debug("STUN TCP Change Request")
self.stun_control = stun.StunClient(stun_host)
self.stun_control.send_tcp_change_request()
stun_r = self.stun_control.receive_response()
if stun.is_error(stun_r):
raise ConnectToSiteFailed(
f"STUN TCP Change Request error: {stun.get_error(stun_r)}"
)
def _select_module(self):
for site in self.site_info["site"]:
for module in site["module"]:
if module.get("xoraddr") is None:
continue
logger.debug(
"Found module with panel serial: {}".format(
module["panelSerial"]
)
)
if not self.panel_serial: # Pick first available
return module
elif module["panelSerial"] == self.panel_serial:
return module
def get_socket(self):
return self.stun_tunnel.sock
def refresh_session_if_required(self) -> None:
if self.site_info is None or self.connection_timestamp == 0:
return
# Refresh session if required
if time.time() - self.connection_timestamp >= 500:
logger.info("STUN Session Refresh")
self.stun_control.send_refresh_request()
stun_r = self.stun_control.receive_response()
if stun.is_error(stun_r):
self.connected = False
raise StunSessionRefreshFailed(
f"STUN Session Refresh failed: {stun.get_error(stun_r)}"
)
self.connection_timestamp = time.time()
def close(self):
if self.stun_control:
try:
self.stun_control.close()
self.stun_control = None
            except Exception:
logger.exception("stun_control socket close failed")
if self.stun_tunnel:
try:
self.stun_tunnel.close()
self.stun_tunnel = None
            except Exception:
logger.exception("stun_tunnel socket close failed")
self.connection_timestamp = 0
@staticmethod
async def _get_site_info(email, siteid):
logger.info("Getting site info")
URL = "https://api.insightgoldatpmh.com/v1/site"
headers = {
"User-Agent": "Mozilla/3.0 (compatible; Indy Library)",
"Accept-Encoding": "identity",
"Accept": "text/html, */*",
}
tries = 5
loop = asyncio.get_event_loop()
while tries > 0:
req = await loop.run_in_executor(
None,
lambda: requests.get(
URL, headers=headers, params={"email": email, "name": siteid}
),
)
if req.status_code == 200:
return req.json()
logger.warning("Unable to get site info. Retrying...")
tries -= 1
            await asyncio.sleep(5)
return None
def get_potential_modules(self):
pass
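# Hedged usage sketch (illustrative only, not part of the original module): shows the
# intended call order; the site id, e-mail address and panel serial below are placeholders.
async def _example_stun_session_usage():
    session = StunSession(site_id="my_site", email="user@example.com", panel_serial=None)
    await session.connect()                   # resolve site info and bind the STUN tunnel
    sock = session.get_socket()               # socket carrying the tunnelled panel connection
    session.refresh_session_if_required()     # call periodically; refreshes after ~500 s
    session.close()
    return sock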
| null |
4,812 |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test imagenet to mindrecord tool"""
import os
import pytest
from mindspore import log as logger
from mindspore.mindrecord import FileReader
from mindspore.mindrecord import ImageNetToMR
IMAGENET_MAP_FILE = "../data/mindrecord/testImageNetDataWhole/labels_map.txt"
IMAGENET_IMAGE_DIR = "../data/mindrecord/testImageNetDataWhole/images"
PARTITION_NUMBER = 4
@pytest.fixture
def METHOD_NAME():
"""add/remove file"""
def remove_one_file(x):
if os.path.exists(x):
os.remove(x)
def remove_file(file_name):
x = file_name
remove_one_file(x)
x = file_name + ".db"
remove_one_file(x)
for i in range(PARTITION_NUMBER):
x = file_name + str(i)
remove_one_file(x)
x = file_name + str(i) + ".db"
remove_one_file(x)
file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
remove_file(file_name)
yield "yield_fixture_data"
remove_file(file_name)
def read(filename):
"""test file reade"""
count = 0
reader = FileReader(filename)
for _, x in enumerate(reader.get_next()):
assert len(x) == 3
count = count + 1
if count == 1:
logger.info("data: {}".format(x))
assert count == 20
reader.close()
def test_imagenet_to_mindrecord(METHOD_NAME):
"""test transform imagenet dataset to mindrecord."""
file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE, IMAGENET_IMAGE_DIR,
file_name, PARTITION_NUMBER)
imagenet_transformer.transform()
for i in range(PARTITION_NUMBER):
assert os.path.exists(file_name + str(i))
assert os.path.exists(file_name + str(i) + ".db")
read([file_name + "0",
file_name + "1",
file_name + "2",
file_name + "3"])
def test_imagenet_to_mindrecord_default_partition_number(METHOD_NAME):
"""
test transform imagenet dataset to mindrecord
when partition number is default.
"""
file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE, IMAGENET_IMAGE_DIR,
file_name, 1)
imagenet_transformer.transform()
assert os.path.exists(file_name)
assert os.path.exists(file_name + ".db")
read(file_name)
def test_imagenet_to_mindrecord_partition_number_0(METHOD_NAME):
"""
test transform imagenet dataset to mindrecord
when partition number is 0.
"""
file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
with pytest.raises(Exception, match="Invalid parameter value"):
imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE,
IMAGENET_IMAGE_DIR,
file_name, 0)
imagenet_transformer.transform()
def test_imagenet_to_mindrecord_partition_number_none(METHOD_NAME):
"""
test transform imagenet dataset to mindrecord
when partition number is none.
"""
file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
with pytest.raises(Exception,
match="The parameter partition_number must be int"):
imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE,
IMAGENET_IMAGE_DIR,
file_name, None)
imagenet_transformer.transform()
def test_imagenet_to_mindrecord_illegal_filename(METHOD_NAME):
"""
test transform imagenet dataset to mindrecord
when file name contains illegal character.
"""
filename = "imagenet_not_*ok"
with pytest.raises(Exception, match="File name should not contains"):
imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE,
IMAGENET_IMAGE_DIR, filename,
PARTITION_NUMBER)
imagenet_transformer.transform()
def test_imagenet_to_mindrecord_illegal_1_filename(METHOD_NAME):
"""
test transform imagenet dataset to mindrecord
when file name end with '/'.
"""
filename = "imagenet/path/"
with pytest.raises(Exception, match="File path can not end with '/'"):
imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE,
IMAGENET_IMAGE_DIR, filename,
PARTITION_NUMBER)
imagenet_transformer.transform()
| null |
4,813 |
from django import forms
from django.contrib import messages
from django.db import transaction
from django.http import Http404
from django.utils import timezone
from django.shortcuts import redirect
from django.utils.translation import gettext_lazy
from devilry.devilry_comment.editor_widget import DevilryMarkdownNoPreviewWidget
from devilry.devilry_admin.views.assignment.students import groupview_base
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_group import models as group_models
from devilry.devilry_comment import models as comment_models
class TargetRenderer(devilry_listbuilder.assignmentgroup.GroupTargetRenderer):
def get_submit_button_text(self):
return gettext_lazy('Pass students')
def get_with_items_title(self):
return gettext_lazy('Students to pass')
def get_field_layout(self):
return [
'feedback_comment_text'
]
class SelectedAssignmentGroupsForm(groupview_base.SelectedGroupsForm):
"""
We subclass the :class:`~.devilry.devilry_admin.view.assignment.students.groupview_base.SelectedGroupsForm` form so
that we can add a text widget for feedback comment.
"""
def __init__(self, *args, **kwargs):
super(SelectedAssignmentGroupsForm, self).__init__(*args, **kwargs)
self.fields['feedback_comment_text'] = forms.CharField(
widget=DevilryMarkdownNoPreviewWidget(),
initial=gettext_lazy('Delivery has been corrected. Passed in a previous semester.'),
label=False
)
class PassAssignmentGroupsView(groupview_base.BaseMultiselectView):
"""
    This workflow handles the case where no previous semester matches the assignment on the current
    period, for instance when this is a new subject derived from an old one.
This is basically the same as bulk correcting ``AssignmentGroups``, but can be done at an early stage from the
admin dashboard.
"""
template_name = 'devilry_admin/assignment/passed_previous_period/select_groups_to_pass.django.html'
def get(self, request, *args, **kwargs):
response = super(PassAssignmentGroupsView, self).get(request, *args, **kwargs)
if self.get_unfiltered_queryset_for_role(role=self.request.cradmin_role).exists():
return response
else:
messages.info(self.request,
gettext_lazy('There are no students on this assignment.'))
return redirect(str(self.get_success_url()))
def get_pagetitle(self):
return gettext_lazy('Bulk pass students')
def get_form_class(self):
return SelectedAssignmentGroupsForm
def get_target_renderer_class(self):
return TargetRenderer
def METHOD_NAME(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
viewname='manually_select_groups',
kwargs={'filters_string': filters_string})
def get_success_url(self):
return self.request.cradmin_instance.rolefrontpage_url()
def __get_grading_points(self):
return self.assignment.max_points
def __publish_grading_on_current_assignment(self, queryset, published_by, comment_text):
"""
        Publish grading on the current assignment, ``self.assignment``.
        Args:
            queryset: An :class:`~.devilry.apps.core.models.assignment_group.AssignmentGroup` ``QuerySet``.
            published_by: The user the grading will be published by.
            comment_text: Text used for the grading comment posted on each group.
"""
grading_points = self.__get_grading_points()
with transaction.atomic():
for group in queryset:
group_models.GroupComment.objects.create(
feedback_set_id=group.cached_data.last_feedbackset_id,
part_of_grading=True,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
user=self.request.user,
user_role=comment_models.Comment.USER_ROLE_ADMIN,
text=comment_text,
comment_type=comment_models.Comment.COMMENT_TYPE_GROUPCOMMENT,
published_datetime=timezone.now()
)
group.cached_data.last_feedbackset.publish(published_by, grading_points)
group_models.FeedbacksetPassedPreviousPeriod(
feedbackset=group.cached_data.last_feedbackset,
passed_previous_period_type=group_models.FeedbacksetPassedPreviousPeriod.PASSED_PREVIOUS_SEMESTER_TYPES.MANUAL.value,
created_by=self.request.user
).save()
def form_valid(self, form):
queryset = form.cleaned_data['selected_items']
self.__publish_grading_on_current_assignment(
queryset=queryset,
published_by=self.request.user,
comment_text=form.cleaned_data['feedback_comment_text'])
return redirect(str(self.get_success_url()))
| null |
4,814 |
# -*- coding: utf-8 -*-
"""simulation data operations
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from pykern.pkdebug import pkdp
import re
import sirepo.sim_data
class SimData(sirepo.sim_data.SimDataBase):
ANALYSIS_ONLY_FIELDS = frozenset(
("colorMap", "notes", "color", "impactColorMap", "axes", "slice")
)
@classmethod
def _compute_model(cls, analysis_model, *args, **kwargs):
if analysis_model in (
"currentAnimation",
"egunCurrentAnimation",
"fieldAnimation",
"impactDensityAnimation",
"particle3d",
"particleAnimation",
):
return "animation"
if analysis_model == "optimizerAnimation":
return analysis_model
if analysis_model in (
"fieldCalcAnimation",
"fieldCalculationAnimation",
"fieldComparisonAnimation",
):
return "fieldCalculationAnimation"
# TODO(pjm): special case, should be an Animation model
if analysis_model == "particle3d":
return "animation"
return super(SimData, cls)._compute_model(analysis_model, *args, **kwargs)
@classmethod
def fixup_old_data(cls, data, qcall, **kwargs):
def _fixup_reflector(m):
if "isReflector" not in m:
return
if m.isReflector == "1":
for f in "specProb", "diffProb":
m[f] = float(m[f])
if m.specProb > 0:
m.reflectorType = "specular"
m.reflectorProbability = m.specProb
elif m.diffProb > 0:
m.reflectorType = "diffuse"
m.reflectorProbability = m.diffProb
for f in ("isReflector", "specProb", "diffProb", "refScheme"):
del m[f]
dm = data.models
dm.pksetdefault(optimizer=PKDict)
dm.optimizer.pksetdefault(
constraints=list,
enabledFields=PKDict,
fields=list,
)
cls._init_models(
dm,
(
# simulationGrid must be first
"simulationGrid",
"anode",
"egunCurrentAnimation",
"fieldAnimation",
"fieldCalcAnimation",
"fieldCalculationAnimation",
"fieldComparisonAnimation",
"fieldComparisonReport",
"fieldReport",
"impactDensityAnimation",
"optimizer",
"optimizerAnimation",
"optimizerStatus",
"particle3d",
"particleAnimation",
"simulation",
"cathode",
),
dynamic=lambda m: cls.__dynamic_defaults(data, m),
)
pkcollections.unchecked_del(dm.particle3d, "joinEvery")
for m in ("anode", "cathode"):
_fixup_reflector(dm[m])
s = cls.schema()
for c in dm.conductorTypes:
x = c.setdefault("isConductor", "1" if c.voltage > 0 else "0")
            # conductor.color is null in examples
if not c.get("color", 0):
c.color = s.constants[
"zeroVoltsColor" if x == "0" else "nonZeroVoltsColor"
]
cls.update_model_defaults(c, c.type)
_fixup_reflector(c)
for c in dm.conductors:
cls.update_model_defaults(c, "conductorPosition")
if dm.optimizer.objective == "efficiency":
dm.optimizer.objective = "transparency"
cls._organize_example(data)
@classmethod
def warpvnd_is_3d(cls, data):
return data.models.simulationGrid.simulation_mode == "3d"
@classmethod
def _compute_job_fields(cls, data, r, compute_model):
res = ["simulationGrid"]
res.append(cls.__non_opt_fields_to_array(data.models.beam))
for container in ("conductors", "conductorTypes"):
for m in data.models[container]:
res.append(cls.__non_opt_fields_to_array(m))
return res + cls._non_analysis_fields(data, r)
@classmethod
def __dynamic_defaults(cls, data, model):
"""defaults that depend on the current data"""
if not model.startswith("fieldComparison"):
return PKDict()
g = data.models.simulationGrid
t = cls.warpvnd_is_3d(data)
return PKDict(
dimension="x",
xCell1=0,
xCell2=int(g.num_x / 2.0),
xCell3=g.num_x,
yCell1=0,
yCell2=int(g.num_y / 2.0) if t else 0,
yCell3=g.num_y if t else 0,
zCell1=0,
zCell2=int(g.num_z / 2.0),
zCell3=g.num_z,
)
@classmethod
def METHOD_NAME(cls, data):
res = []
for m in data.models.conductorTypes:
if m.type == "stl":
res.append(cls.lib_file_name_with_model_field("stl", "file", m.file))
return res
@classmethod
def __non_opt_fields_to_array(cls, model):
res = []
for f in model:
if not re.search(r"\_opt$", f) and f not in cls.ANALYSIS_ONLY_FIELDS:
res.append(model[f])
return res
| null |
4,815 |
# This program and the accompanying materials are made available under the
# terms of the Mozilla Public License v2.0 which accompanies this distribution,
# and is available at https://www.mozilla.org/en-US/MPL/2.0/
import sys, random
class Solution:
"""Abstract solution. To be implemented."""
def __init__(self, num_objectives):
"""Constructor. Parameters: number of objectives."""
self.num_objectives = num_objectives
self.objectives = []
for _ in range(num_objectives):
self.objectives.append(None)
self.attributes = []
self.rank = sys.maxsize
self.distance = 0.0
self.chromos_fitness = {}
self.sch = {}
def __rshift__(self, other):
"""True if this solution dominates the other (">>" operator)."""
dominates = False
for i in range(len(self.objectives)):
if self.objectives[i] > other.objectives[i]:
return False
elif self.objectives[i] < other.objectives[i]:
dominates = True
return dominates
def __lshift__(self, other):
"""True if this solution is dominated by the other ("<<" operator)."""
return other >> self
def crowded_comparison(s1, s2):
"""Compare the two solutions based on crowded comparison.
Args:
s1 (obj): One chromosome.
s2 (obj): Another chromosome.
Returns:
float: A comparison value.
"""
if s1.rank < s2.rank:
return 1
elif s1.rank > s2.rank:
return -1
elif s1.distance > s2.distance:
return 1
elif s1.distance < s2.distance:
return -1
else:
return 0
class NSGAII:
"""Implementation of NSGA-II algorithm."""
current_evaluated_objective = 0
def __init__(self, num_objectives, mutation_rate=0.1, crossover_rate=1.0):
"""Constructor.
Args:
num_objectives (obj): Number of objectives.
mutation_rate (float): Mutation rate (default value 10%).
            crossover_rate (float): Crossover rate (default value 100%).
"""
self.num_objectives = num_objectives
self.mutation_rate = mutation_rate
self.crossover_rate = crossover_rate
random.seed(100)
def run(self, p, population_size, num_generations):
"""Run NSGA-II.
Args:
p (obj): A set of chromosomes (population).
population_size (obj): A population size.
num_generations (obj): A number of generations.
Returns:
list: First front of Pareto front.
"""
for s in p:
s.evaluate_solution(0)
first_front = []
for i in range(num_generations):
r = []
r.extend(p)
fronts = self.fast_nondominated_sort(r)
del p[:]
for front in fronts.values():
if len(front) == 0:
break
self.crowding_distance_assignment(front)
p.extend(front)
if len(p) >= population_size:
break
self.sort_crowding(p)
if len(p) > population_size:
del p[population_size:]
first_front = list(fronts.values())[0]
return first_front
@staticmethod
def sort_ranking(p):
"""Run sort the sort of chromosomes according to their ranks.
Args:
p (obj): A set of chromosomes (population).
"""
for i in range(len(p) - 1, -1, -1):
for j in range(1, i + 1):
s1 = p[j - 1]
s2 = p[j]
if s1.rank > s2.rank:
p[j - 1] = s2
p[j] = s1
@staticmethod
def METHOD_NAME(p, obj_idx):
"""Run sort the chromosome based on their objective value.
Args:
p (obj): A set of chromosomes (population).
obj_idx (int): The index of objective function.
"""
for i in range(len(p) - 1, -1, -1):
for j in range(1, i + 1):
s1 = p[j - 1]
s2 = p[j]
if s1.objectives[obj_idx] > s2.objectives[obj_idx]:
p[j - 1] = s2
p[j] = s1
@staticmethod
def sort_crowding(p):
"""Run calculate the crowding distance of adjacent two chromosome in a front level.
Args:
p (obj): A set of chromosomes (population).
"""
for i in range(len(p) - 1, -1, -1):
for j in range(1, i + 1):
s1 = p[j - 1]
s2 = p[j]
if crowded_comparison(s1, s2) < 0:
p[j - 1] = s2
p[j] = s1
def make_new_pop(self, p):
"""Make new population Q, offspring of P.
Args:
p (obj): A set of chromosomes (population).
Returns:
list: Offspring.
"""
q = []
while len(q) != len(p):
selected_solutions = [None, None]
while selected_solutions[0] == selected_solutions[1]:
for i in range(2):
s1 = random.choice(p)
s2 = s1
while s1 == s2:
s2 = random.choice(p)
if crowded_comparison(s1, s2) > 0:
selected_solutions[i] = s1
else:
selected_solutions[i] = s2
if random.random() < self.crossover_rate:
child_solution = selected_solutions[0].crossover(selected_solutions[1])
if random.random() < self.mutation_rate:
child_solution.mutate()
child_solution.evaluate_solution(0)
q.append(child_solution)
return q
@staticmethod
def fast_nondominated_sort(p):
"""Discover Pareto fronts in P, based on non-domination criterion.
Args:
p (obj): A set of chromosomes (population).
Returns:
dict: Fronts.
"""
fronts = {}
s = {}
n = {}
for i in p:
s[i] = []
n[i] = 0
fronts[1] = []
for pk in p:
for qk in p:
if pk == qk:
continue
if pk >> qk:
s[pk].append(qk)
elif pk << qk:
n[pk] += 1
if n[pk] == 0:
fronts[1].append(pk)
i = 1
while len(fronts[i]) != 0:
next_front = []
for r in fronts[i]:
for j in s[r]:
n[j] -= 1
if n[j] == 0:
next_front.append(j)
i += 1
fronts[i] = next_front
return fronts
def crowding_distance_assignment(self, front):
"""Assign a crowding distance for each solution in the front.
Args:
front (dict): A set of chromosomes in the front level.
"""
for p in front:
p.distance = 0
for obj_index in range(self.num_objectives):
self.METHOD_NAME(front, obj_index)
front[0].distance = float('inf')
front[len(front) - 1].distance = float('inf')
for i in range(1, len(front) - 1):
front[i].distance += (front[i + 1].distance - front[i - 1].distance)
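# Hedged usage sketch (illustrative only, not part of the original module): a toy
# two-objective Solution subclass over a single float gene (Schaffer-style problem),
# showing the methods NSGAII.run() and make_new_pop() expect subclasses to provide.
class _ToySolution(Solution):
    def __init__(self):
        super().__init__(2)
        self.gene = random.uniform(-5.0, 5.0)
    def evaluate_solution(self, _):
        self.objectives[0] = self.gene ** 2            # minimize f1 = x^2
        self.objectives[1] = (self.gene - 2.0) ** 2    # minimize f2 = (x - 2)^2
    def crossover(self, other):
        child = _ToySolution()
        child.gene = 0.5 * (self.gene + other.gene)    # intermediate recombination
        return child
    def mutate(self):
        self.gene += random.gauss(0.0, 0.1)            # small Gaussian perturbation
# population = [_ToySolution() for _ in range(20)]
# pareto_front = NSGAII(num_objectives=2).run(population, population_size=20, num_generations=10)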
| null |
4,816 |
import ctypes
import numpy as np
from collections import defaultdict, deque
from typing import TypeVar, Type, Any, Dict, Deque, Tuple
from tinygrad.helpers import DType, dtypes, prod, GlobalCounters, ImageDType
_T = TypeVar("_T")
class RawBuffer: # pylint: disable=abstract-method
def __init__(self, size:int, dtype:DType, buf:Any=None, allocator:Any=None, **kwargs):
self.size: int = size
self.dtype: DType = dtype
self._buf = buf if buf is not None else (allocator.alloc(size, dtype, **kwargs) if allocator else None) # If buf is provided, use it. Otherwise try to allocate from the allocator.
self._memsz: int = size*dtype.itemsize
self._allocator = allocator
self._device = kwargs.get('device', None)
GlobalCounters.mem_used += self._memsz
def __del__(self): # NOTE: if it fails on init (bad dtype), it won't have a _memsz
if hasattr(self, '_memsz'): GlobalCounters.mem_used -= self._memsz
if hasattr(self, '_allocator') and self._allocator: self._allocator.free(self._buf)
def __repr__(self): return f"buffer<{self.size}, {self.dtype}>"
@property
def key(self): return (self.size, self.dtype)
# NOTE: this interface allows for 0 copy
@classmethod
def fromCPU(cls:Type[_T], x:np.ndarray) -> _T: raise NotImplementedError("must be implemented")
def toCPU(self) -> np.ndarray: raise NotImplementedError("must be implemented")
class RawConst(RawBuffer): # pylint: disable=abstract-method
def __repr__(self): return f"const<{self._buf}, {self.dtype}>"
@property
def key(self): return (str(self._buf), self.dtype)
def buf_is_kernel_arg(x) -> bool:
return x.realized is not None and x.realized.__class__ is not RawConst
# --teenygrad--
class RawBufferCopyIn(RawBuffer):
def _copyin(self, x:np.ndarray) -> None: raise NotImplementedError("must be implemented")
@classmethod
def fromCPU(cls, x:np.ndarray, **kwargs):
ret = cls(prod(x.shape), dtypes.from_np(x.dtype), **kwargs)
if x.size > 0: ret._copyin(x)
return ret
class RawBufferMapped(RawBufferCopyIn):
def _buffer(self) -> memoryview: raise NotImplementedError("must be implemented")
# NOTE: this metadata prevents the backing buffer from being freed. hack can be removed with PEP688
def toCPU(self) -> np.ndarray: return np.frombuffer(self._buffer(), dtype=np.dtype(self.dtype.np, metadata={"backing": self}), count=self.size) # type: ignore
def _copyin(self, x:np.ndarray) -> None: np.copyto(self.toCPU(), x.reshape(-1))
# this one is simple enough that i moved it out of the runtimes
class RawMallocBuffer(RawBufferMapped):
def __init__(self, size, dtype: DType): super().__init__(size, dtype, ({dtypes.float64:ctypes.c_double, dtypes.float32: ctypes.c_float, dtypes.float16: ctypes.c_int16, dtypes.bfloat16: ctypes.c_int16, dtypes.int8: ctypes.c_int8, dtypes.uint8: ctypes.c_uint8, dtypes.bool: ctypes.c_uint8, dtypes.int32: ctypes.c_int32, dtypes.uint32: ctypes.c_uint32, dtypes.int64: ctypes.c_int64, dtypes.uint64: ctypes.c_uint64}[dtype] * size)())
def _buffer(self): return memoryview(self._buf)
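# Hedged usage sketch (illustrative only, not part of the original module): round-trips
# a numpy array through a malloc-backed buffer via the zero-copy mapped interface.
def _example_malloc_roundtrip() -> bool:
  a = np.arange(4, dtype=np.float32)     # host data
  buf = RawMallocBuffer.fromCPU(a)       # allocates ctypes storage and copies the data in
  return bool((buf.toCPU() == a).all())  # reads back through the memoryview mapping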
class RawBufferCopyInOut(RawBufferCopyIn):
def _copyout(self, x:np.ndarray) -> None: raise NotImplementedError("must be implemented")
def toCPU(self) -> np.ndarray:
x: np.ndarray = np.empty(self.size, dtype=self.dtype.np)
if x.size > 0: self._copyout(x)
return x
class RawBufferTransfer(RawBuffer):
def _transfer(self, x) -> None: raise NotImplementedError("must be implemented")
@classmethod
def transfer(cls, x, shape, dtype, **kwargs):
ret = cls(prod(shape), dtype, **kwargs)
ret._transfer(x)
return ret
class LRUAllocator:
def __init__(self, dev_memsz=(4<<30)):
self.epoch = 0
self.free_space: Dict[Any, int] = defaultdict(lambda: dev_memsz)
self.buffer_info: Dict[Any, Tuple[int, DType, str]] = dict()
    self.cached_buffers: Dict[Tuple[int, ...], Deque[Tuple[Any, int]]] = defaultdict(deque) # Cached buffer storage, split by type and size, newest first.
self.aging_order: Dict[Any, Deque[Tuple[Tuple[int, ...], int]]] = defaultdict(deque) # Keys of cached_buffers, ordered from oldest to newest updates.
def __del__(self):
for v in self.cached_buffers.values():
for buf, _ in v: self._free_buffer(buf)
def _cache_reuse_buffer(self, rawbufs: Deque[Tuple[Any, int]]): # The newest cached buffer is reused.
GlobalCounters.mem_cached -= self._underlying_buf_memsz(rawbufs[0][0])
return rawbufs.popleft()[0]
def _alloc_buffer(self, size, dtype, device, **kwargs):
self.free_space[device] -= size*dtype.itemsize
while len(self.aging_order[device]) and self.free_space[device] < 0: # When OOM removing lru buffers.
bucket, epoch = self.aging_order[device].popleft()
if self.cached_buffers[bucket] and self.cached_buffers[bucket][-1][1] == epoch: self._free_buffer(self.cached_buffers[bucket].pop()[0]) # Free cached buffer if it is still in cache.
newbuf = self._do_alloc(max(1, size), dtype, device, **kwargs)
self.buffer_info[newbuf] = (size, dtype, device)
return newbuf
def _free_buffer(self, buf_to_free):
from tinygrad.jit import CacheCollector
CacheCollector._on_buf_free(buf_to_free)
self.free_space[self.buffer_info[buf_to_free][2]] += self._underlying_buf_memsz(buf_to_free)
GlobalCounters.mem_cached -= self._underlying_buf_memsz(buf_to_free)
self.buffer_info.pop(buf_to_free)
self.METHOD_NAME(buf_to_free)
def alloc(self, size, dtype, device='0', **kwargs):
rawbufs = self.cached_buffers.get(self._cached_bufkey(size, dtype, device), None)
return self._cache_reuse_buffer(rawbufs) if rawbufs else self._alloc_buffer(size, dtype, device, **kwargs)
def free(self, buf): # free() just caches buffer. It might be freed later when OOM during allocation.
self.epoch += 1
size, dtype, device = self.buffer_info[buf]
self.cached_buffers[self._cached_bufkey(size, dtype, device)].appendleft((buf, self.epoch))
self.aging_order[device].append((self._cached_bufkey(size, dtype, device), self.epoch))
GlobalCounters.mem_cached += self._underlying_buf_memsz(buf)
def _underlying_buf_memsz(self, buf): return self.buffer_info[buf][0] * self.buffer_info[buf][1].itemsize
def _cached_bufkey(self, size, dtype, device) -> Tuple[int, ...]: return (device, size, dtype, dtype.shape) if isinstance(dtype, ImageDType) else (device, size, dtype) # Provides a key for reusing device buffers with identical keys.
def _do_alloc(self, size, dtype, device, **kwargs): raise NotImplementedError("must be implemented")
def METHOD_NAME(self, buf): pass
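# Hedged usage sketch (illustrative only, not part of the original module): a trivial
# host-memory allocator driving the LRU cache; only _do_alloc is overridden, the free
# hook keeps its default no-op behaviour and the '0' device string is a placeholder.
class _MallocLRUAllocator(LRUAllocator):
  def _do_alloc(self, size, dtype, device, **kwargs): return (ctypes.c_uint8 * (size*dtype.itemsize))()
# allocator = _MallocLRUAllocator(dev_memsz=1<<20)
# a = allocator.alloc(1024, dtypes.float32, device='0')
# allocator.free(a)                                       # cached for reuse, not released
# b = allocator.alloc(1024, dtypes.float32, device='0')   # returns the cached buffer (b is a)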
| null |
4,817 |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from typing import Dict, Optional, Sequence
import mmcv
import numpy as np
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from mmdet3d.evaluation import seg_eval
from mmdet3d.registry import METRICS
@METRICS.register_module()
class SegMetric(BaseMetric):
"""3D semantic segmentation evaluation metric.
Args:
collect_device (str, optional): Device name used for collecting
results from different ranks during distributed training.
Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
prefix (str): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None.
pklfile_prefix (str, optional): The prefix of pkl files, including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
submission_prefix (str, optional): The prefix of submission data.
If not specified, the submission data will not be generated.
Default: None.
"""
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
pklfile_prefix: str = None,
submission_prefix: str = None,
**kwargs):
self.pklfile_prefix = pklfile_prefix
self.submission_prefix = submission_prefix
super(SegMetric, self).__init__(
prefix=prefix, collect_device=collect_device)
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
"""
for data_sample in data_samples:
pred_3d = data_sample['pred_pts_seg']
eval_ann_info = data_sample['eval_ann_info']
cpu_pred_3d = dict()
for k, v in pred_3d.items():
if hasattr(v, 'to'):
cpu_pred_3d[k] = v.to('cpu').numpy()
else:
cpu_pred_3d[k] = v
self.results.append((eval_ann_info, cpu_pred_3d))
def format_results(self, results):
r"""Format the results to txt file. Refer to `ScanNet documentation
<http://kaldir.vc.in.tum.de/scannet_benchmark/documentation>`_.
Args:
outputs (list[dict]): Testing results of the dataset.
Returns:
tuple: (outputs, tmp_dir), outputs is the detection results,
tmp_dir is the temporal directory created for saving submission
files when ``submission_prefix`` is not specified.
"""
submission_prefix = self.submission_prefix
if submission_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
submission_prefix = osp.join(tmp_dir.name, 'results')
mmcv.mkdir_or_exist(submission_prefix)
ignore_index = self.dataset_meta['ignore_index']
# need to map network output to original label idx
cat2label = np.zeros(len(self.dataset_meta['label2cat'])).astype(
np.int64)
for original_label, output_idx in self.dataset_meta['label2cat'].items(
):
if output_idx != ignore_index:
cat2label[output_idx] = original_label
for i, (eval_ann, result) in enumerate(results):
sample_idx = eval_ann['point_cloud']['lidar_idx']
pred_sem_mask = result['semantic_mask'].numpy().astype(np.int64)
pred_label = cat2label[pred_sem_mask]
curr_file = f'{submission_prefix}/{sample_idx}.txt'
np.savetxt(curr_file, pred_label, fmt='%d')
def METHOD_NAME(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
if self.submission_prefix:
self.format_results(results)
return None
label2cat = self.dataset_meta['label2cat']
ignore_index = self.dataset_meta['ignore_index']
gt_semantic_masks = []
pred_semantic_masks = []
        for eval_ann, single_pred_results in results:
            gt_semantic_masks.append(eval_ann['pts_semantic_mask'])
            pred_semantic_masks.append(
                single_pred_results['pts_semantic_mask'])
ret_dict = seg_eval(
gt_semantic_masks,
pred_semantic_masks,
label2cat,
ignore_index,
logger=logger)
return ret_dict
| null |
4,818 |
#
# MIT No Attribution
#
# Copyright (C) 2010-2023 Joel Andersson, Joris Gillis, Moritz Diehl, KU Leuven.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# -*- coding: utf-8 -*-
from casadi import *
#
# How to use Callback
# Joel Andersson
#
class MyCallback(Callback):
def __init__(self, name, d, opts={}):
Callback.__init__(self)
self.d = d
self.construct(name, opts)
# Number of inputs and outputs
def get_n_in(self): return 1
def get_n_out(self): return 1
# Initialize the object
def init(self):
print('initializing object')
# Evaluate numerically
def eval(self, arg):
x = arg[0]
f = sin(self.d*x)
return [f]
# Use the function
f = MyCallback('f', 0.5)
res = f(2)
print(res)
# You may call the Callback symbolically
x = MX.sym("x")
print(f(x))
# Derivatives OPTION 1: finite differences
eps = 1e-5
print((f(2+eps)-f(2))/eps)
f = MyCallback('f', 0.5, {"enable_fd":True})
J = Function('J',[x],[jacobian(f(x),x)])
print(J(2))
# Derivatives OPTION 2: Supply forward mode
# Example from https://www.youtube.com/watch?v=mYOkLkS5yqc&t=4s
class Example4To3(Callback):
def __init__(self, name, opts={}):
Callback.__init__(self)
self.construct(name, opts)
def get_n_in(self): return 1
def get_n_out(self): return 1
def get_sparsity_in(self,i):
return Sparsity.dense(4,1)
def get_sparsity_out(self,i):
return Sparsity.dense(3,1)
# Evaluate numerically
def eval(self, arg):
a,b,c,d = vertsplit(arg[0])
ret = vertcat(sin(c)*d+d**2,2*a+c,b**2+5*c)
return [ret]
class Example4To3_Fwd(Example4To3):
def has_forward(self,nfwd):
# This example is written to work with a single forward seed vector
# For efficiency, you may allow more seeds at once
return nfwd==1
def get_forward(self,nfwd,name,inames,onames,opts):
class ForwardFun(Callback):
def __init__(self, opts={}):
Callback.__init__(self)
self.construct(name, opts)
def get_n_in(self): return 3
def get_n_out(self): return 1
def get_sparsity_in(self,i):
if i==0: # nominal input
return Sparsity.dense(4,1)
elif i==1: # nominal output
return Sparsity(3,1)
else: # Forward seed
return Sparsity.dense(4,1)
def get_sparsity_out(self,i):
# Forward sensitivity
return Sparsity.dense(3,1)
# Evaluate numerically
def eval(self, arg):
a,b,c,d = vertsplit(arg[0])
a_dot,b_dot,c_dot,d_dot = vertsplit(arg[2])
print("Forward sweep with", a_dot,b_dot,c_dot,d_dot)
w0 = sin(c)
w0_dot = cos(c)*c_dot
w1 = w0*d
w1_dot = w0_dot*d+w0*d_dot
w2 = d**2
w2_dot = 2*d_dot*d
r0 = w1+w2
r0_dot = w1_dot + w2_dot
w3 = 2*a
w3_dot = 2*a_dot
r1 = w3+c
r1_dot = w3_dot+c_dot
w4 = b**2
w4_dot = 2*b_dot*b
w5 = 5*w0
w5_dot = 5*w0_dot
r2 = w4+w5
r2_dot = w4_dot + w5_dot
ret = vertcat(r0_dot,r1_dot,r2_dot)
return [ret]
# You are required to keep a reference alive to the returned Callback object
self.fwd_callback = ForwardFun()
return self.fwd_callback
f = Example4To3_Fwd('f')
x = MX.sym("x",4)
J = Function('J',[x],[jacobian(f(x),x)])
print(J(vertcat(1,2,0,3)))
# Derivatives OPTION 3: Supply reverse mode
class Example4To3_Rev(Example4To3):
def has_reverse(self,nadj):
    # This example is written to work with a single adjoint (reverse) seed vector
    # For efficiency, you may allow more seeds at once
return nadj==1
def get_reverse(self,nfwd,name,inames,onames,opts):
class ReverseFun(Callback):
def __init__(self, opts={}):
Callback.__init__(self)
self.construct(name, opts)
def get_n_in(self): return 3
def get_n_out(self): return 1
def get_sparsity_in(self,i):
if i==0: # nominal input
return Sparsity.dense(4,1)
elif i==1: # nominal output
return Sparsity(3,1)
else: # Reverse seed
return Sparsity.dense(3,1)
def get_sparsity_out(self,i):
# Reverse sensitivity
return Sparsity.dense(4,1)
# Evaluate numerically
def eval(self, arg):
a,b,c,d = vertsplit(arg[0])
r0_bar,r1_bar,r2_bar = vertsplit(arg[2])
print("Reverse sweep with", r0_bar, r1_bar, r2_bar)
w0 = sin(c)
w1 = w0*d
w2 = d**2
r0 = w1+w2
w3 = 2*a
r1 = w3+c
w4 = b**2
w5 = 5*w0
r2 = w4+w5
w4_bar = r2_bar
w5_bar = r2_bar
w0_bar = 5*w5_bar
b_bar = 2*b*w4_bar
w3_bar = r1_bar
c_bar = r1_bar
a_bar = 2*w3_bar
w1_bar = r0_bar
w2_bar = r0_bar
d_bar = 2*d*w2_bar
w0_bar = w0_bar + w1_bar*d
d_bar = d_bar + w0*w1_bar
c_bar = c_bar + cos(c)*w0_bar
ret = vertcat(a_bar,b_bar,c_bar,d_bar)
return [ret]
# You are required to keep a reference alive to the returned Callback object
self.rev_callback = ReverseFun()
return self.rev_callback
f = Example4To3_Rev('f')
x = MX.sym("x",4)
J = Function('J',[x],[jacobian(f(x),x)])
print(J(vertcat(1,2,0,3)))
# Derivatives OPTION 4: Supply full Jacobian
class Example4To3_Jac(Example4To3):
def METHOD_NAME(self): return True
def get_jacobian(self,name,inames,onames,opts):
class JacFun(Callback):
def __init__(self, opts={}):
Callback.__init__(self)
self.construct(name, opts)
def get_n_in(self): return 2
def get_n_out(self): return 1
def get_sparsity_in(self,i):
if i==0: # nominal input
return Sparsity.dense(4,1)
elif i==1: # nominal output
return Sparsity(3,1)
def get_sparsity_out(self,i):
return sparsify(DM([[0,0,1,1],[1,0,1,0],[0,1,1,0]])).sparsity()
# Evaluate numerically
def eval(self, arg):
a,b,c,d = vertsplit(arg[0])
ret = DM(3,4)
ret[0,2] = d*cos(c)
ret[0,3] = sin(c)+2*d
ret[1,0] = 2
ret[1,2] = 1
ret[2,1] = 2*b
ret[2,2] = 5
return [ret]
# You are required to keep a reference alive to the returned Callback object
self.jac_callback = JacFun()
return self.jac_callback
f = Example4To3_Jac('f')
x = MX.sym("x",4)
J = Function('J',[x],[jacobian(f(x),x)])
print(J(vertcat(1,2,0,3)))
| null |
4,819 |
import json
from unittest.mock import Mock, patch
import pytest
from backend.layers.common.entities import DatasetProcessingStatus
from tests.unit.processing.schema_migration.conftest import make_mock_collection_version, make_mock_dataset_version
@pytest.fixture
def local_schema_migrate(schema_migrate):
def download_file(bucket, key_name, local_path):
contents = {
"datasets": [
{
"dataset_id": "dataset_id_1",
"dataset_version_id": "prev_successful_dataset_version_id",
},
{
"dataset_id": "dataset_id_2",
"dataset_version_id": "prev_failed_dataset_version_id",
},
{
"dataset_id": "dataset_id_3",
"dataset_version_id": "prev_non_migrated_dataset_version_id",
},
]
# these datasets populate the processed_dataset variable in the publish_and_cleanup function
}
with open(local_path, "w") as f:
f.write(json.dumps(contents))
schema_migrate.business_logic.s3_provider.download_file = download_file
schema_migrate.business_logic.s3_provider.delete_files = Mock()
return schema_migrate
@patch("backend.layers.processing.schema_migration.cxs_get_current_schema_version", return_value="1.0.0")
@patch("backend.layers.processing.schema_migration.json.dump")
class TestPublishAndCleanup:
def test_OK(self, mock_json, mock_cxs_get_current_schema_version, local_schema_migrate):
datasets = [
make_mock_dataset_version(
dataset_id="dataset_id_1",
version_id="new_successful_dataset_version_id",
status=dict(processing_status=DatasetProcessingStatus.SUCCESS),
)
]
collection_version = make_mock_collection_version(datasets)
local_schema_migrate.business_logic.get_collection_version.return_value = collection_version
errors = local_schema_migrate.publish_and_cleanup(collection_version.version_id.id, True)
assert errors == []
local_schema_migrate.business_logic.publish_collection_version.assert_called_once_with(
collection_version.version_id
)
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket", ["schema_migration/test-execution-arn/publish_and_cleanup/collection_id.json"]
)
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket",
["prev_successful_dataset_version_id/migrated.h5ad"],
)
def test_with_errors(self, mock_json, mock_cxs_get_current_schema_version, local_schema_migrate):
failed_dataset = make_mock_dataset_version(
dataset_id="dataset_id_2",
version_id="new_failed_dataset_version_id",
status=dict(processing_status=DatasetProcessingStatus.FAILURE, validation_message="rds conversion failed"),
metadata=dict(schema_version="1.0.0"),
)
non_migrated_dataset = make_mock_dataset_version(
dataset_id="dataset_id_3",
version_id="new_non_migrated_dataset_version_id",
metadata=dict(schema_version="0.9.0"),
)
datasets = [
failed_dataset,
non_migrated_dataset,
]
collection_version = make_mock_collection_version(datasets)
local_schema_migrate.business_logic.get_collection_version.return_value = collection_version
errors = local_schema_migrate.publish_and_cleanup(collection_version.version_id.id, True)
assert len(errors) == 2
assert {
"message": failed_dataset.status.validation_message,
"dataset_status": failed_dataset.status.to_dict(),
"collection_id": collection_version.collection_id.id,
"collection_version_id": collection_version.version_id.id,
"dataset_version_id": failed_dataset.version_id.id,
"dataset_id": failed_dataset.dataset_id.id,
"rollback": True,
} in errors
assert {
"message": "Did Not Migrate.",
"collection_id": collection_version.collection_id.id,
"collection_version_id": collection_version.version_id.id,
"dataset_version_id": non_migrated_dataset.version_id.id,
"dataset_id": non_migrated_dataset.dataset_id.id,
"rollback": False,
} in errors
local_schema_migrate.business_logic.publish_collection_version.assert_not_called()
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket", ["schema_migration/test-execution-arn/publish_and_cleanup/collection_id.json"]
)
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket",
[
"prev_failed_dataset_version_id/migrated.h5ad",
"prev_non_migrated_dataset_version_id/migrated.h5ad",
],
)
def test_can_not_publish(self, mock_json, mock_cxs_get_current_schema_version, local_schema_migrate):
dataset_status = dict(processing_status=DatasetProcessingStatus.SUCCESS)
metadata = dict(schema_version="1.0.0")
collection_version = make_mock_collection_version(
[
make_mock_dataset_version(
dataset_id="dataset_id_1",
version_id="new_successful_dataset_version_id",
status=dataset_status,
metadata=metadata,
)
]
)
local_schema_migrate.business_logic.get_collection_version.return_value = collection_version
errors = local_schema_migrate.publish_and_cleanup(collection_version.version_id.id, False)
assert errors == []
local_schema_migrate.business_logic.publish_collection_version.assert_not_called()
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket", ["schema_migration/test-execution-arn/publish_and_cleanup/collection_id.json"]
)
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket", ["prev_successful_dataset_version_id/migrated.h5ad"]
)
def METHOD_NAME(self, mock_json, mock_cxs_get_current_schema_version, local_schema_migrate):
"""
Test that datasets that do not appear in the processed_datasets variable in publish_and_cleanup are skipped
"""
local_schema_migrate._check_dataset_is_latest_schema_version = Mock(
wraps=local_schema_migrate._check_dataset_is_latest_schema_version
)
collection_version = make_mock_collection_version([make_mock_dataset_version()])
local_schema_migrate.business_logic.get_collection_version.return_value = collection_version
errors = local_schema_migrate.publish_and_cleanup(collection_version.version_id.id, False)
assert errors == []
local_schema_migrate._check_dataset_is_latest_schema_version.assert_not_called()
local_schema_migrate.s3_provider.delete_files.assert_any_call(
"artifact-bucket", ["schema_migration/test-execution-arn/publish_and_cleanup/collection_id.json"]
)
local_schema_migrate.s3_provider.delete_files.assert_any_call("artifact-bucket", [])
| null |
4,820 |
import mock
from django import test
from django.conf import settings
from model_bakery import baker
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_account import models as account_models
from devilry.devilry_group.cradmin_instances import crinstance_admin
class TestCrinstanceAdmin(test.TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_get_rolequeryset_not_admin(self):
baker.make('core.AssignmentGroup', parentnode=baker.make('core.Assignment'))
testuser = baker.make(settings.AUTH_USER_MODEL)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual([], list(instance.get_rolequeryset()))
def test_get_rolequeryset_superuser(self):
testgroup = baker.make('core.AssignmentGroup', parentnode=baker.make('core.Assignment'))
testuser = baker.make(settings.AUTH_USER_MODEL, is_superuser=True)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual([testgroup], list(instance.get_rolequeryset()))
def METHOD_NAME(self):
baker.make('core.AssignmentGroup', parentnode=baker.make('core.Assignment'))
testuser = baker.make(settings.AUTH_USER_MODEL)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual([], list(instance.get_rolequeryset()))
def test_getrolequeryset_admin_on_period(self):
testassignment = baker.make('core.Assignment')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
period=testassignment.period)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser,
permissiongroup=periodpermissiongroup.permissiongroup)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual([testgroup], list(instance.get_rolequeryset()))
def test_getrolequeryset_not_admin_on_period(self):
testassignment_another = baker.make('core.Assignment')
testgroup_another = baker.make('core.AssignmentGroup', parentnode=testassignment_another)
periodpermissiongroup_another = baker.make('devilry_account.PeriodPermissionGroup',
period=testassignment_another.period)
testuser_another = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser_another,
permissiongroup=periodpermissiongroup_another.permissiongroup)
testassignment = baker.make('core.Assignment')
baker.make('core.AssignmentGroup', parentnode=testassignment)
periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
period=testassignment.period)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser,
permissiongroup=periodpermissiongroup.permissiongroup)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertNotEqual([testgroup_another], list(instance.get_rolequeryset()))
def test_getrolequeryset_admin_on_subject(self):
testassignment = baker.make('core.Assignment')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
subjectpermissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
subject=testassignment.subject)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser,
permissiongroup=subjectpermissiongroup.permissiongroup)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual([testgroup], list(instance.get_rolequeryset()))
def test_getrolequeryset_not_admin_on_subject(self):
testassignment_another = baker.make('core.Assignment')
testgroup_another = baker.make('core.AssignmentGroup', parentnode=testassignment_another)
subjectpermissiongroup_another = baker.make('devilry_account.SubjectPermissionGroup',
subject=testassignment_another.subject)
testuser_another = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser_another,
permissiongroup=subjectpermissiongroup_another.permissiongroup)
testassignment = baker.make('core.Assignment')
baker.make('core.AssignmentGroup', parentnode=testassignment)
subjectpermissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
subject=testassignment.subject)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser',
user=testuser,
permissiongroup=subjectpermissiongroup.permissiongroup)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
instance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertNotEqual([testgroup_another], list(instance.get_rolequeryset()))
def test_admin_devilryrole_periodadmin(self):
testperiod = baker.make('core.Period')
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
baker.make('devilry_account.PermissionGroupUser',
user=testuser,
permissiongroup=baker.make(
'devilry_account.PeriodPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_PERIODADMIN,
period=testperiod).permissiongroup)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
mockrequest.cradmin_role = testgroup
testinstance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual('periodadmin', testinstance.get_devilryrole_for_requestuser())
def test_admin_devilryrole_subjectadmin(self):
testsubject = baker.make('core.Subject')
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode__parentnode=testsubject)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
baker.make('devilry_account.PermissionGroupUser',
user=testuser,
permissiongroup=baker.make(
'devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_SUBJECTADMIN,
subject=testsubject).permissiongroup)
mockrequest = mock.MagicMock()
mockrequest.user = testuser
mockrequest.cradmin_role = testgroup
testinstance = crinstance_admin.AdminCrInstance(request=mockrequest)
self.assertEqual('subjectadmin', testinstance.get_devilryrole_for_requestuser())
| null |
4,821 |
# Copyright 2019-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common.api import jit
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.mul = P.Mul()
@jit
def construct(self, x, y):
return self.mul(x, y)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
x0 = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32))
y0 = Tensor(np.random.uniform(-2, 2, (1, 1, 1, 1)).astype(np.float32))
x1 = Tensor(np.random.uniform(-2, 2, (1, 3, 1, 4)).astype(np.float32))
y1 = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32))
x2 = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32))
y2 = Tensor(2, mstype.float32)
x3 = Tensor(2, mstype.float32)
y3 = Tensor(2, mstype.float32)
x4 = Tensor(np.random.uniform(-2, 2, (4)).astype(np.float32))
y4 = Tensor(np.random.uniform(-2, 2, (4, 4)).astype(np.float32))
mul = Net()
out = mul(x0, y0).asnumpy()
exp = x0.asnumpy() * y0.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x1, y1).asnumpy()
exp = x1.asnumpy() * y1.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x2, y2).asnumpy()
exp = x2.asnumpy() * y2.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x3, y3).asnumpy()
exp = x3.asnumpy() * y3.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x4, y4).asnumpy()
exp = x4.asnumpy() * y4.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mul_int32():
x0 = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.int32))
y0 = Tensor(np.random.uniform(-2, 2, (1, 1, 1, 1)).astype(np.int32))
x1 = Tensor(np.random.uniform(-2, 2, (1, 3, 1, 4)).astype(np.int32))
y1 = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.int32))
x2 = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.int32))
y2 = Tensor(2, mstype.int32)
x3 = Tensor(2, mstype.int32)
y3 = Tensor(2, mstype.int32)
x4 = Tensor(np.random.uniform(-2, 2, (4)).astype(np.int32))
y4 = Tensor(np.random.uniform(-2, 2, (4, 4)).astype(np.int32))
mul = Net()
out = mul(x0, y0).asnumpy()
exp = x0.asnumpy() * y0.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x1, y1).asnumpy()
exp = x1.asnumpy() * y1.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x2, y2).asnumpy()
exp = x2.asnumpy() * y2.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x3, y3).asnumpy()
exp = x3.asnumpy() * y3.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
out = mul(x4, y4).asnumpy()
exp = x4.asnumpy() * y4.asnumpy()
diff = np.abs(out - exp)
err = np.ones(shape=exp.shape) * 1.0e-5
assert np.all(diff < err)
assert out.shape == exp.shape
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_mul_tensor_api_modes(mode):
"""
Feature: Test mul tensor api.
Description: Test mul tensor api for Graph and PyNative modes.
Expectation: The result match to the expect value.
"""
context.set_context(mode=mode, device_target="CPU")
x = Tensor([1.0, 2.0, 3.0], mstype.float32)
y = Tensor([4.0, 5.0, 6.0], mstype.float32)
output = x.mul(y)
expected = np.array([4., 10., 18.], np.float32)
np.testing.assert_array_equal(output.asnumpy(), expected)
| null |
4,822 |
# Author: Neal Shrader <[email protected]>
# Author: Ben Howard <[email protected]>
#
# This file is part of cloud-init. See LICENSE file for license information.
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
import cloudinit.sources.helpers.digitalocean as do_helper
from cloudinit import log as logging
from cloudinit import sources, util
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
"metadata_url": "http://169.254.169.254/metadata/v1.json",
}
# Wait for up to a minute, retrying the metadata server
# every 2 seconds.
MD_RETRIES = 30
MD_TIMEOUT = 2
MD_WAIT_RETRY = 2
MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
dsname = "DigitalOcean"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
self.ds_cfg = util.mergemanydict(
[
util.get_cfg_by_path(
sys_cfg, ["datasource", "DigitalOcean"], {}
),
BUILTIN_DS_CONFIG,
]
)
self.METHOD_NAME()
self.metadata_address = self.ds_cfg["metadata_url"]
self.retries = self.ds_cfg.get("retries", MD_RETRIES)
self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL)
self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
def _unpickle(self, ci_pkl_version: int) -> None:
super()._unpickle(ci_pkl_version)
self.METHOD_NAME()
def METHOD_NAME(self):
util.deprecate(
deprecated="DataSourceDigitalOcean",
deprecated_version="23.2",
extra_message="Deprecated in favour of DataSourceConfigDrive.",
)
def _get_sysinfo(self):
return do_helper.read_sysinfo()
def _get_data(self):
(is_do, droplet_id) = self._get_sysinfo()
# only proceed if we know we are on DigitalOcean
if not is_do:
return False
LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id)
ipv4LL_nic = None
if self.use_ip4LL:
ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
self.metadata_address,
timeout=self.timeout,
sec_between=self.wait_retry,
retries=self.retries,
)
self.metadata_full = md
self.metadata["instance-id"] = md.get("droplet_id", droplet_id)
self.metadata["local-hostname"] = md.get("hostname", droplet_id)
self.metadata["interfaces"] = md.get("interfaces")
self.metadata["public-keys"] = md.get("public_keys")
self.metadata["availability_zone"] = md.get("region", "default")
self.vendordata_raw = md.get("vendor_data", None)
self.userdata_raw = md.get("user_data", None)
if ipv4LL_nic:
do_helper.del_ipv4_link_local(ipv4LL_nic)
return True
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
self.get_instance_id(), "system-serial-number"
)
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
the IP information may have changed due to snapshot and/or
migration.
"""
if self._network_config:
return self._network_config
interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
raise RuntimeError("Unable to get meta-data from server....")
nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
interfaces, nameservers
)
return self._network_config
# Used to match classes to dependencies
datasources = [
(DataSourceDigitalOcean, (sources.DEP_FILESYSTEM,)),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
# vi: ts=4 expandtab
| null |
4,823 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""random_ops vmap impl."""
from __future__ import absolute_import
from mindspore.ops.operations.random_ops import UniformCandidateSampler, RandomShuffle, Multinomial, \
RandomChoiceWithMask
from mindspore.ops.function import _VmapGeneralRule
from mindspore.ops._vmap.vmap_base import vmap_rules_getters, _bdim_at_front, _vmap_clone_prim, \
vmap_general_preprocess, _raise_value_error
@vmap_rules_getters.register(UniformCandidateSampler)
def get_uniform_candidate_sampler_vmap_rule(prim, axis_size):
"""VmapRule for `UniformCandidateSampler` operation."""
if hasattr(prim, 'batch_rank'):
batch_rank = prim.batch_rank + 1
else:
batch_rank = 1
batch_prim = _vmap_clone_prim(prim)
batch_prim.add_prim_attr("batch_rank", batch_rank)
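    # The cloned primitive carries "batch_rank" so it knows how many leading axes
    # of its inputs are vmap batch dimensions when it is later called on the
    # batched tensor (a brief explanatory note; the attribute itself comes from
    # the surrounding vmap machinery).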
def vmap_rule(x_bdim):
x, x_dim = x_bdim
if x_dim is None:
sampled_candidates, true_expected_count, sampled_expected_count = prim(x)
return (sampled_candidates, None), (true_expected_count, None), (sampled_expected_count, None)
x = _bdim_at_front(x, x_dim, axis_size)
sampled_candidates, true_expected_count, sampled_expected_count = batch_prim(x)
return (sampled_candidates, 0), (true_expected_count, 0), (sampled_expected_count, 0)
return vmap_rule
@vmap_rules_getters.register(RandomShuffle)
def get_random_shuffle_vmap_rule(prim, axis_size):
"""VmapRule for `RandomShuffle` operation."""
if hasattr(prim, 'batch_rank'):
batch_rank = prim.batch_rank + 1
else:
batch_rank = 1
batch_prim = _vmap_clone_prim(prim)
batch_prim.add_prim_attr("batch_rank", batch_rank)
def vmap_rule(x_bdim):
is_all_none, result = vmap_general_preprocess(prim, x_bdim)
if is_all_none:
return result
x, x_dim = x_bdim
x = _bdim_at_front(x, x_dim, axis_size)
out = batch_prim(x)
return out, 0
return vmap_rule
@vmap_rules_getters.register(Multinomial)
def get_multinomial_vmap_rule(prim, axis_size):
"""VmapRule for `Multinomial` operation."""
prim_name = prim.name
prim_vmap = _VmapGeneralRule(prim, axis_size)
def vmap_rule(x_bdim, num_samples_bdim):
is_all_none, result = vmap_general_preprocess(
prim, x_bdim, num_samples_bdim)
if is_all_none:
return result
x, x_dim = x_bdim
num_samples, num_samples_dim = num_samples_bdim
if len(x.shape) > 2:
out = prim_vmap(x_bdim, num_samples_bdim)
return out
if num_samples_dim is not None:
_raise_value_error("The source axis of args in {} must be None, "
"but got {}.".format(prim_name, num_samples_dim))
x = _bdim_at_front(x, x_dim, axis_size)
out = prim(x, num_samples)
return (out, 0)
return vmap_rule
@vmap_rules_getters.register(RandomChoiceWithMask)
def METHOD_NAME(prim, axis_size):
"""VmapRule for 'RandomChoiceWithMask' operation."""
if hasattr(prim, 'batch_rank'):
batch_rank = prim.batch_rank + 1
else:
batch_rank = 1
batch_prim = _vmap_clone_prim(prim)
batch_prim.add_prim_attr('batch_rank', batch_rank)
def vmap_rule(x_bdim):
is_all_none, result = vmap_general_preprocess(prim, x_bdim)
if is_all_none:
return result
x_data, x_dim = x_bdim
x = _bdim_at_front(x_data, x_dim, axis_size)
index, mask = batch_prim(x)
return (index, 0), (mask, 0)
return vmap_rule
| null |
4,824 |
# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous helper code.
"""
from inspect import currentframe
import typing
import platform
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
import warnings
import torch
T = TypeVar("T")
TKey = TypeVar("TKey")
def METHOD_NAME(stuff: T, device: Union[torch.device, str] = "cpu", exclude: Optional[List[str]] = None) -> T:
"""Set everything in the dict to the specified torch device.
Args:
stuff: things to convert to torch
device: machine to put the "stuff" on
exclude: list of keys to skip over transferring to device
"""
if isinstance(stuff, dict):
for k, v in stuff.items():
if exclude and k in exclude:
stuff[k] = v
else:
stuff[k] = METHOD_NAME(v, device)
return stuff
if isinstance(stuff, torch.Tensor):
return stuff.to(device)
return stuff
def get_dict_to_cpu(stuff: T) -> T:
"""Set everything in the dict to CPU.
Args:
stuff: things to place onto cpu
"""
if isinstance(stuff, dict):
for k, v in stuff.items():
stuff[k] = get_dict_to_cpu(v)
return stuff
if isinstance(stuff, torch.Tensor):
return stuff.detach().cpu()
return stuff
def get_masked_dict(d: Dict[TKey, torch.Tensor], mask) -> Dict[TKey, torch.Tensor]:
"""Return a masked dictionary.
TODO(ethan): add more asserts/checks so this doesn't have unpredictable behavior.
Args:
d: dict to process
mask: mask to apply to values in dictionary
"""
masked_dict = {}
for key, value in d.items():
masked_dict[key] = value[mask]
return masked_dict
class IterableWrapper:
"""A helper that will allow an instance of a class to return multiple kinds of iterables bound
to different functions of that class.
To use this, take an instance of a class. From that class, pass in the <instance>.<new_iter_function>
and <instance>.<new_next_function> to the IterableWrapper constructor. By passing in the instance's
functions instead of just the class's functions, the self argument should automatically be accounted
for.
Args:
new_iter: function that will be called instead as the __iter__() function
new_next: function that will be called instead as the __next__() function
length: length of the iterable. If -1, the iterable will be infinite.
Attributes:
new_iter: object's pointer to the function we are calling for __iter__()
new_next: object's pointer to the function we are calling for __next__()
length: length of the iterable. If -1, the iterable will be infinite.
i: current index of the iterable.
"""
i: int
def __init__(self, new_iter: Callable, new_next: Callable, length: int = -1):
self.new_iter = new_iter
self.new_next = new_next
self.length = length
def __next__(self):
if self.length != -1 and self.i >= self.length:
raise StopIteration
self.i += 1
return self.new_next()
def __iter__(self):
self.new_iter()
self.i = 0
return self
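# A minimal usage sketch (for illustration only, not part of the original module)
# of the pattern described in the IterableWrapper docstring: an object passes its
# own bound methods to the wrapper so iteration is delegated back to it. The
# _DemoLoader name and its methods are made up for this example.
def _iterable_wrapper_demo() -> List[int]:
    class _DemoLoader:
        def _reset(self) -> None:
            self._count = 0
        def _next_batch(self) -> int:
            self._count += 1
            return self._count
        def batches(self) -> IterableWrapper:
            return IterableWrapper(self._reset, self._next_batch, length=3)
    return list(_DemoLoader().batches())  # -> [1, 2, 3]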
def scale_dict(dictionary: Dict[Any, Any], coefficients: Dict[str, float]) -> Dict[Any, Any]:
"""Scale a dictionary in-place given a coefficients dictionary.
Args:
dictionary: input dict to be scaled.
coefficients: scalar dict config for holding coefficients.
Returns:
Input dict scaled by coefficients.
"""
for key in dictionary:
if key in coefficients:
dictionary[key] *= coefficients[key]
return dictionary
def step_check(step, step_size, run_at_zero=False) -> bool:
"""Returns true based on current step and step interval."""
if step_size == 0:
return False
return (run_at_zero or step != 0) and step % step_size == 0
def update_avg(prev_avg: float, new_val: float, step: int) -> float:
"""helper to calculate the running average
Args:
prev_avg (float): previous average value
new_val (float): new value to update the average with
step (int): current step number
Returns:
float: new updated average
"""
return (step * prev_avg + new_val) / (step + 1)
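# Worked example for the running-average formula above (illustrative numbers only):
# with prev_avg=2.0 accumulated over steps 0 and 1, folding in new_val=5.0 at
# step=2 gives (2 * 2.0 + 5.0) / (2 + 1) == 3.0.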
def strtobool(val) -> bool:
"""Cheap replacement for `distutils.util.strtobool()` which is deprecated
FMI https://stackoverflow.com/a/715468
"""
return val.lower() in ("yes", "y", "true", "t", "on", "1")
def torch_compile(*args, **kwargs) -> Any:
"""
Safe torch.compile with backward compatibility for PyTorch 1.x
"""
if not hasattr(torch, "compile"):
# Backward compatibility for PyTorch 1.x
warnings.warn(
"PyTorch 1.x will no longer be supported by Nerstudio. Please upgrade to PyTorch 2.x.", DeprecationWarning
)
if args and isinstance(args[0], torch.nn.Module):
return args[0]
else:
return torch.jit.script
elif platform.system() == "Windows":
# torch.compile is not supported on Windows
# https://github.com/orgs/pytorch/projects/27
# TODO: @jkulhanek, remove this once torch.compile is supported on Windows
warnings.warn(
"Windows does not yet support torch.compile and the performance will be affected.", RuntimeWarning
)
if args and isinstance(args[0], torch.nn.Module):
return args[0]
else:
return lambda x: x
else:
return torch.compile(*args, **kwargs)
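# Hedged usage sketch (for illustration only, not part of the original module):
# torch_compile is intended as a drop-in for torch.compile, so wrapping a module is
# written the same way on PyTorch 1.x, on Windows, and on a full PyTorch 2.x setup.
# The helper name below is made up for this example.
def _torch_compile_demo() -> "torch.Size":
    model = torch.nn.Linear(4, 2)
    compiled = torch_compile(model)  # may simply return `model` on the fallback paths
    return compiled(torch.zeros(1, 4)).shape  # torch.Size([1, 2])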
def get_orig_class(obj, default=None):
"""Returns the __orig_class__ class of `obj` even when it is not initialized in __init__ (Python>=3.8).
Workaround for https://github.com/python/typing/issues/658.
Inspired by https://github.com/Stewori/pytypes/pull/53.
"""
try:
return object.__getattribute__(obj, "__orig_class__")
except AttributeError:
cls = object.__getattribute__(obj, "__class__")
try:
is_type_generic = isinstance(cls, typing.GenericMeta) # type: ignore
except AttributeError: # Python 3.8
is_type_generic = issubclass(cls, typing.Generic)
if is_type_generic:
frame = currentframe().f_back.f_back # type: ignore
try:
while frame:
try:
res = frame.f_locals["self"]
if res.__origin__ is cls:
return res
except (KeyError, AttributeError):
frame = frame.f_back
finally:
del frame
return default
| null |
4,825 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtWidgets, QtCore
from qtpy.QtGui import QIntValidator, QDoubleValidator
from mantidqt.utils.qt import load_ui
Ui_settings, _ = load_ui(__file__, "settings_widget.ui")
class SettingsView(QtWidgets.QDialog, Ui_settings):
def __init__(self, parent=None):
super(SettingsView, self).__init__(parent)
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
self.setupUi(self)
self.setModal(True)
self.finder_save.setLabelText("Save Location")
self.finder_save.isForRunFiles(False)
self.finder_save.isForDirectory(True)
self.finder_fullCalib.setLabelText("Full Calibration")
self.finder_fullCalib.isForRunFiles(False)
self.finder_path_to_gsas2.setLabelText("Path to GSASII")
self.finder_path_to_gsas2.isForRunFiles(False)
self.finder_path_to_gsas2.isForDirectory(True)
self.finder_path_to_gsas2.isOptional(True)
self.timeout_lineedit.setValidator(QIntValidator(0, 200))
self.dSpacing_min_lineedit.setValidator(QDoubleValidator(0.0, 200.0, 3))
# set text of labels
self.log_list_label.setText("Check logs to average when loading focused data")
self.primary_log_label.setText(
"Sort workspaces by selected log average in sequential fitting (default is ascending order)\n"
"If the box below is empty the workspaces will be fitted in the order they appear in the table."
)
self.peak_list_label.setText("Default Peak Function")
# ===============
# Slot Connectors
# ===============
def set_on_apply_clicked(self, slot):
self.btn_apply.clicked.connect(slot)
def set_on_ok_clicked(self, slot):
self.btn_ok.clicked.connect(slot)
def set_on_cancel_clicked(self, slot):
self.btn_cancel.clicked.connect(slot)
def set_on_log_changed(self, slot):
self.log_list.itemChanged.connect(slot)
def set_on_check_ascending_changed(self, slot):
self.check_ascending.stateChanged.connect(slot)
def set_on_check_descending_changed(self, slot):
self.check_descending.stateChanged.connect(slot)
def set_on_gsas2_path_edited(self, slot):
self.finder_path_to_gsas2.fileEditingFinished.connect(slot)
# =================
# Component Getters
# =================
def get_save_location(self):
return self.finder_save.getFirstFilename()
def get_full_calibration(self):
return self.finder_fullCalib.getFirstFilename()
def get_checked_logs(self):
return ",".join(
[
self.log_list.item(ilog).text()
for ilog in range(self.log_list.count())
if self.log_list.item(ilog).checkState() == QtCore.Qt.Checked
]
)
def get_primary_log(self):
return self.primary_log.currentText()
def get_ascending_checked(self):
return self.check_ascending.isChecked()
def get_peak_function(self):
return self.peak_list.currentText()
def get_path_to_gsas2(self):
return self.finder_path_to_gsas2.getFirstFilename()
def get_timeout(self):
return self.timeout_lineedit.text()
def get_dSpacing_min(self):
return self.dSpacing_min_lineedit.text()
# =================
# Component Setters
# =================
def set_save_location(self, text):
self.finder_save.setText(text)
def set_full_calibration(self, text):
self.finder_fullCalib.setText(text)
def set_van_recalc(self, checked):
self.check_vanRecalc.setChecked(checked)
def add_log_checkboxs(self, logs):
for log in logs.split(","):
item = QtWidgets.QListWidgetItem(self.log_list)
item.setText(log)
item.setCheckState(QtCore.Qt.Unchecked)
self.log_list.addItem(item)
def set_checked_logs(self, logs):
# block signal so as not to reset primary log
self.log_list.blockSignals(True)
for log in logs.split(","):
items = self.log_list.findItems(log, QtCore.Qt.MatchExactly)
items[0].setCheckState(QtCore.Qt.Checked)
self.log_list.blockSignals(False)
def set_primary_log_combobox(self, primary_log):
checked_logs = self.get_checked_logs().split(",") + [""]
self.primary_log.clear()
self.primary_log.addItems(checked_logs)
if primary_log in checked_logs:
self.primary_log.setCurrentText(primary_log)
else:
self.primary_log.setCurrentText("")
def set_ascending_checked(self, checked):
self.check_ascending.setChecked(checked)
def set_descending_checked(self, checked):
self.check_descending.setChecked(checked)
def METHOD_NAME(self, peak_name):
self.peak_list.setCurrentText(peak_name)
def populate_peak_function_list(self, peak_names):
self.peak_list.addItems(peak_names.split(","))
def set_path_to_gsas2(self, text):
self.finder_path_to_gsas2.setText(text)
def set_timeout(self, text):
self.timeout_lineedit.setText(text)
def set_dSpacing_min(self, text):
self.dSpacing_min_lineedit.setText(text)
# =================
# Force Actions
# =================
def find_full_calibration(self):
self.finder_fullCalib.findFiles(True)
def find_save(self):
self.finder_save.findFiles(True)
def find_path_to_gsas2(self):
self.finder_path_to_gsas2.findFiles(True)
| null |
4,826 |
import json
import textwrap
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
def test_basic_inspect():
t = TestClient()
t.save({"foo/conanfile.py": GenConanfile().with_name("foo").with_shared_option()})
t.run("inspect foo/conanfile.py")
lines = t.out.splitlines()
assert lines == ['default_options:',
' shared: False',
'generators: []',
'label: ',
'name: foo',
'options:',
' shared: False',
'options_definitions:',
" shared: ['True', 'False']",
'package_type: None',
'requires: []',
'revision_mode: hash']
def test_options_description():
t = TestClient()
conanfile = textwrap.dedent("""\
from conan import ConanFile
class Pkg(ConanFile):
options = {"shared": [True, False, None], "fpic": [True, False, None]}
options_description = {"shared": "Some long explanation about shared option",
"fpic": "Yet another long explanation of fpic"}
""")
t.save({"foo/conanfile.py": conanfile})
t.run("inspect foo/conanfile.py")
assert "shared: Some long explanation about shared option" in t.out
assert "fpic: Yet another long explanation of fpic" in t.out
def test_missing_conanfile():
t = TestClient()
t.run("inspect missing/conanfile.py", assert_error=True)
assert "Conanfile not found at" in t.out
def test_dot_and_folder_conanfile():
t = TestClient()
t.save({"conanfile.py": GenConanfile().with_name("foo")})
t.run("inspect .")
assert 'name: foo' in t.out
t.save({"foo/conanfile.py": GenConanfile().with_name("foo")}, clean_first=True)
t.run("inspect foo")
assert 'name: foo' in t.out
def METHOD_NAME():
tc = TestClient()
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
settings = "os", "arch"
def set_name(self):
self.name = "foo"
def set_version(self):
self.version = "1.0"
""")
tc.save({"conanfile.py": conanfile})
tc.run("inspect .")
assert "foo" in tc.out
assert "1.0" in tc.out
def test_normal_inspect():
tc = TestClient()
tc.run("new basic -d name=pkg -d version=1.0")
tc.run("inspect .")
assert tc.out.splitlines() == ['description: A basic recipe',
'generators: []',
'homepage: <Your project homepage goes here>',
'label: ',
'license: <Your project license goes here>',
'name: pkg',
'options:',
'options_definitions:',
'package_type: None',
'requires: []',
'revision_mode: hash',
'version: 1.0']
def test_empty_inspect():
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
pass""")
tc = TestClient()
tc.save({"conanfile.py": conanfile})
tc.run("inspect . -f json")
def test_basic_new_inspect():
tc = TestClient()
tc.run("new basic")
tc.run("inspect . -f json")
tc.run("new cmake_lib -d name=pkg -d version=1.0 -f")
tc.run("inspect . -f json")
def test_requirements_inspect():
tc = TestClient()
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
requires = "zlib/1.2.13"
license = "MIT", "Apache"
""")
tc.save({"conanfile.py": conanfile})
tc.run("inspect .")
assert ['generators: []',
'label: ',
"license: ['MIT', 'Apache']",
'options:',
'options_definitions:',
'package_type: None',
"requires: [{'ref': 'zlib/1.2.13', 'run': False, 'libs': True, 'skip': "
"False, 'test': False, 'force': False, 'direct': True, 'build': "
"False, 'transitive_headers': None, 'transitive_libs': None, 'headers': "
"True, 'package_id_mode': None, 'visible': True}]",
'revision_mode: hash'] == tc.out.splitlines()
def test_pythonrequires_remote():
tc = TestClient(default_server_user=True)
pyrequires = textwrap.dedent("""
from conan import ConanFile
class MyBase:
def set_name(self):
self.name = "my_company_package"
class PyReq(ConanFile):
name = "pyreq"
version = "1.0"
package_type = "python-require"
""")
tc.save({"pyreq/conanfile.py": pyrequires})
tc.run("create pyreq/")
tc.run("upload pyreq/1.0 -r default")
tc.run("search * -r default")
assert "pyreq/1.0" in tc.out
tc.run("remove * -c")
conanfile = textwrap.dedent("""
from conan import ConanFile
class Pkg(ConanFile):
python_requires = "pyreq/1.0"
python_requires_extend = "pyreq.MyBase"
def set_version(self):
self.version = "1.0"
""")
tc.save({"conanfile.py": conanfile})
# Not specifying the remote also works
tc.run("inspect .")
assert "pyreq/1.0: Downloaded recipe revision 0ca726ab0febe1100901fffb27dc421f" in tc.out
assert "name: my_company_package" in tc.out
assert "version: 1.0" in tc.out
# It now finds it on the cache, because it was downloaded
tc.run("inspect . -nr")
assert "name: my_company_package" in tc.out
assert "version: 1.0" in tc.out
assert "'recipe': 'Cache'" in tc.out
tc.run("remove pyreq/* -c")
# And now no remotes fails
tc.run("inspect . -nr", assert_error=True)
assert "Cannot resolve python_requires 'pyreq/1.0': No remote defined" in tc.out
def test_serializable_inspect():
tc = TestClient()
tc.save({"conanfile.py": GenConanfile("a", "1.0")
.with_requires("b/2.0")
.with_setting("os")
.with_option("shared", [True, False])
.with_generator("CMakeDeps")})
tc.run("inspect . --format=json")
assert json.loads(tc.out)["name"] == "a"
| null |
4,827 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops.operations.sparse_ops import Sspaddmm
class SspaddmmNet(nn.Cell):
def __init__(self):
super(SspaddmmNet, self).__init__()
self.sspaddmm = Sspaddmm()
def construct(self, x1_indices, x1_values, x1_shape, x2_indices, x2_values,
x2_shape, x3_dense, alpha, beta):
return self.sspaddmm(x1_indices, x1_values, x1_shape, x2_indices,
x2_values, x2_shape, x3_dense, alpha, beta)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: test Sspaddmm ops in gpu.
Description: test the ops in dynamic shape.
Expectation: expect correct shape result.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
net = SspaddmmNet()
x1_indices_dyn = Tensor(shape=[2, None], dtype=mstype.int64)
x1_values_dyn = Tensor(shape=[None], dtype=mstype.int32)
x1_shape_dyn = Tensor(shape=[None], dtype=mstype.int64)
x2_indices_dyn = Tensor(shape=[None, None], dtype=mstype.int64)
x2_values_dyn = Tensor(shape=[None], dtype=mstype.int32)
x2_shape_dyn = Tensor(shape=[None], dtype=mstype.int64)
x3_dense_dyn = Tensor(shape=[None, None], dtype=mstype.int32)
alpha = Tensor(1, dtype=mstype.int32)
beta = Tensor(1, dtype=mstype.int32)
net.set_inputs(x1_indices_dyn, x1_values_dyn, x1_shape_dyn, x2_indices_dyn,
x2_values_dyn, x2_shape_dyn, x3_dense_dyn, alpha, beta)
x1_indices = Tensor(np.array([[0, 1], [0, 1]]), mstype.int64)
x1_values = Tensor(np.array([1, 2]), mstype.int32)
x1_shape = Tensor(np.array([3, 3]), mstype.int64)
x2_indices = Tensor(np.array([[0, 1], [2, 2]]), mstype.int64)
x2_values = Tensor(np.array([3, 4]), mstype.int32)
x2_shape = Tensor(np.array([3, 3]), mstype.int64)
x3_dense = Tensor(np.array([[1, 2, 3], [1, 3, 2], [3, 2, 1]]),
mstype.int32)
out = net(x1_indices, x1_values, x1_shape, x2_indices, x2_values, x2_shape,
x3_dense, alpha, beta)
expect_shapes = [(2, 8), (8,), (2,)]
for i in range(3):
assert out[i].asnumpy().shape == expect_shapes[i]
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sspaddmm_input_int32():
"""
Feature: Sspaddmm gpu TEST.
Description: 2d int32 test case for Sspaddmm
Expectation: The value and shape of output are the expected values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x1_indices = Tensor(np.array([[0, 1], [0, 1]]), mstype.int32)
x1_values = Tensor(np.array([1, 2]), mstype.int32)
x1_shape = Tensor(np.array([3, 3]), mstype.int32)
x2_indices = Tensor(np.array([[0, 1], [2, 2]]), mstype.int32)
x2_values = Tensor(np.array([3, 4]), mstype.int32)
x2_shape = Tensor(np.array([3, 3]), mstype.int32)
x3_dense = Tensor(np.array([[1, 2, 3], [1, 3, 2], [3, 2, 1]]),
mstype.int32)
alpha = Tensor(np.array([1]), mstype.int32)
beta = Tensor(np.array([1]), mstype.int32)
net = SspaddmmNet()
y_indices, y_values, y_shape = net(x1_indices, x1_values, x1_shape,
x2_indices, x2_values, x2_shape,
x3_dense, alpha, beta)
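    # The expected values follow out = beta * x1 + alpha * (x2 @ x3_dense): with
    # alpha = beta = 1, x2 @ x3_dense contributes rows [9, 6, 3] and [12, 8, 4],
    # while the x1 entries 1 and 2 stay as separate sparse elements at the front.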
y_indices_expect = np.array(
[[0, 1, 0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 0, 1, 2]], dtype=np.int64)
y_values_expect = np.array([1, 2, 9, 6, 3, 12, 8, 4], dtype=np.int32)
y_shape_expect = np.array([3, 3], dtype=np.int64)
assert np.allclose(y_indices.asnumpy(), y_indices_expect.astype(np.int64),
0.0001, 0.0001)
assert np.allclose(y_values.asnumpy(), y_values_expect.astype(np.int32),
0.0001, 0.0001)
assert np.allclose(y_shape.asnumpy(), y_shape_expect.astype(np.int64),
0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sspaddmm_input_int64():
"""
Feature: Sspaddmm gpu TEST.
Description: 2d int64 test case for Sspaddmm
Expectation: The value and shape of output are the expected values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
x1_indices = Tensor(np.array([[0, 1], [0, 1]]), mstype.int32)
x1_values = Tensor(np.array([7, 6]), mstype.int32)
x1_shape = Tensor(np.array([3, 3]), mstype.int32)
x2_indices = Tensor(np.array([[0, 1], [2, 2]]), mstype.int32)
x2_values = Tensor(np.array([11, 23]), mstype.int32)
x2_shape = Tensor(np.array([3, 3]), mstype.int32)
x3_dense = Tensor(np.array([[1, 2, 3], [1, 3, 2], [3, 2, 1]]),
mstype.int32)
alpha = Tensor(np.array([2]), mstype.int32)
beta = Tensor(np.array([2]), mstype.int32)
net = SspaddmmNet()
y_indices, y_values, y_shape = net(x1_indices, x1_values, x1_shape,
x2_indices, x2_values, x2_shape,
x3_dense, alpha, beta)
y_indices_expect = np.array([[0, 1, 0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 0, 1, 2]])
y_values_expect = np.array([14, 12, 66, 44, 22, 138, 92, 46])
y_shape_expect = np.array([3, 3])
assert np.allclose(y_indices.asnumpy(), y_indices_expect.astype(np.int64),
0.0001, 0.0001)
assert np.allclose(y_values.asnumpy(), y_values_expect.astype(np.int32),
0.0001, 0.0001)
assert np.allclose(y_shape.asnumpy(), y_shape_expect.astype(np.int64),
0.0001, 0.0001)
| null |
4,828 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_tensor_slice """
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore import dtype as mstype
from mindspore.nn import Cell
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
import pipeline_for_compile_forward_ge_graph_for_case_by_case_config, \
pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception
class NetWorkFancyIndex(Cell):
def __init__(self, index):
super(NetWorkFancyIndex, self).__init__()
self.index = index
def construct(self, tensor):
return tensor[self.index]
class TensorItemByNone(Cell):
def construct(self, tensor):
ret = tensor.item()
return ret
class TensorItemByItem(Cell):
def construct(self, tensor, index):
ret = tensor.item(index)
return ret
def test_tensor_fancy_index_integer_list():
context.set_context(mode=context.GRAPH_MODE)
index = [0, 2, 1]
net = NetWorkFancyIndex(index)
input_np = np.arange(60).reshape(3, 4, 5)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def test_tensor_fancy_index_boolean_list():
context.set_context(mode=context.GRAPH_MODE)
index = [True, True, False]
net = NetWorkFancyIndex(index)
input_np = np.arange(60).reshape(3, 4, 5)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def test_tensor_fancy_index_integer_boolean_list_graph():
context.set_context(mode=context.GRAPH_MODE)
index = [1, 2, True, False]
net = NetWorkFancyIndex(index)
input_np = np.arange(60).reshape(3, 4, 5)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def test_tensor_fancy_index_integer_list_mixed():
context.set_context(mode=context.GRAPH_MODE)
index = (1, [2, 1, 3], slice(1, 3, 1), ..., 4)
net = NetWorkFancyIndex(index)
input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def test_tensor_fancy_index_integer_tuple_mixed():
context.set_context(mode=context.GRAPH_MODE)
index = (1, (2, 1, 3), slice(1, 3, 1), ..., 4)
net = NetWorkFancyIndex(index)
input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def test_tensor_fancy_index_integer_list_tuple_mixed():
context.set_context(mode=context.GRAPH_MODE)
index = (1, [2, 1, 3], (3, 2, 1), slice(1, 3, 1), ..., 4)
net = NetWorkFancyIndex(index)
input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def METHOD_NAME():
context.set_context(mode=context.GRAPH_MODE)
index = (1, [2, 1, 3], True, (3, 2, 1), slice(1, 3, 1), ..., True, 4)
net = NetWorkFancyIndex(index)
input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8)
input_me = Tensor(input_np, dtype=mstype.float32)
net(input_me)
def test_tensor_fancy_index_integer_list_tuple_bool_mixed_error():
context.set_context(mode=context.GRAPH_MODE)
index = (1, [2, 1, 3], True, (3, 2, 1), slice(1, 3, 1), ..., False, 4)
net = NetWorkFancyIndex(index)
input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8)
input_me = Tensor(input_np, dtype=mstype.float32)
with pytest.raises(IndexError):
net(input_me)
input_1d_np = np.ndarray([1]).astype(np.float32)
input_1d_ms = Tensor(input_1d_np, mstype.float32)
input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)
input_3d_ms = Tensor(input_3d_np, mstype.float32)
index_np_1, index_np_2, index_np_3, index_np_4 = 0, 1.0, 30, 60
tuple_index_np_1, tuple_index_np_2, tuple_index_np_3, tuple_index_np_4, tuple_index_np_5 = \
(0,), (1, 2), (1, 2, 3), (3, 4, 4), (1, 2, 3, 4)
test_cases = [
('TensorItemByNone', {'block': TensorItemByNone(), 'desc_inputs': [input_1d_ms],}),
('1dTensorItemByInt', {'block': TensorItemByItem(), 'desc_inputs': [input_1d_ms, index_np_1],}),
('3dTensorItemByInt', {'block': TensorItemByItem(), 'desc_inputs': [input_3d_ms, index_np_1],}),
('3dTensorItemByInt2', {'block': TensorItemByItem(), 'desc_inputs': [input_3d_ms, index_np_3],}),
('1dTensorItemByTuple', {'block': TensorItemByItem(), 'desc_inputs': [input_1d_ms, tuple_index_np_1],}),
('3dTensorItemByTuple', {'block': TensorItemByItem(), 'desc_inputs': [input_3d_ms, tuple_index_np_3],}),
]
test_error_cases = [
('TensorItemByNoneForMulDimsTensor', {
'block': (TensorItemByNone(), {'exception': ValueError}),
'desc_inputs': [input_3d_ms]
}),
('TensorItemByFloatError', {
'block': (TensorItemByItem(), {'exception': TypeError}),
'desc_inputs': [input_1d_ms, index_np_2]
}),
('TensorItemByFloatError2', {
'block': (TensorItemByItem(), {'exception': TypeError}),
'desc_inputs': [input_3d_ms, index_np_2]
}),
('TensorItemByIntOverBoundary', {
'block': (TensorItemByItem(), {'exception': IndexError}),
'desc_inputs': [input_1d_ms, index_np_3]
}),
('TensorItemByIntOverBoundary2', {
'block': (TensorItemByItem(), {'exception': IndexError}),
'desc_inputs': [input_3d_ms, index_np_4]
}),
('1dTensorItemBy2dTuple', {
'block': (TensorItemByItem(), {'exception': ValueError}),
'desc_inputs': [input_1d_ms, tuple_index_np_2]
}),
('3dTensorItemBy2dTuple', {
'block': (TensorItemByItem(), {'exception': ValueError}),
'desc_inputs': [input_3d_ms, tuple_index_np_2]
}),
('3dTensorItemBy3dTupleOutOfBoundary', {
'block': (TensorItemByItem(), {'exception': IndexError}),
'desc_inputs': [input_3d_ms, tuple_index_np_4]
}),
('3dTensorItemBy4dTuple', {
'block': (TensorItemByItem(), {'exception': ValueError}),
'desc_inputs': [input_3d_ms, tuple_index_np_5]
})
]
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
context.set_context(mode=context.GRAPH_MODE)
return test_cases
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
def test_check_exception():
return test_error_cases
| null |
4,829 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
import mindspore.nn as nn
import mindspore.context as context
class DTypeNet(nn.Cell):
def __init__(self):
super(DTypeNet, self).__init__()
self.dtype = P.DType()
def construct(self, x):
return self.dtype(x)
class DTypeDynamicNet(nn.Cell):
def __init__(self):
super(DTypeDynamicNet, self).__init__()
self.d = inner.GpuConvertToDynamicShape()
self.dtype = P.DType()
def construct(self, x):
x = self.d(x)
return self.dtype(x)
def dtype_with_testcase(mstype):
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
x = Tensor(np.arange(34).reshape(2, 17), dtype=mstype)
net = DTypeNet()
assert mstype == net(x)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
assert mstype == net(x)
def dtype_dynamic_with_testcase(mstype):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x = Tensor(np.arange(34).reshape(2, 17), dtype=mstype)
net = DTypeDynamicNet()
assert mstype == net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_bool():
dtype_with_testcase(ms.bool_)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int8():
dtype_with_testcase(ms.int8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_uint8():
dtype_with_testcase(ms.uint8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int16():
dtype_with_testcase(ms.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_uint16():
dtype_with_testcase(ms.uint16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int32():
dtype_with_testcase(ms.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int64():
dtype_with_testcase(ms.int64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_float16():
dtype_with_testcase(ms.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_float32():
dtype_with_testcase(ms.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_float64():
dtype_with_testcase(ms.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_bool():
dtype_dynamic_with_testcase(ms.bool_)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
dtype_dynamic_with_testcase(ms.int8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_uint8():
dtype_dynamic_with_testcase(ms.uint8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int16():
dtype_dynamic_with_testcase(ms.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_uint16():
dtype_dynamic_with_testcase(ms.uint16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int32():
dtype_dynamic_with_testcase(ms.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int64():
dtype_dynamic_with_testcase(ms.int64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_float16():
dtype_dynamic_with_testcase(ms.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_float32():
dtype_dynamic_with_testcase(ms.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_float64():
dtype_dynamic_with_testcase(ms.float64)
| null |
4,830 |
# -*- coding: utf-8 -*-
"""User state management via an HTTP cookie
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkcompat
from pykern import pkconfig
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
import base64
import cryptography.fernet
import itertools
import re
import sirepo.events
import sirepo.quest
import sirepo.util
_MAX_AGE_SECONDS = 10 * 365 * 24 * 3600
#: Identifies if the cookie has been returned at least once by the client
_COOKIE_SENTINEL = "srk"
#: Unique, truthy value that can be asserted on decrypt
_COOKIE_SENTINEL_VALUE = "z"
_SERIALIZER_SEP = " "
_cfg = None
def init_quest(qcall):
c = _Cookie(qcall)
qcall.attr_set("cookie", c)
if qcall.bucket_unchecked_get("in_pkcli"):
c.set_sentinel()
class _Cookie(sirepo.quest.Attr):
def __init__(self, qcall):
super().__init__()
self.__incoming_serialized = ""
self._from_cookie_header(qcall)
def get_value(self, key):
return self.__values[key]
def has_key(self, key):
return key in self.__values
def has_sentinel(self):
return _COOKIE_SENTINEL in self.__values
def reset_state(self, error):
"""Clear all values and log `error` with values.
Args:
error (str): to be logged
"""
pkdlog("resetting cookie: error={} values={}", error, _state())
self.__values.clear()
def save_to_cookie(self, resp):
self.set_sentinel()
s = self._serialize()
if s == self.__incoming_serialized:
return
resp.cookie_set(
key=_cfg.http_name,
value=self._encrypt(s),
max_age=_MAX_AGE_SECONDS,
httponly=True,
secure=_cfg.is_secure,
samesite="Lax",
)
def set_sentinel(self):
self.__values[_COOKIE_SENTINEL] = _COOKIE_SENTINEL_VALUE
def METHOD_NAME(self, key, value):
v = str(value)
assert (
not _SERIALIZER_SEP in v
), f"value={v} must not contain _SERIALIZER_SEP={_SERIALIZER_SEP}"
assert (
key != _COOKIE_SENTINEL
), f"key={key} is _COOKIE_SENTINEL={_COOKIE_SENTINEL}"
assert (
_COOKIE_SENTINEL in self.__values
), f"_COOKIE_SENTINEL not set self keys={sorted(self.__values.keys())} for key={key}"
self.__values[key] = v
def unchecked_get_value(self, key, default=None):
return self.__values.get(key, default)
def unchecked_remove(self, key):
return self.__values.pkdel(key)
def _crypto(self):
if "_crypto_alg" not in self:
if _cfg.private_key is None:
assert pkconfig.in_dev_mode(), "must configure private_key in non-dev"
_cfg.private_key = base64.urlsafe_b64encode(
b"01234567890123456789012345678912"
)
assert (
len(base64.urlsafe_b64decode(_cfg.private_key)) == 32
), "private_key must be 32 characters and encoded with urlsafe_b64encode"
self._crypto_alg = cryptography.fernet.Fernet(_cfg.private_key)
return self._crypto_alg
def _decrypt(self, value):
d = self._crypto().decrypt(
base64.urlsafe_b64decode(pkcompat.to_bytes(value)),
)
pkdc("{}", d)
return pkcompat.from_bytes(d)
def _deserialize(self, value):
v = value.split(_SERIALIZER_SEP)
v = dict(zip(v[::2], v[1::2]))
assert (
v[_COOKIE_SENTINEL] == _COOKIE_SENTINEL_VALUE
), "cookie sentinel value is not correct"
return v
def _encrypt(self, text):
return pkcompat.from_bytes(
base64.urlsafe_b64encode(
self._crypto().encrypt(pkcompat.to_bytes(text)),
),
)
def _from_cookie_header(self, qcall):
header = qcall.sreq.header_uget("Cookie")
self.__values = PKDict()
if not header:
return
s = None
err = None
try:
match = re.search(
r"\b{}=([^;]+)".format(_cfg.http_name),
header,
)
if match:
s = self._decrypt(match.group(1))
self.__values.update(qcall.auth.cookie_cleaner(self._deserialize(s)))
self.__incoming_serialized = s
return
except Exception as e:
if "crypto" in type(e).__module__:
# cryptography module exceptions serialize to empty string
# so just report the type.
e = type(e)
err = e
pkdc("{}", pkdexc())
if err:
pkdlog("Cookie decoding failed: {} value={}", err, s)
def _serialize(self):
return _SERIALIZER_SEP.join(
itertools.chain.from_iterable(
[(k, self.__values[k]) for k in sorted(self.__values.keys())],
),
)
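# A small illustration of the serialization format used by _serialize/_deserialize
# above (the real cookie value is additionally Fernet-encrypted and base64-encoded;
# the "uid" key is only an example): {"srk": "z", "uid": "abc"} is stored as the
# string "srk z uid abc", and _deserialize rebuilds the dict with
# dict(zip(v[::2], v[1::2])) after splitting on spaces.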
@pkconfig.parse_none
def _cfg_http_name(value):
assert re.search(
r"^\w{1,32}$", value
), "must be 1-32 word characters; http_name={}".format(value)
return value
def _end_api_call(qcall, kwargs):
qcall.cookie.save_to_cookie(kwargs.resp)
def init_module():
global _cfg
if _cfg:
return
_cfg = pkconfig.init(
http_name=(
"sirepo_" + pkconfig.cfg.channel,
_cfg_http_name,
"Set-Cookie name",
),
private_key=(None, str, "urlsafe base64 encrypted 32-byte key"),
is_secure=(
not pkconfig.in_dev_mode(),
pkconfig.parse_bool,
"Add secure attribute to Set-Cookie",
),
)
sirepo.events.register(PKDict(end_api_call=_end_api_call))
| null |
4,831 |
import torch
import torch.nn as nn
from avalanche.models.dynamic_modules import (
MultiTaskModule,
MultiHeadClassifier,
)
class MultiTaskDecorator(MultiTaskModule):
"""
    Encapsulates an existing nn.Module to make it a subclass of MultiTaskModule;
    the user should still be able to interact with the encapsulated module
    as if it were the module itself.
    The only things that change are the following: the classifier from the
    given model will be replaced by a MultiHeadClassifier, and the forward()
    implementation will be overwritten by one that accepts task labels.
    The encapsulated module will then be automatically extended to
    fit new classes during calls to model.adaptation()
"""
def __init__(self, model: nn.Module, classifier_name: str):
"""
:param model: pytorch nn.Module that does not support multitask
:param classifier_name: attribute name of the existing classification
layer inside the module
"""
for m in model.modules():
assert not isinstance(m, MultiTaskModule)
self.__dict__["_initialized"] = False
super().__init__()
self.model = model
self.classifier_name = classifier_name
old_classifier = getattr(model, classifier_name)
if isinstance(old_classifier, nn.Linear):
in_size = old_classifier.in_features
out_size = old_classifier.out_features
old_params = [torch.clone(p.data) for p in old_classifier.parameters()]
# Replace old classifier by empty block
setattr(self.model, classifier_name, nn.Sequential())
elif isinstance(old_classifier, nn.Sequential):
in_size = old_classifier[-1].in_features
out_size = old_classifier[-1].out_features
old_params = [torch.clone(p.data) for p in old_classifier[-1].parameters()]
del old_classifier[-1]
else:
            raise NotImplementedError(
                f"Cannot handle the following type of classification "
                f"layer: {type(old_classifier)}"
            )
# Set new classifier and initialize to previous param values
setattr(self, classifier_name, MultiHeadClassifier(in_size, out_size))
for param, param_old in zip(
getattr(self, classifier_name).parameters(), old_params
):
param.data = param_old
self.max_class_label = max(self.max_class_label, out_size)
self._initialized = True
def METHOD_NAME(self, x: torch.Tensor, task_label: int):
out = self.model(x)
return getattr(self, self.classifier_name)(
out.view(out.size(0), -1), task_labels=task_label
)
def forward_all_tasks(self, x: torch.Tensor):
"""compute the output given the input `x` and task label.
By default, it considers only tasks seen at training time.
:param x:
:return: all the possible outputs are returned as a dictionary
with task IDs as keys and the output of the corresponding
task as output.
"""
out = self.model(x)
return getattr(self, self.classifier_name)(
out.view(out.size(0), -1), task_labels=None
)
def __getattr__(self, name):
# Override pytorch impl from nn.Module
# Its a bit particular since pytorch nn.Module does not
# keep some attributes in a classical manner in self.__dict__
# rather it puts them into _parameters, _buffers and
# _modules attributes. We have to add these lines to avoid recursion
if name == "model":
return self.__dict__["_modules"]["model"]
if name == self.classifier_name:
return self.__dict__["_modules"][self.classifier_name]
# If its a different attribute, return the one from the model
return getattr(self.model, name)
def __setattr__(self, name, value):
# During initialization, use pytorch routine
if not self.__dict__["_initialized"] or name in self.__dict__:
super().__setattr__(name, value)
else:
return setattr(self.model, name, value)
def as_multitask(model: nn.Module, classifier_name: str) -> MultiTaskModule:
"""Wraps around a model to make it a multitask model.
:param model: model to be converted into MultiTaskModule
:param classifier_name: the name of the attribute containing
the classification layer (nn.Linear). It can also
be an instance of nn.Sequential containing multiple
layers as long as the classification layer is the
last layer.
:return: the decorated model, now subclassing MultiTaskModule, and
accepting task_labels as forward() method argument
"""
return MultiTaskDecorator(model, classifier_name)
__all__ = ["as_multitask"]
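# Hedged usage sketch (illustration only, assuming the full Avalanche
# MultiTaskModule machinery is available at runtime): a plain model whose
# classification layer lives in an attribute named "classifier" is wrapped so that
# forward() accepts task labels. The _TinyNet name and layer sizes are made up
# for this example.
def _as_multitask_demo() -> MultiTaskModule:
    class _TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.features = nn.Linear(8, 8)
            self.classifier = nn.Linear(8, 4)
        def forward(self, x):
            return self.classifier(self.features(x))
    model = as_multitask(_TinyNet(), "classifier")
    # The wrapper routes the flattened features through a MultiHeadClassifier,
    # e.g. model(torch.zeros(2, 8), task_labels=0) once task heads are adapted.
    return model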
| null |
4,832 |
#!/usr/bin/env python
"""Unit tests for Info class and associated objects (DbRef, DbRefs, etc.).
"""
import warnings
from unittest import TestCase, main
from cogent3.core.info import DbRef, DbRefs, Info, _make_list
class DbRefTests(TestCase):
"""Tests of the DbRef object."""
def setUp(self):
"""Define a standard DbRef object"""
self.data = dict(
Accession="xyz",
Db="abc",
name="qwe",
Description="blah",
Data=list(range(20)),
)
self.db = DbRef(**self.data)
def test_init_minimal(self):
"""DbRef minimal init should fill fields as expected"""
d = DbRef("abc")
self.assertEqual(d.Accession, "abc")
self.assertEqual(d.Db, "")
self.assertEqual(d.name, "")
self.assertEqual(d.Description, "")
self.assertEqual(d.Data, None)
# empty init not allowed
self.assertRaises(TypeError, DbRef)
def test_init(self):
"""DbRef init should insert correct data"""
for attr, val in list(self.data.items()):
self.assertEqual(getattr(self.db, attr), val)
def METHOD_NAME(self):
"""DbRef str should be the same as the accession str"""
self.assertEqual(str(self.db), "xyz")
self.db.Accession = 12345
self.assertEqual(str(self.db), "12345")
def test_int(self):
"""DbRef int should be the same as the accession int"""
self.assertRaises(ValueError, int, self.db)
self.db.Accession = "12345"
self.assertEqual(int(self.db), 12345)
def test_cmp(self):
"""DbRef cmp should first try numeric, then alphabetic, cmp."""
self.assertLess(DbRef("abc"), DbRef("xyz"))
self.assertEqual(DbRef("abc"), DbRef("abc"))
self.assertGreater(DbRef("123"), DbRef("14"))
self.assertLess(DbRef("123"), DbRef("abc"))
# check that it ignores other attributes
self.assertEqual(DbRef("x", "y", "z", "a", "b"), DbRef("x"))
class infoTests(TestCase):
"""Tests of top-level functions."""
def test_make_list(self):
"""_make_list should always return a list"""
self.assertEqual(_make_list("abc"), ["abc"])
self.assertEqual(_make_list([]), [])
self.assertEqual(_make_list(None), [None])
self.assertEqual(_make_list({"x": "y"}), [{"x": "y"}])
self.assertEqual(_make_list([1, 2, 3]), [1, 2, 3])
class DbRefsTests(TestCase):
"""Tests of the DbRefs class."""
def test_init_empty(self):
"""DbRefs empty init should work as expected"""
self.assertEqual(DbRefs(), {})
def test_init_data(self):
"""DbRefs init with data should produce expected results"""
d = DbRefs({"GenBank": "ab", "GO": (3, 44), "PDB": ["asdf", "ghjk"]})
self.assertEqual(d, {"GenBank": ["ab"], "GO": [3, 44], "PDB": ["asdf", "ghjk"]})
d.GenBank = "xyz"
self.assertEqual(d["GenBank"], ["xyz"])
class InfoTests(TestCase):
"""Tests of the Info class."""
def test_init_empty(self):
"""Info empty init should work as expected"""
d = Info()
self.assertEqual(len(d), 1)
self.assertIn("Refs", d)
self.assertEqual(d.Refs, DbRefs())
self.assertTrue(isinstance(d.Refs, DbRefs))
def test_init_data(self):
"""Info init with data should put items in correct places"""
# need to check init, setting, and resetting of attributes that belong
# in the Info object and attributes that belong in Info.Refs. Also need
# to check __getitem__, __setitem__, and __contains__.
d = Info({"x": 3, "GO": 12345})
self.assertEqual(d.x, 3)
self.assertEqual(d.GO, [12345])
self.assertEqual(d.Refs.GO, [12345])
try:
del d.Refs
except AttributeError:
pass
else:
raise Exception("Failed to prevent deletion of required key Refs" "")
d.GenBank = ("qaz", "wsx")
self.assertEqual(d.GenBank, ["qaz", "wsx"])
self.assertIn("GenBank", d.Refs)
self.assertIn("GenBank", d)
d.GenBank = "xyz"
self.assertEqual(d.GenBank, ["xyz"])
self.assertIs(d.GenBank, d.Refs.GenBank)
d.GO = "x"
self.assertEqual(d.GO, ["x"])
d.GO.append("y")
self.assertEqual(d.GO, ["x", "y"])
d.ZZZ = "zzz"
self.assertEqual(d.ZZZ, "zzz")
self.assertNotIn("ZZZ", d.Refs)
self.assertNotIn("XXX", d)
self.assertEqual(d.XXX, None)
def test_identity(self):
"""Info should get its own new Refs when created"""
i = Info()
j = Info()
self.assertIsNot(i, j)
self.assertIsNot(i.Refs, j.Refs)
def test_update(self):
"""update should warn the user of overlapping keys"""
with warnings.catch_warnings(record=True) as w:
d1 = Info({"key1": "value1", "key2": "value2", "key3": "value3"})
d2 = Info({"key2": "value2", "key3": "value3", "key4": "value4"})
d1.update(d2)
self.assertEqual(len(w), 1)
# run the following if invoked from command-line
if __name__ == "__main__":
main()
| null |
4,833 |
import textwrap
import pytest
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient, NO_SETTINGS_PACKAGE_ID
class TestInvalidConfiguration:
"""
    ConanInvalidConfiguration without a binary fallback results in errors
"""
conanfile = textwrap.dedent("""
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
class Conan(ConanFile):
settings = "os"
def validate(self):
if self.info.settings.os == "Windows":
raise ConanInvalidConfiguration("Package does not work in Windows!")
""")
linux_package_id = "9a4eb3c8701508aa9458b1a73d0633783ecc2270"
invalid = "Invalid"
@pytest.fixture(scope="class")
def client(self):
client = TestClient()
client.save({"pkg/conanfile.py": self.conanfile})
client.run("create pkg --name=pkg --version=0.1 -s os=Linux")
return client
def test_invalid(self, client):
conanfile_consumer = GenConanfile().with_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s os=Windows", assert_error=True)
assert "pkg/0.1: Invalid: Package does not work in Windows!" in client.out
def test_invalid_info(self, client):
"""
the conan info command does not raise, but it outputs info
"""
conanfile_consumer = GenConanfile().with_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("graph info consumer -s os=Windows")
assert "binary: Invalid" in client.out
def test_valid(self, client):
conanfile_consumer = GenConanfile().with_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s os=Linux")
client.assert_listed_binary({"pkg/0.1": (self.linux_package_id, "Cache")})
assert "pkg/0.1: Already installed!" in client.out
def METHOD_NAME(self, client):
conanfile_consumer = GenConanfile().with_tool_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s:h os=Windows -s:b os=Windows", assert_error=True)
assert "pkg/0.1: Invalid: Package does not work in Windows!" in client.out
def test_valid_build_require_two_profiles(self, client):
conanfile_consumer = GenConanfile().with_tool_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s:b os=Linux -s:h os=Windows")
client.assert_listed_binary({"pkg/0.1": (self.linux_package_id, "Cache")}, build=True)
assert "pkg/0.1: Already installed!" in client.out
class TestErrorConfiguration(TestInvalidConfiguration):
"""
A configuration error is unsolvable, even if a binary exists
"""
conanfile = textwrap.dedent("""
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
class Conan(ConanFile):
settings = "os"
def validate(self):
if self.info.settings.os == "Windows":
raise ConanInvalidConfiguration("Package does not work in Windows!")
def package_id(self):
del self.info.settings.os
""")
linux_package_id = NO_SETTINGS_PACKAGE_ID
invalid = "ConfigurationError"
class TestErrorConfigurationCompatible(TestInvalidConfiguration):
"""
A configuration error is unsolvable, even if a binary exists
"""
conanfile = textwrap.dedent("""
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
class Conan(ConanFile):
settings = "os"
def validate(self):
if self.info.settings.os == "Windows":
raise ConanInvalidConfiguration("Package does not work in Windows!")
def compatibility(self):
if self.settings.os == "Windows":
return [{"settings": [("os", "Linux")]}]
""")
linux_package_id = "9a4eb3c8701508aa9458b1a73d0633783ecc2270"
invalid = "ConfigurationError"
class TestInvalidBuildPackageID:
"""
ConanInvalidBuildConfiguration will not block if setting is removed from package_id
"""
conanfile = textwrap.dedent("""
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
class Conan(ConanFile):
settings = "os"
def validate_build(self):
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("Package does not work in Windows!")
def package_id(self):
del self.info.settings.os
""")
linux_package_id = NO_SETTINGS_PACKAGE_ID
windows_package_id = NO_SETTINGS_PACKAGE_ID
@pytest.fixture(scope="class")
def client(self):
client = TestClient()
client.save({"pkg/conanfile.py": self.conanfile})
client.run("create pkg --name=pkg --version=0.1 -s os=Linux")
return client
def test_valid(self, client):
conanfile_consumer = GenConanfile().with_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s os=Windows")
client.assert_listed_binary({"pkg/0.1": (self.linux_package_id, "Cache")})
assert "pkg/0.1: Already installed!" in client.out
client.run("install consumer -s os=Linux")
client.assert_listed_binary({"pkg/0.1": (self.linux_package_id, "Cache")})
assert "pkg/0.1: Already installed!" in client.out
def test_invalid_try_build(self, client):
conanfile_consumer = GenConanfile().with_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s os=Windows --build='*'", assert_error=True)
# Only when trying to build, it will try to build the Windows one
client.assert_listed_binary({"pkg/0.1": (self.windows_package_id, "Invalid")})
assert "Package does not work in Windows!" in client.out
def test_valid_build_require_two_profiles(self, client):
conanfile_consumer = GenConanfile().with_tool_requires("pkg/0.1").with_settings("os")
client.save({"consumer/conanfile.py": conanfile_consumer})
client.run("install consumer -s:b os=Linux -s:h os=Windows")
client.assert_listed_binary({"pkg/0.1": (self.linux_package_id, "Cache")}, build=True)
assert "pkg/0.1: Already installed!" in client.out
client.run("install consumer -s:b os=Windows -s:h os=Windows")
client.assert_listed_binary({"pkg/0.1": (self.linux_package_id, "Cache")}, build=True)
assert "pkg/0.1: Already installed!" in client.out
class TestInvalidBuildCompatible(TestInvalidBuildPackageID):
"""
ConanInvalidBuildConfiguration will not block if compatible_packages fallback
"""
conanfile = textwrap.dedent("""
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
class Conan(ConanFile):
settings = "os"
def validate_build(self):
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("Package does not work in Windows!")
def compatibility(self):
if self.settings.os == "Windows":
return [{"settings": [("os", "Linux")]}]
""")
linux_package_id = "9a4eb3c8701508aa9458b1a73d0633783ecc2270"
windows_package_id = "ebec3dc6d7f6b907b3ada0c3d3cdc83613a2b715"
| null |
4,834 |
# Copyright (C) 2023 Mandiant, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from __future__ import annotations
from typing import Dict, List, Tuple, Union, Iterator, Optional
from pathlib import Path
import dnfile
from dncil.cil.opcode import OpCodes
import capa.features.extractors
import capa.features.extractors.dotnetfile
import capa.features.extractors.dnfile.file
import capa.features.extractors.dnfile.insn
import capa.features.extractors.dnfile.function
from capa.features.common import Feature
from capa.features.address import NO_ADDRESS, Address, DNTokenAddress, DNTokenOffsetAddress
from capa.features.extractors.dnfile.types import DnType, DnUnmanagedMethod
from capa.features.extractors.base_extractor import BBHandle, InsnHandle, FunctionHandle, FeatureExtractor
from capa.features.extractors.dnfile.helpers import (
get_dotnet_types,
get_dotnet_fields,
get_dotnet_managed_imports,
get_dotnet_managed_methods,
get_dotnet_unmanaged_imports,
get_dotnet_managed_method_bodies,
)
class DnFileFeatureExtractorCache:
def __init__(self, pe: dnfile.dnPE):
self.imports: Dict[int, Union[DnType, DnUnmanagedMethod]] = {}
self.native_imports: Dict[int, Union[DnType, DnUnmanagedMethod]] = {}
self.methods: Dict[int, Union[DnType, DnUnmanagedMethod]] = {}
self.fields: Dict[int, Union[DnType, DnUnmanagedMethod]] = {}
self.types: Dict[int, Union[DnType, DnUnmanagedMethod]] = {}
for import_ in get_dotnet_managed_imports(pe):
self.imports[import_.token] = import_
for native_import in get_dotnet_unmanaged_imports(pe):
self.native_imports[native_import.token] = native_import
for method in get_dotnet_managed_methods(pe):
self.methods[method.token] = method
for field in get_dotnet_fields(pe):
self.fields[field.token] = field
for type_ in get_dotnet_types(pe):
self.types[type_.token] = type_
def get_import(self, token: int) -> Optional[Union[DnType, DnUnmanagedMethod]]:
return self.imports.get(token)
def get_native_import(self, token: int) -> Optional[Union[DnType, DnUnmanagedMethod]]:
return self.native_imports.get(token)
def get_method(self, token: int) -> Optional[Union[DnType, DnUnmanagedMethod]]:
return self.methods.get(token)
def get_field(self, token: int) -> Optional[Union[DnType, DnUnmanagedMethod]]:
return self.fields.get(token)
def METHOD_NAME(self, token: int) -> Optional[Union[DnType, DnUnmanagedMethod]]:
return self.types.get(token)
class DnfileFeatureExtractor(FeatureExtractor):
def __init__(self, path: Path):
super().__init__()
self.pe: dnfile.dnPE = dnfile.dnPE(str(path))
# pre-compute .NET token lookup tables; each .NET method has access to this cache for feature extraction
# most relevant at instruction scope
self.token_cache: DnFileFeatureExtractorCache = DnFileFeatureExtractorCache(self.pe)
# pre-compute these because we'll yield them at *every* scope.
self.global_features: List[Tuple[Feature, Address]] = []
self.global_features.extend(capa.features.extractors.dotnetfile.extract_file_format())
self.global_features.extend(capa.features.extractors.dotnetfile.extract_file_os(pe=self.pe))
self.global_features.extend(capa.features.extractors.dotnetfile.extract_file_arch(pe=self.pe))
def get_base_address(self):
return NO_ADDRESS
def extract_global_features(self):
yield from self.global_features
def extract_file_features(self):
yield from capa.features.extractors.dnfile.file.extract_features(self.pe)
def get_functions(self) -> Iterator[FunctionHandle]:
# create a method lookup table
methods: Dict[Address, FunctionHandle] = {}
for token, method in get_dotnet_managed_method_bodies(self.pe):
fh: FunctionHandle = FunctionHandle(
address=DNTokenAddress(token),
inner=method,
ctx={"pe": self.pe, "calls_from": set(), "calls_to": set(), "cache": self.token_cache},
)
# method tokens should be unique
assert fh.address not in methods.keys()
methods[fh.address] = fh
# calculate unique calls to/from each method
for fh in methods.values():
for insn in fh.inner.instructions:
if insn.opcode not in (
OpCodes.Call,
OpCodes.Callvirt,
OpCodes.Jmp,
OpCodes.Newobj,
):
continue
address: DNTokenAddress = DNTokenAddress(insn.operand.value)
# record call to destination method; note: we only consider MethodDef methods for destinations
dest: Optional[FunctionHandle] = methods.get(address)
if dest is not None:
dest.ctx["calls_to"].add(fh.address)
# record call from source method; note: we record all unique calls from a MethodDef method, not just
# those calls to other MethodDef methods e.g. calls to imported MemberRef methods
fh.ctx["calls_from"].add(address)
yield from methods.values()
def extract_function_features(self, fh) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.dnfile.function.extract_features(fh)
def get_basic_blocks(self, f) -> Iterator[BBHandle]:
# each dotnet method is considered 1 basic block
yield BBHandle(
address=f.address,
inner=f.inner,
)
def extract_basic_block_features(self, fh, bbh):
# we don't support basic block features
yield from []
def get_instructions(self, fh, bbh):
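        # Address each instruction by its offset relative to the start of the method
        # body (the method's file offset plus its header size), scoped under the
        # owning method's token address.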
for insn in bbh.inner.instructions:
yield InsnHandle(
address=DNTokenOffsetAddress(bbh.address, insn.offset - (fh.inner.offset + fh.inner.header_size)),
inner=insn,
)
def extract_insn_features(self, fh, bbh, ih) -> Iterator[Tuple[Feature, Address]]:
yield from capa.features.extractors.dnfile.insn.extract_features(fh, bbh, ih)
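# --- usage sketch (illustrative, not part of the original extractor) ---
# A minimal, hedged example of driving the extractor over a .NET sample supplied on
# the command line; the feature handling below is for demonstration only.
if __name__ == "__main__":
    import sys

    extractor = DnfileFeatureExtractor(Path(sys.argv[1]))
    for feature, addr in extractor.extract_global_features():
        print("global", feature, addr)
    for fh in extractor.get_functions():
        for bbh in extractor.get_basic_blocks(fh):
            for ih in extractor.get_instructions(fh, bbh):
                for feature, addr in extractor.extract_insn_features(fh, bbh, ih):
                    print("insn", feature, addr)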
| null |
4,835 |
"""
The main DebugToolbar class that loads and renders the Toolbar.
"""
import uuid
from collections import OrderedDict
from functools import lru_cache
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import Signal
from django.template import TemplateSyntaxError
from django.template.loader import render_to_string
from django.urls import path, resolve
from django.urls.exceptions import Resolver404
from django.utils.module_loading import import_string
from django.utils.translation import get_language, override as lang_override
from debug_toolbar import APP_NAME, settings as dt_settings
class DebugToolbar:
# for internal testing use only
_created = Signal()
def __init__(self, request, get_response):
self.request = request
self.config = dt_settings.get_config().copy()
panels = []
for panel_class in reversed(self.get_panel_classes()):
panel = panel_class(self, get_response)
panels.append(panel)
if panel.enabled:
get_response = panel.process_request
self.process_request = get_response
# Use OrderedDict for the _panels attribute so that items can be efficiently
# removed using FIFO order in the DebugToolbar.store() method. The .popitem()
# method of Python's built-in dict only supports LIFO removal.
self._panels = OrderedDict()
while panels:
panel = panels.pop()
self._panels[panel.panel_id] = panel
self.stats = {}
self.server_timing_stats = {}
self.store_id = None
self._created.send(request, toolbar=self)
# Manage panels
@property
def panels(self):
"""
Get a list of all available panels.
"""
return list(self._panels.values())
@property
def enabled_panels(self):
"""
Get a list of panels enabled for the current request.
"""
return [panel for panel in self._panels.values() if panel.enabled]
def get_panel_by_id(self, panel_id):
"""
Get the panel with the given id, which is the class name by default.
"""
return self._panels[panel_id]
# Handle rendering the toolbar in HTML
def render_toolbar(self):
"""
Renders the overall Toolbar with panels inside.
"""
if not self.should_render_panels():
self.store()
try:
context = {"toolbar": self}
lang = self.config["TOOLBAR_LANGUAGE"] or get_language()
with lang_override(lang):
return render_to_string("debug_toolbar/base.html", context)
except TemplateSyntaxError:
if not apps.is_installed("django.contrib.staticfiles"):
raise ImproperlyConfigured(
"The debug toolbar requires the staticfiles contrib app. "
"Add 'django.contrib.staticfiles' to INSTALLED_APPS and "
"define STATIC_URL in your settings."
) from None
else:
raise
def should_render_panels(self):
"""Determine whether the panels should be rendered during the request
If False, the panels will be loaded via Ajax.
"""
if (render_panels := self.config["RENDER_PANELS"]) is None:
# If wsgi.multiprocess isn't in the headers, then it's likely
# being served by ASGI. This type of set up is most likely
# incompatible with the toolbar until
# https://github.com/jazzband/django-debug-toolbar/issues/1430
# is resolved.
render_panels = self.request.META.get("wsgi.multiprocess", True)
return render_panels
# Handle storing toolbars in memory and fetching them later on
_store = OrderedDict()
def store(self):
# Store already exists.
if self.store_id:
return
self.store_id = uuid.uuid4().hex
self._store[self.store_id] = self
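        # Evict the oldest toolbars (FIFO) until at most RESULTS_CACHE_SIZE entries remain.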
for _ in range(self.config["RESULTS_CACHE_SIZE"], len(self._store)):
self._store.popitem(last=False)
@classmethod
def METHOD_NAME(cls, store_id):
return cls._store.get(store_id)
# Manually implement class-level caching of panel classes and url patterns
# because it's more obvious than going through an abstraction.
_panel_classes = None
@classmethod
def get_panel_classes(cls):
if cls._panel_classes is None:
# Load panels in a temporary variable for thread safety.
panel_classes = [
import_string(panel_path) for panel_path in dt_settings.get_panels()
]
cls._panel_classes = panel_classes
return cls._panel_classes
_urlpatterns = None
@classmethod
def get_urls(cls):
if cls._urlpatterns is None:
from . import views
# Load URLs in a temporary variable for thread safety.
# Global URLs
urlpatterns = [
path("render_panel/", views.render_panel, name="render_panel"),
]
# Per-panel URLs
for panel_class in cls.get_panel_classes():
urlpatterns += panel_class.get_urls()
cls._urlpatterns = urlpatterns
return cls._urlpatterns
@classmethod
def is_toolbar_request(cls, request):
"""
Determine if the request is for a DebugToolbar view.
"""
# The primary caller of this function is in the middleware which may
# not have resolver_match set.
try:
resolver_match = request.resolver_match or resolve(
request.path, getattr(request, "urlconf", None)
)
except Resolver404:
return False
return resolver_match.namespaces and resolver_match.namespaces[-1] == APP_NAME
@staticmethod
@lru_cache(maxsize=None)
def get_observe_request():
# If OBSERVE_REQUEST_CALLBACK is a string, which is the recommended
# setup, resolve it to the corresponding callable.
func_or_path = dt_settings.get_config()["OBSERVE_REQUEST_CALLBACK"]
if isinstance(func_or_path, str):
return import_string(func_or_path)
else:
return func_or_path
def observe_request(request):
"""
Determine whether to update the toolbar from a client side request.
"""
return not DebugToolbar.is_toolbar_request(request)
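# --- illustration (not part of django-debug-toolbar) ---
# A self-contained sketch of the FIFO eviction pattern used in DebugToolbar.store():
# OrderedDict.popitem(last=False) drops the oldest entry, which is why an OrderedDict
# backs the store instead of a plain dict.
if __name__ == "__main__":
    from collections import OrderedDict

    cache_size = 3
    store = OrderedDict()
    for n in range(5):
        store[f"toolbar-{n}"] = object()
        for _ in range(cache_size, len(store)):
            store.popitem(last=False)  # evict the oldest entry
    print(list(store))  # ['toolbar-2', 'toolbar-3', 'toolbar-4']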
| null |
4,836 |
from sympy.core import symbols, S, Pow, Function
from sympy.functions import exp
from sympy.testing.pytest import raises
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.tensor.index_methods import IndexConformanceException
from sympy.tensor.index_methods import (get_contraction_structure, get_indices)
def test_trivial_indices():
x, y = symbols('x y')
assert get_indices(x) == (set(), {})
assert get_indices(x*y) == (set(), {})
assert get_indices(x + y) == (set(), {})
assert get_indices(x**y) == (set(), {})
def test_get_indices_Indexed():
x = IndexedBase('x')
i, j = Idx('i'), Idx('j')
assert get_indices(x[i, j]) == ({i, j}, {})
assert get_indices(x[j, i]) == ({j, i}, {})
def test_get_indices_Idx():
f = Function('f')
i, j = Idx('i'), Idx('j')
assert get_indices(f(i)*j) == ({i, j}, {})
assert get_indices(f(j, i)) == ({j, i}, {})
assert get_indices(f(i)*i) == (set(), {})
def test_get_indices_mul():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_indices(x[j]*y[i]) == ({i, j}, {})
assert get_indices(x[i]*y[j]) == ({i, j}, {})
def METHOD_NAME():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
raises(IndexConformanceException, lambda: get_indices(x[i] + y[j]))
def test_scalar_broadcast():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_indices(x[i] + y[i, i]) == ({i}, {})
assert get_indices(x[i] + y[j, j]) == ({i}, {})
def test_get_indices_add():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
assert get_indices(x[i] + 2*y[i]) == ({i}, {})
assert get_indices(y[i] + 2*A[i, j]*x[j]) == ({i}, {})
assert get_indices(y[i] + 2*(x[i] + A[i, j]*x[j])) == ({i}, {})
assert get_indices(y[i] + x[i]*(A[j, j] + 1)) == ({i}, {})
assert get_indices(
y[i] + x[i]*x[j]*(y[j] + A[j, k]*x[k])) == ({i}, {})
def test_get_indices_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
assert get_indices(Pow(x[i], y[j])) == ({i, j}, {})
assert get_indices(Pow(x[i, k], y[j, k])) == ({i, j, k}, {})
assert get_indices(Pow(A[i, k], y[k] + A[k, j]*x[j])) == ({i, k}, {})
assert get_indices(Pow(2, x[i])) == get_indices(exp(x[i]))
# test of a design decision, this may change:
assert get_indices(Pow(x[i], 2)) == ({i}, {})
def test_get_contraction_structure_basic():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_contraction_structure(x[i]*y[j]) == {None: {x[i]*y[j]}}
assert get_contraction_structure(x[i] + y[j]) == {None: {x[i], y[j]}}
assert get_contraction_structure(x[i]*y[i]) == {(i,): {x[i]*y[i]}}
assert get_contraction_structure(
1 + x[i]*y[i]) == {None: {S.One}, (i,): {x[i]*y[i]}}
assert get_contraction_structure(x[i]**y[i]) == {None: {x[i]**y[i]}}
def test_get_contraction_structure_complex():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
expr1 = y[i] + A[i, j]*x[j]
d1 = {None: {y[i]}, (j,): {A[i, j]*x[j]}}
assert get_contraction_structure(expr1) == d1
expr2 = expr1*A[k, i] + x[k]
d2 = {None: {x[k]}, (i,): {expr1*A[k, i]}, expr1*A[k, i]: [d1]}
assert get_contraction_structure(expr2) == d2
def test_contraction_structure_simple_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
ii_jj = x[i, i]**y[j, j]
assert get_contraction_structure(ii_jj) == {
None: {ii_jj},
ii_jj: [
{(i,): {x[i, i]}},
{(j,): {y[j, j]}}
]
}
ii_jk = x[i, i]**y[j, k]
assert get_contraction_structure(ii_jk) == {
None: {x[i, i]**y[j, k]},
x[i, i]**y[j, k]: [
{(i,): {x[i, i]}}
]
}
def test_contraction_structure_Mul_and_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
i_ji = x[i]**(y[j]*x[i])
assert get_contraction_structure(i_ji) == {None: {i_ji}}
ij_i = (x[i]*y[j])**(y[i])
assert get_contraction_structure(ij_i) == {None: {ij_i}}
j_ij_i = x[j]*(x[i]*y[j])**(y[i])
assert get_contraction_structure(j_ij_i) == {(j,): {j_ij_i}}
j_i_ji = x[j]*x[i]**(y[j]*x[i])
assert get_contraction_structure(j_i_ji) == {(j,): {j_i_ji}}
ij_exp_kki = x[i]*y[j]*exp(y[i]*y[k, k])
result = get_contraction_structure(ij_exp_kki)
expected = {
(i,): {ij_exp_kki},
ij_exp_kki: [{
None: {exp(y[i]*y[k, k])},
exp(y[i]*y[k, k]): [{
None: {y[i]*y[k, k]},
y[i]*y[k, k]: [{(k,): {y[k, k]}}]
}]}
]
}
assert result == expected
def test_contraction_structure_Add_in_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
s_ii_jj_s = (1 + x[i, i])**(1 + y[j, j])
expected = {
None: {s_ii_jj_s},
s_ii_jj_s: [
{None: {S.One}, (i,): {x[i, i]}},
{None: {S.One}, (j,): {y[j, j]}}
]
}
result = get_contraction_structure(s_ii_jj_s)
assert result == expected
s_ii_jk_s = (1 + x[i, i]) ** (1 + y[j, k])
expected_2 = {
None: {(x[i, i] + 1)**(y[j, k] + 1)},
s_ii_jk_s: [
{None: {S.One}, (i,): {x[i, i]}}
]
}
result_2 = get_contraction_structure(s_ii_jk_s)
assert result_2 == expected_2
def test_contraction_structure_Pow_in_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i, j, k = Idx('i'), Idx('j'), Idx('k')
ii_jj_kk = x[i, i]**y[j, j]**z[k, k]
expected = {
None: {ii_jj_kk},
ii_jj_kk: [
{(i,): {x[i, i]}},
{
None: {y[j, j]**z[k, k]},
y[j, j]**z[k, k]: [
{(j,): {y[j, j]}},
{(k,): {z[k, k]}}
]
}
]
}
assert get_contraction_structure(ii_jj_kk) == expected
def test_ufunc_support():
f = Function('f')
g = Function('g')
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
a = symbols('a')
assert get_indices(f(x[i])) == ({i}, {})
assert get_indices(f(x[i], y[j])) == ({i, j}, {})
assert get_indices(f(y[i])*g(x[i])) == (set(), {})
assert get_indices(f(a, x[i])) == ({i}, {})
assert get_indices(f(a, y[i], x[j])*g(x[i])) == ({j}, {})
assert get_indices(g(f(x[i]))) == ({i}, {})
assert get_contraction_structure(f(x[i])) == {None: {f(x[i])}}
assert get_contraction_structure(
f(y[i])*g(x[i])) == {(i,): {f(y[i])*g(x[i])}}
assert get_contraction_structure(
f(y[i])*g(f(x[i]))) == {(i,): {f(y[i])*g(f(x[i]))}}
assert get_contraction_structure(
f(x[j], y[i])*g(x[i])) == {(i,): {f(x[j], y[i])*g(x[i])}}
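# --- illustration (not part of the original test module) ---
# get_indices returns the outer (non-summed) indices plus a dict of extra index
# information; indices repeated within a term are summed away.
# get_contraction_structure maps each contracted index tuple (or None for
# uncontracted terms) to the factors carrying that contraction.
if __name__ == "__main__":
    A, x = IndexedBase('A'), IndexedBase('x')
    i, j = Idx('i'), Idx('j')
    expr = A[i, j]*x[j]
    print(get_indices(expr))                # ({i}, {}) -- j is contracted
    print(get_contraction_structure(expr))  # {(j,): {A[i, j]*x[j]}}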
| null |
4,837 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
# -*- mode: python-mode -*-
#
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <https://www.gnu.org/licenses/>.
# pylint: disable=import-outside-toplevel,consider-using-with,disable=redefined-builtin
import argparse
import os
import runpy
import shlex
from pathlib import Path
from typing import Optional
__version__ = '{{PROJECT_VERSION}} ({{GIT_VERSION}})'
try:
VERBOSE = int(os.environ['KERNEL_INSTALL_VERBOSE']) > 0
except (KeyError, ValueError):
VERBOSE = False
# Override location of ukify and the boot stub for testing and debugging.
UKIFY = os.getenv('KERNEL_INSTALL_UKIFY', 'ukify')
BOOT_STUB = os.getenv('KERNEL_INSTALL_BOOT_STUB')
def shell_join(cmd):
# TODO: drop in favour of shlex.join once shlex.join supports pathlib.Path.
return ' '.join(shlex.quote(str(x)) for x in cmd)
def log(*args, **kwargs):
if VERBOSE:
print(*args, **kwargs)
def path_is_readable(p: Path, dir=False) -> None:
"""Verify access to a file or directory."""
try:
p.open().close()
except IsADirectoryError:
if dir:
return
raise
def mandatory_variable(name):
try:
return os.environ[name]
except KeyError as e:
raise KeyError(f'${name} must be set in the environment') from e
def parse_args(args=None):
p = argparse.ArgumentParser(
description='kernel-install plugin to build a Unified Kernel Image',
allow_abbrev=False,
usage='60-ukify.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…',
)
# Suppress printing of usage synopsis on errors
p.error = lambda message: p.exit(2, f'{p.prog}: error: {message}\n')
p.add_argument('command',
metavar='COMMAND',
help="The action to perform. Only 'add' is supported.")
p.add_argument('kernel_version',
metavar='KERNEL_VERSION',
help='Kernel version string')
p.add_argument('entry_dir',
metavar='ENTRY_DIR',
type=Path,
nargs='?',
help='Type#1 entry directory (ignored)')
p.add_argument('kernel_image',
metavar='KERNEL_IMAGE',
type=Path,
nargs='?',
help='Kernel binary')
p.add_argument('initrd',
metavar='INITRD…',
type=Path,
nargs='*',
help='Initrd files')
p.add_argument('--version',
action='version',
version=f'systemd {__version__}')
opts = p.parse_args(args)
if opts.command == 'add':
opts.staging_area = Path(mandatory_variable('KERNEL_INSTALL_STAGING_AREA'))
path_is_readable(opts.staging_area, dir=True)
opts.entry_token = mandatory_variable('KERNEL_INSTALL_ENTRY_TOKEN')
opts.machine_id = mandatory_variable('KERNEL_INSTALL_MACHINE_ID')
return opts
def we_are_wanted() -> bool:
KERNEL_INSTALL_LAYOUT = os.getenv('KERNEL_INSTALL_LAYOUT')
if KERNEL_INSTALL_LAYOUT != 'uki':
log(f'{KERNEL_INSTALL_LAYOUT=}, quitting.')
return False
KERNEL_INSTALL_UKI_GENERATOR = os.getenv('KERNEL_INSTALL_UKI_GENERATOR') or 'ukify'
if KERNEL_INSTALL_UKI_GENERATOR != 'ukify':
log(f'{KERNEL_INSTALL_UKI_GENERATOR=}, quitting.')
return False
log('KERNEL_INSTALL_LAYOUT and KERNEL_INSTALL_UKI_GENERATOR are good')
return True
def config_file_location() -> Optional[Path]:
if root := os.getenv('KERNEL_INSTALL_CONF_ROOT'):
p = Path(root) / 'uki.conf'
else:
p = Path('/etc/kernel/uki.conf')
if p.exists():
return p
return None
def kernel_cmdline_base() -> list[str]:
if root := os.getenv('KERNEL_INSTALL_CONF_ROOT'):
return Path(root).joinpath('cmdline').read_text().split()
for cmdline in ('/etc/kernel/cmdline',
'/usr/lib/kernel/cmdline'):
try:
return Path(cmdline).read_text().split()
except FileNotFoundError:
continue
options = Path('/proc/cmdline').read_text().split()
return [opt for opt in options
if not opt.startswith(('BOOT_IMAGE=', 'initrd='))]
def kernel_cmdline(opts) -> str:
options = kernel_cmdline_base()
# If the boot entries are named after the machine ID, then suffix the kernel
# command line with the machine ID we use, so that the machine ID remains
# stable, even during factory reset, in the initrd (where the system's machine
# ID is not directly accessible yet), and if the root file system is volatile.
if (opts.entry_token == opts.machine_id and
not any(opt.startswith('systemd.machine_id=') for opt in options)):
options += [f'systemd.machine_id={opts.machine_id}']
# TODO: we unconditionally set the cmdline here, ignoring the setting in
# the config file. Should we not do that?
# Prepend a space so that '@' does not get misinterpreted
return ' ' + ' '.join(options)
def METHOD_NAME(opts) -> list[Path]:
microcode = sorted(opts.staging_area.glob('microcode/*'))
initrd = sorted(opts.staging_area.glob('initrd*'))
# Order taken from 90-loaderentry.install
return [*microcode, *opts.initrd, *initrd]
def call_ukify(opts):
# Punish me harder.
# We want this:
# ukify = importlib.machinery.SourceFileLoader('ukify', UKIFY).load_module()
# but it throws a DeprecationWarning.
# https://stackoverflow.com/questions/67631/how-can-i-import-a-module-dynamically-given-the-full-path
# https://github.com/python/cpython/issues/65635
# offer "explanations", but to actually load a python file without a .py extension,
# the "solution" is 4+ incomprehensible lines.
# The solution with runpy gives a dictionary, which isn't great, but will do.
ukify = runpy.run_path(UKIFY, run_name='ukify')
# Create "empty" namespace. We want to override just a few settings, so it
# doesn't make sense to configure everything. We pretend to parse an empty
# argument set to prepopulate the namespace with the defaults.
opts2 = ukify['create_parser']().parse_args(['build'])
opts2.config = config_file_location()
opts2.uname = opts.kernel_version
opts2.linux = opts.kernel_image
opts2.initrd = METHOD_NAME(opts)
# Note that 'uki.efi' is the name required by 90-uki-copy.install.
opts2.output = opts.staging_area / 'uki.efi'
opts2.cmdline = kernel_cmdline(opts)
if BOOT_STUB:
opts2.stub = BOOT_STUB
# opts2.summary = True
ukify['apply_config'](opts2)
ukify['finalize_options'](opts2)
ukify['check_inputs'](opts2)
ukify['make_uki'](opts2)
log(f'{opts2.output} has been created')
def main():
opts = parse_args()
if opts.command != 'add':
return
if not we_are_wanted():
return
call_ukify(opts)
if __name__ == '__main__':
main()
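# --- usage note (illustrative, not part of the original script) ---
# kernel-install invokes this plugin with the positional arguments parsed above and a
# set of KERNEL_INSTALL_* environment variables, roughly:
#
#   KERNEL_INSTALL_LAYOUT=uki \
#   KERNEL_INSTALL_STAGING_AREA=/path/to/staging \
#   KERNEL_INSTALL_ENTRY_TOKEN=<token> KERNEL_INSTALL_MACHINE_ID=<machine-id> \
#   60-ukify.install add <kernel-version> <entry-dir> <kernel-image> <initrd...>
#
# Paths and values above are placeholders; the mandatory variables match
# mandatory_variable() and the layout/generator checks in we_are_wanted().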
| null |
4,838 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Find specific type ast node in specific scope."""
from typing import Type, Any
import ast
class AstFinder(ast.NodeVisitor):
"""
Find all ast nodes of the specified types within a given scope.
Args:
node (ast.AST): An instance of ast node as search scope.
"""
def __init__(self, node: ast.AST):
self._scope: ast.AST = node
self._targets: tuple = ()
self._results: [ast.AST] = []
def generic_visit(self, node):
"""
An overridden method that iterates over all nodes and saves the matching ast nodes.
Args:
node (ast.AST): An instance of ast node which is visited currently.
"""
if isinstance(node, self._targets):
self._results.append(node)
super(AstFinder, self).generic_visit(node)
def find_all(self, ast_types) -> [ast.AST]:
"""
Find all matching ast nodes.
Args:
ast_types (Union[tuple(Type), Type]): A tuple of types or a single type indicating the target ast node type.
Returns:
A list of instance of ast.AST as matched result.
Raises:
ValueError: If input `ast_types` is not a type nor a tuple.
"""
if isinstance(ast_types, Type):
self._targets: tuple = (ast_types,)
else:
if not isinstance(ast_types, tuple):
raise ValueError("Input ast_types should be a tuple or a type")
self._targets: tuple = ast_types
self._results.clear()
self.visit(self._scope)
return self._results
class StrChecker(ast.NodeVisitor):
"""
Check whether a specific string occurs within a given scope.
Args:
node (ast.AST): An instance of ast node as check scope.
"""
def __init__(self, node: ast.AST):
self._context = node
self._pattern = ""
self._hit = False
def visit_Attribute(self, node: ast.Attribute) -> Any:
"""Visit a node of type ast.Attribute."""
if isinstance(node.value, ast.Name) and node.value.id == self._pattern:
self._hit = True
return super(StrChecker, self).generic_visit(node)
def METHOD_NAME(self, node: ast.Name) -> Any:
"""Visit a node of type ast.Name."""
if node.id == self._pattern:
self._hit = True
return super(StrChecker, self).generic_visit(node)
def generic_visit(self, node: ast.AST) -> Any:
for _, value in ast.iter_fields(node):
if self._hit:
break
if isinstance(value, (list, tuple)):
for item in value:
if isinstance(item, ast.AST):
self.visit(item)
if self._hit:
break
elif isinstance(value, dict):
for item in value.values():
if isinstance(item, ast.AST):
self.visit(item)
if self._hit:
break
elif isinstance(value, ast.AST):
self.visit(value)
def check(self, pattern: str) -> bool:
"""
Check if `pattern` exists in `_context`.
Args:
pattern (str): A string indicating the target pattern.
Returns:
A bool indicates if `pattern` exists in `_context`.
"""
self._pattern = pattern
self._hit = False
self.generic_visit(self._context)
return self._hit
class FindConstValueInInit(ast.NodeVisitor):
"""
Check whether a specific constant value occurs within a given scope.
Args:
node (ast.AST): An instance of ast node as check scope.
"""
def __init__(self, node: ast.AST):
self._context = node
self._pattern = ""
self._hit = False
def visit_Constant(self, node: ast.Constant):
if node.value == self._pattern:
self._hit = True
return node
def check(self, pattern: str) -> bool:
"""
Check if `pattern` exists in `_context`.
Args:
pattern (str): A string indicating the target pattern.
Returns:
A bool indicates if `pattern` exists in `_context`.
"""
self._pattern = pattern
self._hit = False
self.generic_visit(self._context)
return self._hit
class CheckPropertyIsUsed(ast.NodeVisitor):
"""
Check whether a property is used.
Args:
node (ast.AST): An instance of ast node.
"""
def __init__(self, node: ast.AST):
self._context = node
self._value = ""
self._attr = ""
self._hit = False
def visit_Attribute(self, node: ast.Attribute) -> Any: # pylint: disable=invalid-name
"""Visit a node of type ast.Attribute."""
if isinstance(node.value, ast.Name) and node.value.id == self._value and node.attr == self._attr:
self._hit = True
return super(CheckPropertyIsUsed, self).generic_visit(node)
def generic_visit(self, node: ast.AST) -> Any:
"""
An overridden method that stops traversal as soon as the target attribute access is found.
"""
if self._hit:
return
super(CheckPropertyIsUsed, self).generic_visit(node)
def check(self, value, attr) -> bool:
"""
Check whether an access of `attr` on `value` exists.
"""
self._value = value
self._attr = attr
self._hit = False
self.generic_visit(self._context)
return self._hit
class GetPropertyOfObj(ast.NodeVisitor):
"""
Collect the properties assigned on `self` within the given scope.
Args:
node (ast.AST): An instance of ast node.
"""
def __init__(self, node: ast.AST):
self._context = node
self._property = set()
def visit_Assign(self, node: ast.Assign) -> Any: # pylint: disable=invalid-name
"""Visit a node of type ast.Attribute."""
target = node.targets[0]
if isinstance(target, ast.Attribute) and isinstance(target.value, ast.Name) and target.value.id == "self":
self._property.add(target.attr)
return super(GetPropertyOfObj, self).generic_visit(node)
def get(self):
"""
Return the set of properties assigned on `self`.
"""
self._property = set()
self.generic_visit(self._context)
return self._property
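# --- usage sketch (illustrative, not part of the original module) ---
# A hedged example of the finders applied to a tiny, made-up source snippet.
if __name__ == "__main__":
    sample = "def construct(self, x):\n    return self.conv(x)\n"
    tree = ast.parse(sample)
    print(AstFinder(tree).find_all((ast.FunctionDef,)))  # all function definitions in scope
    print(StrChecker(tree).check("self"))                # True: 'self' is referenced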
| null |
4,839 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
import mindspore.nn as nn
import mindspore.context as context
class DTypeNet(nn.Cell):
def __init__(self):
super(DTypeNet, self).__init__()
self.dtype = P.DType()
def construct(self, x):
return self.dtype(x)
class DTypeDynamicNet(nn.Cell):
def __init__(self):
super(DTypeDynamicNet, self).__init__()
self.d = inner.GpuConvertToDynamicShape()
self.dtype = P.DType()
def construct(self, x):
x = self.d(x)
return self.dtype(x)
def dtype_with_testcase(mstype):
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
x = Tensor(np.arange(34).reshape(2, 17), dtype=mstype)
net = DTypeNet()
assert mstype == net(x)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
assert mstype == net(x)
def dtype_dynamic_with_testcase(mstype):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x = Tensor(np.arange(34).reshape(2, 17), dtype=mstype)
net = DTypeDynamicNet()
assert mstype == net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_bool():
dtype_with_testcase(ms.bool_)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
dtype_with_testcase(ms.int8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_uint8():
dtype_with_testcase(ms.uint8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int16():
dtype_with_testcase(ms.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_uint16():
dtype_with_testcase(ms.uint16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int32():
dtype_with_testcase(ms.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_int64():
dtype_with_testcase(ms.int64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_float16():
dtype_with_testcase(ms.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_float32():
dtype_with_testcase(ms.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dtype_float64():
dtype_with_testcase(ms.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_bool():
dtype_dynamic_with_testcase(ms.bool_)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int8():
dtype_dynamic_with_testcase(ms.int8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_uint8():
dtype_dynamic_with_testcase(ms.uint8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int16():
dtype_dynamic_with_testcase(ms.int16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_uint16():
dtype_dynamic_with_testcase(ms.uint16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int32():
dtype_dynamic_with_testcase(ms.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_int64():
dtype_dynamic_with_testcase(ms.int64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_float16():
dtype_dynamic_with_testcase(ms.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_float32():
dtype_dynamic_with_testcase(ms.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_dtype_float64():
dtype_dynamic_with_testcase(ms.float64)
| null |
4,840 |
# The MIT License (MIT)
#
# Copyright (c) 2019 Looker Data Sciences, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Base model for all generated models
"""
import collections
import datetime
import enum
import functools
import keyword
from typing import Any, Iterable, Optional, Sequence, TypeVar, cast
import cattr
from looker_sdk.rtl import hooks
try:
from typing import ForwardRef # type: ignore
except ImportError:
from typing import _ForwardRef as ForwardRef # type: ignore
EXPLICIT_NULL = cast(Any, "EXPLICIT_NULL") # type:ignore
class Model:
"""Base model for all generated models."""
def _get_converter(self):
if not hasattr(self, "_converter"):
converter = cattr.Converter()
converter.register_unstructure_hook(
datetime.datetime, hooks.datetime_unstructure_hook
)
uh = functools.partial(hooks.unstructure_hook, converter)
converter.register_unstructure_hook(Model, uh) # type: ignore
self._converter = converter
return self._converter
def _key_to_attr(self, key):
"""Appends the trailing _ to python reserved words."""
if key[-1] == "_":
raise KeyError(key)
if key in keyword.kwlist:
key = f"{key}_"
return key
def __getitem__(self, key):
key = self._key_to_attr(key)
try:
ret = getattr(self, key)
except AttributeError:
raise KeyError(key)
if isinstance(ret, enum.Enum):
ret = ret.value
return ret
def __setitem__(self, key, value):
key = self._key_to_attr(key)
if not hasattr(self, key):
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{key}'"
)
annotation = self.__annotations__[key]
if isinstance(annotation, ForwardRef):
actual_type = eval(
annotation.__forward_arg__, self.__global_context, locals()
)
if isinstance(actual_type, enum.EnumMeta):
# untyped because mypy really doesn't like this enum internals stuff
def err(val):
valid = []
for v in actual_type.__members__.values():
if v.value != "invalid_api_enum_value":
valid.METHOD_NAME(v.value)
return (
f"Invalid value '{val}' for " # type: ignore
f"'{self.__class__.__name__}.{key}'. Valid values are "
f"{valid}" # type: ignore
)
if isinstance(value, actual_type):
raise ValueError(err(value))
enum_member = actual_type(value)
if enum_member.value == "invalid_api_enum_value":
raise ValueError(err(value))
value = enum_member
elif issubclass(actual_type, Model):
value = self._get_converter().structure(value, actual_type)
return setattr(self, key, value)
def __delitem__(self, key):
self[key] # validates key
setattr(self, self._key_to_attr(key), None)
def __iter__(self):
return iter(self._get_converter().unstructure(self))
def __len__(self):
return len(self._get_converter().unstructure(self))
def __contains__(self, key):
return key in self._get_converter().unstructure(self)
def keys(self):
return self._get_converter().unstructure(self).keys()
def items(self):
return self._get_converter().unstructure(self).items()
def values(self):
return self._get_converter().unstructure(self).values()
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=None):
ret = self.get(key, default)
if key in self:
del self[key]
return ret
def popitem(self):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def update(self, iterable=None, **kwargs):
if iterable:
has_keys = getattr(iterable, "keys", None)
if callable(has_keys):
for k in iterable:
self[k] = iterable[k]
else:
for k, v in iterable:
self[k] = v
for k in kwargs:
self[k] = kwargs[k]
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def copy(self):
raise NotImplementedError()
def safe_enum__new__(cls, value):
"""Handle out-of-spec enum values returned by API.
This is achieved by overriding the __new__ method to return
`invalid_api_enum_value` (defined on each subclass) when an
unexpected value for the enum is returned by the API.
"""
if not isinstance(value, (str, int, bool)):
return super().__new__(cls, value)
else:
vals = {v.value: v for v in cls.__members__.values()}
return vals.get(value, cls.invalid_api_enum_value)
T = TypeVar("T")
class DelimSequence(collections.UserList, Sequence[T]):
def __init__(
self,
data: Optional[Sequence[T]] = None,
prefix: str = "",
suffix: str = "",
separator: str = ",",
):
self.prefix = prefix
self.suffix = suffix
self.separator = separator
super().__init__(data)
def METHOD_NAME(self, elem: T):
super().METHOD_NAME(elem)
def extend(self, iterable: Iterable[T]):
super().extend(iterable)
def insert(self, i: int, elem: T):
super().insert(i, elem)
def remove(self, elem: T):
super().remove(elem)
def index(self, x: T, *args):
super().index(x, *args) # type: ignore
def count(self, elem: T):
super().count(elem)
def __str__(self):
return (
f"{self.prefix}"
f"{self.separator.join(str(d) for d in self.data)}"
f"{self.suffix}"
)
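# --- usage sketch (illustrative, not part of the generated SDK) ---
# DelimSequence renders its elements joined by `separator` and wrapped in
# `prefix`/`suffix`, e.g. for comma-delimited query parameters.
if __name__ == "__main__":
    fields = DelimSequence(["id", "name", "email"])
    print(str(fields))  # id,name,email
    print(str(DelimSequence([1, 2, 3], prefix="[", suffix="]", separator="|")))  # [1|2|3]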
| null |
4,841 |
"""
Edge class for Jaseci
Each edge has an id, name, timestamp, the node it points from, and the node it
points to.
"""
from jaseci.prim.element import Element
from jaseci.prim.obj_mixins import Anchored
from jaseci.utils.utils import logger
import uuid
import sys
class Edge(Element, Anchored):
"""Edge class for Jaseci"""
def __init__(self, **kwargs):
self.from_node_id = None
self.to_node_id = None
self.bidirected: bool = False
Anchored.__init__(self)
Element.__init__(self, **kwargs)
def from_node(self):
"""Returns node edge is pointing from"""
ret = (
self._h.get_obj(self._m_id, self.from_node_id)
if self.from_node_id
else None
)
if not ret:
logger.critical(str(f"{self} disconnected from source node"))
return None
else:
return ret
def METHOD_NAME(self):
"""Returns node edge is pointing to"""
if not self.to_node_id:
return None
ret = self._h.get_obj(self._m_id, self.to_node_id) if self.to_node_id else None
if not ret:
logger.critical(str(f"{self} disconnected to target node"))
return None
else:
return ret
def nodes(self):
"""Returns both nodes connected to edge in a list"""
return [self.METHOD_NAME(), self.from_node()]
def opposing_node(self, node_obj):
"""Returns opposite node edge is pointing from node_obj"""
node_set = [self.to_node_id, self.from_node_id]
try:
node_set.remove(node_obj.jid)
return self._h.get_obj(self._m_id, node_set[0])
except ValueError:
logger.critical(str(f"{self} disconnected to node {node_obj}"))
return None
def connect(self, source, target, bi_dir=False):
"""
Connects both ends of the edge
"""
self.from_node_id = source.jid
self.to_node_id = target.jid
source.smart_add_edge(self)
target.smart_add_edge(self)
self.set_bidirected(bi_dir)
self.save()
return True
def set_bidirected(self, bidirected: bool):
"""Sets/unsets edge to be bidirected"""
self.bidirected = bidirected
self.save()
def is_bidirected(self):
"""Check if edge is bidirected"""
return self.bidirected
def connects(self, source=None, target=None, ignore_direction=False):
"""Test if a node or nodes are connected by edge"""
if not source and not target:
return False
if self.bidirected or ignore_direction:
if source and source.jid not in [self.from_node_id, self.to_node_id]:
return False
if target and target.jid not in [self.from_node_id, self.to_node_id]:
return False
else:
if source and source.jid != self.from_node_id:
return False
if target and target.jid != self.to_node_id:
return False
return True
def is_fast(self):
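        # Treat edges with a small context payload (under ~2KB) as "fast"; save() then
        # clears _persist so the edge is not written to persistent storage on its own.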
return sys.getsizeof(self.context) < 2000
def save(self):
"""
Write self through hook to persistent storage
"""
if self.is_fast():
self._persist = False
if self.from_node_id:
self.from_node().save()
if self.to_node_id:
self.METHOD_NAME().save()
super().save()
def destroy(self):
"""
Destroys self from memory and persistent storage
"""
base = self.from_node()
target = self.METHOD_NAME()
base.smart_remove_edge(self) if base else None
target.smart_remove_edge(self) if target else None
super().destroy()
def dot_str(self, node_map=None, edge_map=None, detailed=False):
"""
DOT representation
from_node -> to_node [context_key=context_value]
"""
def handle_str(str):
return str[:32].replace('"', '\\"')
from_name = (
uuid.UUID(self.from_node().jid).hex
if node_map is None
else node_map.index(self.from_node().jid)
)
to_name = (
uuid.UUID(self.METHOD_NAME().jid).hex
if node_map is None
else node_map.index(self.METHOD_NAME().jid)
)
dstr = f'"n{from_name}" -> "n{to_name}" [ '
if detailed:
dstr += f'id="{uuid.UUID(self.jid).hex}", '
label = ""
if edge_map:
label = f"e{edge_map.index(self.jid)}"
if self.name != "generic":
label += f":{self.name}"
dstr += f'label="{label}"'
if self.bidirected:
dstr += ', dir="both"'
edge_dict = self.context
for i in self.private_values():
edge_dict.pop(i)
if edge_dict and detailed:
for k, v in edge_dict.items():
if not isinstance(v, str) or v == "":
continue
dstr += f', {k}="{handle_str(v)}"'
dstr += " ]"
return dstr + "\n"
| null |
4,842 |
import os
import pytest
import pytorch_pfn_extras as ppe
import torch
_profiler_available = os.name != "nt"
@pytest.mark.skipif(not _profiler_available, reason="profiler is not available")
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_record(device):
if not torch.cuda.is_available() and device == "cuda":
pytest.skip()
model = torch.nn.Linear(30, 40)
model.to(device)
x = torch.arange(30, dtype=torch.float32).to(device)
with torch.profiler.profile() as prof:
with ppe.profiler.record("my_tag_1"):
model(x)
keys = [event.key for event in prof.key_averages()]
assert "my_tag_1" in keys
assert "aten::linear" in keys
@pytest.mark.skipif(not _profiler_available, reason="profiler is not available")
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_record_without_tag(device):
if not torch.cuda.is_available() and device == "cuda":
pytest.skip()
model = torch.nn.Linear(30, 40)
model.to(device)
x = torch.arange(30, dtype=torch.float32).to(device)
with torch.profiler.profile() as prof:
with ppe.profiler.record(None):
model(x)
keys = [event.key for event in prof.key_averages()]
assert "aten::linear" in keys
assert any(k.endswith("test_record_without_tag") for k in keys)
@pytest.mark.skipif(not _profiler_available, reason="profiler is not available")
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_record_function(device):
if not torch.cuda.is_available() and device == "cuda":
pytest.skip()
model = torch.nn.Linear(30, 40)
model.to(device)
@ppe.profiler.record_function("my_tag_2")
def my_run(x):
model(x)
with torch.profiler.profile() as prof:
x = torch.arange(30, dtype=torch.float32).to(device)
my_run(x)
keys = [event.key for event in prof.key_averages()]
assert "aten::linear" in keys
assert "my_tag_2" in keys
@pytest.mark.skipif(not _profiler_available, reason="profiler is not available")
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_record_function_without_tag(device):
if not torch.cuda.is_available() and device == "cuda":
pytest.skip()
model = torch.nn.Linear(30, 40)
model.to(device)
x = torch.arange(30, dtype=torch.float32).to(device)
@ppe.profiler.record_function(None)
def my_run(x):
model(x)
with torch.profiler.profile() as prof:
my_run(x)
keys = [event.key for event in prof.key_averages()]
assert "aten::linear" in keys
assert "my_run" in keys
@pytest.mark.skipif(not _profiler_available, reason="profiler is not available")
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_record_iterable(device):
if not torch.cuda.is_available() and device == "cuda":
pytest.skip()
model = torch.nn.Linear(30, 40)
model.to(device)
x = torch.arange(30, dtype=torch.float32).to(device)
iters = [x, x, x]
with torch.profiler.profile() as prof:
for x in ppe.profiler.record_iterable("my_tag_3", iters):
model(x)
keys = [event.key for event in prof.key_averages()]
assert "aten::linear" in keys
assert "my_tag_3-0" in keys
assert "my_tag_3-1" in keys
assert "my_tag_3-2" in keys
@pytest.mark.skipif(not _profiler_available, reason="profiler is not available")
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def METHOD_NAME(device):
if not torch.cuda.is_available() and device == "cuda":
pytest.skip()
model = torch.nn.Linear(30, 40)
model.to(device)
x = torch.arange(30, dtype=torch.float32).to(device)
iters = [x, x, x]
with torch.profiler.profile() as prof:
for x in ppe.profiler.record_iterable(None, iters):
model(x)
keys = [event.key for event in prof.key_averages()]
assert "aten::linear" in keys
assert any(k.endswith("test_record_iterable_without_tag-0") for k in keys)
assert any(k.endswith("test_record_iterable_without_tag-1") for k in keys)
assert any(k.endswith("test_record_iterable_without_tag-2") for k in keys)
| null |
4,843 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Uniform.
"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
from mindspore import context
skip_flag = context.get_context("device_target") == "CPU"
def test_uniform_shape_error():
"""
Invalid shapes.
"""
with pytest.raises(ValueError):
msd.Uniform([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
def test_type():
with pytest.raises(TypeError):
msd.Uniform(0., 1., dtype=dtype.int32)
def test_name():
with pytest.raises(TypeError):
msd.Uniform(0., 1., name=1.0)
def test_seed():
with pytest.raises(TypeError):
msd.Uniform(0., 1., seed='seed')
def test_arguments():
"""
Args passing during initialization.
"""
u = msd.Uniform()
assert isinstance(u, msd.Distribution)
u = msd.Uniform([3.0], [4.0], dtype=dtype.float32)
assert isinstance(u, msd.Distribution)
def test_invalid_range():
"""
Test range of uniform distribution.
"""
with pytest.raises(ValueError):
msd.Uniform(0.0, 0.0, dtype=dtype.float32)
with pytest.raises(ValueError):
msd.Uniform(1.0, 0.0, dtype=dtype.float32)
class UniformProb(nn.Cell):
"""
Uniform distribution: initialize with low/high.
"""
def __init__(self):
super(UniformProb, self).__init__()
self.u = msd.Uniform(3.0, 4.0, dtype=dtype.float32)
def construct(self, value):
prob = self.u.prob(value)
log_prob = self.u.log_prob(value)
cdf = self.u.cdf(value)
log_cdf = self.u.log_cdf(value)
sf = self.u.survival_function(value)
log_sf = self.u.log_survival(value)
return prob + log_prob + cdf + log_cdf + sf + log_sf
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_uniform_prob():
"""
Test probability functions: passing value through construct.
"""
net = UniformProb()
value = Tensor([3.1, 3.2, 3.3, 3.4], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class UniformProb1(nn.Cell):
"""
Uniform distribution: initialize without low/high.
"""
def __init__(self):
super(UniformProb1, self).__init__()
self.u = msd.Uniform(dtype=dtype.float32)
def construct(self, value, low, high):
prob = self.u.prob(value, low, high)
log_prob = self.u.log_prob(value, low, high)
cdf = self.u.cdf(value, low, high)
log_cdf = self.u.log_cdf(value, low, high)
sf = self.u.survival_function(value, low, high)
log_sf = self.u.log_survival(value, low, high)
return prob + log_prob + cdf + log_cdf + sf + log_sf
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_uniform_prob1():
"""
Test probability functions: passing low/high, value through construct.
"""
net = UniformProb1()
value = Tensor([0.1, 0.2, 0.3, 0.9], dtype=dtype.float32)
low = Tensor([0.0], dtype=dtype.float32)
high = Tensor([1.0], dtype=dtype.float32)
ans = net(value, low, high)
assert isinstance(ans, Tensor)
class UniformKl(nn.Cell):
"""
Test class: kl_loss of Uniform distribution.
"""
def __init__(self):
super(UniformKl, self).__init__()
self.u1 = msd.Uniform(
np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.u2 = msd.Uniform(dtype=dtype.float32)
def construct(self, low_b, high_b, low_a, high_a):
kl1 = self.u1.kl_loss('Uniform', low_b, high_b)
kl2 = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
return kl1 + kl2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_kl():
"""
Test kl_loss.
"""
net = UniformKl()
low_b = Tensor(np.array([0.0]).astype(np.float32), dtype=dtype.float32)
high_b = Tensor(np.array([5.0]).astype(np.float32), dtype=dtype.float32)
low_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
high_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(low_b, high_b, low_a, high_a)
assert isinstance(ans, Tensor)
class UniformCrossEntropy(nn.Cell):
"""
Test class: cross_entropy of Uniform distribution.
"""
def __init__(self):
super(UniformCrossEntropy, self).__init__()
self.u1 = msd.Uniform(
np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.u2 = msd.Uniform(dtype=dtype.float32)
def construct(self, low_b, high_b, low_a, high_a):
h1 = self.u1.cross_entropy('Uniform', low_b, high_b)
h2 = self.u2.cross_entropy('Uniform', low_b, high_b, low_a, high_a)
return h1 + h2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def METHOD_NAME():
"""
Test cross_entropy between Uniform distributions.
"""
net = UniformCrossEntropy()
low_b = Tensor(np.array([0.0]).astype(np.float32), dtype=dtype.float32)
high_b = Tensor(np.array([5.0]).astype(np.float32), dtype=dtype.float32)
low_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
high_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(low_b, high_b, low_a, high_a)
assert isinstance(ans, Tensor)
class UniformBasics(nn.Cell):
"""
Test class: basic mean/sd/var/mode/entropy function.
"""
def __init__(self):
super(UniformBasics, self).__init__()
self.u = msd.Uniform(3.0, 4.0, dtype=dtype.float32)
def construct(self):
mean = self.u.mean()
sd = self.u.sd()
var = self.u.var()
entropy = self.u.entropy()
return mean + sd + var + entropy
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_basics():
"""
Test mean/sd/var/mode/entropy functionality of Uniform.
"""
net = UniformBasics()
ans = net()
assert isinstance(ans, Tensor)
class UniConstruct(nn.Cell):
"""
Uniform distribution: going through construct.
"""
def __init__(self):
super(UniConstruct, self).__init__()
self.u = msd.Uniform(-4.0, 4.0)
self.u1 = msd.Uniform()
def construct(self, value, low, high):
prob = self.u('prob', value)
prob1 = self.u('prob', value, low, high)
prob2 = self.u1('prob', value, low, high)
return prob + prob1 + prob2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_uniform_construct():
"""
Test probability function going through construct.
"""
net = UniConstruct()
value = Tensor([-5.0, 0.0, 1.0, 5.0], dtype=dtype.float32)
low = Tensor([-1.0], dtype=dtype.float32)
high = Tensor([1.0], dtype=dtype.float32)
ans = net(value, low, high)
assert isinstance(ans, Tensor)
| null |
4,844 |
from __future__ import annotations
import logging
from collections import defaultdict
from datetime import datetime
from typing import Optional, TYPE_CHECKING
from game.theater import ControlPoint
from .coalition import Coalition
from .dcs.groundunittype import GroundUnitType
from .theater.transitnetwork import (
NoPathError,
TransitNetwork,
)
from .transfers import TransferOrder
if TYPE_CHECKING:
from .game import Game
class GroundUnitOrders:
def __init__(self, destination: ControlPoint) -> None:
self.destination = destination
# Maps unit type to order quantity.
self.units: dict[GroundUnitType, int] = defaultdict(int)
def __str__(self) -> str:
return f"Pending ground unit delivery to {self.destination}"
def order(self, units: dict[GroundUnitType, int]) -> None:
for k, v in units.items():
self.units[k] += v
def sell(self, units: dict[GroundUnitType, int]) -> None:
for k, v in units.items():
self.units[k] -= v
if self.units[k] == 0:
del self.units[k]
def refund_all(self, coalition: Coalition) -> None:
self._refund(coalition, self.units)
self.units = defaultdict(int)
def _refund(self, coalition: Coalition, units: dict[GroundUnitType, int]) -> None:
for unit_type, count in units.items():
logging.info(f"Refunding {count} {unit_type} at {self.destination.name}")
coalition.adjust_budget(unit_type.price * count)
def METHOD_NAME(self, unit_type: GroundUnitType) -> int:
pending_units = self.units.get(unit_type)
if pending_units is None:
pending_units = 0
return pending_units
def process(self, game: Game, now: datetime) -> None:
coalition = game.coalition_for(self.destination.captured)
ground_unit_source = self.find_ground_unit_source(game)
if ground_unit_source is None:
game.message(
f"{self.destination.name} lost its source for ground unit "
"reinforcements. Refunding purchase price."
)
self.refund_all(coalition)
bought_units: dict[GroundUnitType, int] = {}
units_needing_transfer: dict[GroundUnitType, int] = {}
for unit_type, count in self.units.items():
allegiance = "Ally" if self.destination.captured else "Enemy"
d: dict[GroundUnitType, int]
if self.destination != ground_unit_source:
source = ground_unit_source
d = units_needing_transfer
else:
source = self.destination
d = bought_units
if count < 0:
logging.error(
f"Attempted sale of {unit_type} at {self.destination} but ground "
"units cannot be sold"
)
elif count > 0:
d[unit_type] = count
game.message(
f"{allegiance} reinforcements: {unit_type} x {count} at {source}"
)
self.units = defaultdict(int)
self.destination.base.commission_units(bought_units)
if units_needing_transfer:
if ground_unit_source is None:
raise RuntimeError(
f"Ground unit source could not be found for {self.destination} but "
"still tried to transfer units to there"
)
ground_unit_source.base.commission_units(units_needing_transfer)
self.create_transfer(
coalition, ground_unit_source, units_needing_transfer, now
)
def create_transfer(
self,
coalition: Coalition,
source: ControlPoint,
units: dict[GroundUnitType, int],
now: datetime,
) -> None:
coalition.transfers.new_transfer(
TransferOrder(source, self.destination, units), now
)
def find_ground_unit_source(self, game: Game) -> Optional[ControlPoint]:
# This is running *after* the turn counter has been incremented, so this is the
# reaction to turn 0. On turn zero we allow units to be recruited anywhere for
# delivery on turn 1 so that turn 1 always starts with units on the front line.
if game.turn == 1:
return self.destination
# Fast path if the destination is a valid source.
if self.destination.can_recruit_ground_units(game):
return self.destination
try:
return self.find_ground_unit_source_in_network(
game.transit_network_for(self.destination.captured), game
)
except NoPathError:
return None
def find_ground_unit_source_in_network(
self, network: TransitNetwork, game: Game
) -> Optional[ControlPoint]:
sources = []
for control_point in game.theater.control_points_for(self.destination.captured):
if control_point.can_recruit_ground_units(
game
) and network.has_path_between(self.destination, control_point):
sources.append(control_point)
if not sources:
return None
# Fast path to skip the distance calculation if we have only one option.
if len(sources) == 1:
return sources[0]
closest = sources[0]
_, cost = network.shortest_path_with_cost(self.destination, closest)
for source in sources:
_, new_cost = network.shortest_path_with_cost(self.destination, source)
if new_cost < cost:
closest = source
cost = new_cost
return closest
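# Illustrative sketch (not part of the original module): the closest-source search in
# find_ground_unit_source_in_network above is a plain argmin over path cost. The same
# idea in isolation, with a hypothetical cost_fn standing in for
# TransitNetwork.shortest_path_with_cost.
def _closest_source_sketch(destination, sources, cost_fn):
    best = sources[0]
    best_cost = cost_fn(destination, best)
    for candidate in sources[1:]:
        candidate_cost = cost_fn(destination, candidate)
        if candidate_cost < best_cost:
            best, best_cost = candidate, candidate_cost
    return best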
| null |
4,845 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A subclass of unittest.TestCase which checks for reference leaks.
To use:
- Use testing_refleak.BaseTestCase instead of unittest.TestCase
- Configure and compile Python with --with-pydebug
If sys.gettotalrefcount() is not available (because Python was built without
the Py_DEBUG option), then this module is a no-op and tests will run normally.
"""
import gc
import sys
try:
import copy_reg as copyreg #PY26
except ImportError:
import copyreg
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
class LocalTestResult(unittest.TestResult):
"""A TestResult which forwards events to a parent object, except for Skips."""
def __init__(self, parent_result):
unittest.TestResult.__init__(self)
self.parent_result = parent_result
def addError(self, test, error):
self.parent_result.addError(test, error)
def addFailure(self, test, error):
self.parent_result.addFailure(test, error)
def addSkip(self, test, reason):
pass
class ReferenceLeakCheckerTestCase(unittest.TestCase):
"""A TestCase which runs tests multiple times, collecting reference counts."""
NB_RUNS = 3
def run(self, result=None):
# python_message.py registers all Message classes to some pickle global
# registry, which makes the classes immortal.
    # We save a copy of this registry, and reset it before we count references.
self._saved_pickle_registry = copyreg.dispatch_table.copy()
# Run the test twice, to warm up the instance attributes.
super(ReferenceLeakCheckerTestCase, self).run(result=result)
super(ReferenceLeakCheckerTestCase, self).run(result=result)
oldrefcount = 0
local_result = LocalTestResult(result)
refcount_deltas = []
for _ in range(self.NB_RUNS):
oldrefcount = self.METHOD_NAME()
super(ReferenceLeakCheckerTestCase, self).run(result=local_result)
newrefcount = self.METHOD_NAME()
refcount_deltas.append(newrefcount - oldrefcount)
print(refcount_deltas, self)
try:
self.assertEqual(refcount_deltas, [0] * self.NB_RUNS)
except Exception: # pylint: disable=broad-except
result.addError(self, sys.exc_info())
def METHOD_NAME(self):
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(self._saved_pickle_registry)
# It is sometimes necessary to gc.collect() multiple times, to ensure
# that all objects can be collected.
gc.collect()
gc.collect()
gc.collect()
return sys.gettotalrefcount()
if hasattr(sys, 'gettotalrefcount'):
BaseTestCase = ReferenceLeakCheckerTestCase
SkipReferenceLeakChecker = unittest.skip
else:
# When PyDEBUG is not enabled, run the tests normally.
BaseTestCase = unittest.TestCase
def SkipReferenceLeakChecker(reason):
del reason # Don't skip, so don't need a reason.
def Same(func):
return func
return Same
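# Illustrative usage sketch (hypothetical test, not part of this module): a test case
# opts into the leak check simply by subclassing BaseTestCase. When Python is built
# without Py_DEBUG, BaseTestCase is a plain unittest.TestCase and the test runs once
# as usual.
class _ExampleRefleakUsage(BaseTestCase):

  def test_no_leak(self):
    data = list(range(10))  # any per-test allocation is freed between runs
    self.assertEqual(len(data), 10)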
| null |
4,846 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
General Validator Helper Functions.
"""
import os
import inspect
UINT32_MAX = 4294967295
UINT32_MIN = 0
UINT64_MAX = 18446744073709551615
UINT64_MIN = 0
def pad_arg_name(arg_name):
"""Add a space for arg_name."""
if arg_name != "":
arg_name = arg_name + " "
return arg_name
def check_value(arg, valid_range, arg_name=""):
"""Check the value of arg is in a valid range."""
arg_name = pad_arg_name(arg_name)
if arg < valid_range[0] or arg > valid_range[1]:
raise ValueError(
"Input {0}is not within the required interval of ({1} to {2}).".format(arg_name,
valid_range[0], valid_range[1]))
def METHOD_NAME(arg, arg_name=""):
"""Check arg type is uint32."""
type_check(arg, (int,), arg_name)
check_value(arg, [UINT32_MIN, UINT32_MAX])
def check_uint64(arg, arg_name=""):
"""Check arg type is uint64."""
type_check(arg, (int,), arg_name)
check_value(arg, [UINT64_MIN, UINT64_MAX])
def check_iteration(arg, arg_name=""):
"""Check arg is in a valid range."""
type_check(arg, (int,), arg_name)
check_value(arg, [-1, UINT64_MAX])
def check_dir(dataset_dir):
"""Check the dataset_dir is a valid dir."""
if not os.path.isdir(dataset_dir) or not os.access(dataset_dir, os.R_OK):
raise ValueError("The folder {} does not exist or permission denied!".format(dataset_dir))
def parse_user_args(method, *args, **kwargs):
"""
Parse user arguments in a function.
Args:
method (method): a callable function.
args: user passed args.
kwargs: user passed kwargs.
Returns:
        user_filled_args (list): values the user passed in for the arguments.
        ba.arguments (Ordered Dict): ordered dict mapping each parameter to the argument the user passed.
"""
sig = inspect.signature(method)
if 'self' in sig.parameters or 'cls' in sig.parameters:
ba = sig.bind(method, *args, **kwargs)
ba.apply_defaults()
params = list(sig.parameters.keys())[1:]
else:
ba = sig.bind(*args, **kwargs)
ba.apply_defaults()
params = list(sig.parameters.keys())
user_filled_args = [ba.arguments.get(arg_value) for arg_value in params]
return user_filled_args, ba.arguments
def type_check(arg, types, arg_name):
"""
Check the type of the parameter.
Args:
arg (Any) : any variable.
types (tuple): tuple of all valid types for arg.
arg_name (str): the name of arg.
Returns:
Exception: when the type is not correct, otherwise nothing.
"""
# handle special case of booleans being a subclass of ints
print_value = '\"\"' if repr(arg) == repr('') else arg
if int in types and bool not in types:
if isinstance(arg, bool):
raise TypeError("Argument {0} with value {1} is not of type {2}.".format(arg_name, print_value, types))
if not isinstance(arg, types):
raise TypeError("Argument {0} with value {1} is not of type {2}.".format(arg_name, print_value, types))
def type_check_list(args, types, arg_names):
"""
Check the type of each parameter in the list.
Args:
args (Union[list, tuple]): a list or tuple of any variable.
types (tuple): tuple of all valid types for arg.
arg_names (Union[list, tuple of str]): the names of args.
Returns:
Exception: when the type is not correct, otherwise nothing.
"""
type_check(args, (list, tuple,), arg_names)
if len(args) != len(arg_names) and not isinstance(arg_names, str):
raise ValueError("List of arguments is not the same length as argument_names.")
if isinstance(arg_names, str):
arg_names = ["{0}[{1}]".format(arg_names, i) for i in range(len(args))]
for arg, arg_name in zip(args, arg_names):
type_check(arg, types, arg_name)
def replace_minus_one(value):
""" replace -1 with a default value """
return value if value != -1 else UINT32_MAX
def check_param_id(info_param, info_name):
"""
Check the type of info_param.
Args:
info_param (Union[list[int], str]): Info parameters of check_node_list that is either list of ints or *.
info_name (str): Info name of check_node_list.
Raises:
ValueError: When the type of info_param is not correct, otherwise nothing.
"""
if isinstance(info_param, str):
if info_param not in ["*"]:
raise ValueError("Node parameter {} only accepts '*' as string.".format(info_name))
else:
for param in info_param:
METHOD_NAME(param, info_name)
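# Illustrative sketch (not part of the original module): how the helpers above are
# typically combined when validating a single user-supplied argument; the "port"
# argument here is hypothetical.
def _example_check_port(port):
    type_check(port, (int,), "port")  # rejects bool (treated as a special case) and non-int values
    check_value(port, [UINT32_MIN, UINT32_MAX], "port")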
| null |
4,847 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.ops.operations as P
from mindspore import context, Tensor
from mindspore.nn import Cell
from mindspore.ops.functional import vmap
from mindspore.ops.operations import _grad_ops as G
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class MaxPool3DGradWithArgmaxNet(Cell):
def __init__(self, ksize, strides, pads, dilation, ceil_mode=False,
data_format="NCDHW"):
super(MaxPool3DGradWithArgmaxNet, self).__init__()
self.maxpool3d_grad_with_argmax = G.MaxPool3DGradWithArgmax(
ksize=ksize, strides=strides, pads=pads, dilation=dilation,
ceil_mode=ceil_mode, data_format=data_format)
def construct(self, x, dy, mask):
output = self.maxpool3d_grad_with_argmax(x, dy, mask)
return output
class DynamicShapeMaxPool3DGradWithArgmaxNet(Cell):
def __init__(self, net, axis=0):
super(DynamicShapeMaxPool3DGradWithArgmaxNet, self).__init__()
self.net = net
self.unique = P.Unique()
self.gather = P.Gather()
self.axis = axis
def construct(self, x, dy, mask, indices):
unique_indices, _ = self.unique(indices)
x = self.gather(x, unique_indices, self.axis)
dy = self.gather(dy, unique_indices, self.axis)
mask = self.gather(mask, unique_indices, self.axis)
return self.net(x, dy, mask)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_maxpool3d_grad_withargmax_float32():
"""
Feature: Test MaxPool3DGradWithArgmax.
Description: Test MaxPool3DGradWithArgmax with float32 inputs.
Expectation: success.
"""
attributes = {'ksize': 3, 'strides': 1, 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'data_format': 'NCDHW'}
inputs = Tensor(np.arange(3*4*3).reshape(1, 1, 3, 4, 3).astype(np.float32))
dy = Tensor(np.ones((1, 1, 1, 2, 1)).astype(np.float32))
mask = Tensor(np.array([[[[[32], [35]]]]]).astype(np.int32))
net = MaxPool3DGradWithArgmaxNet(**attributes)
output = net(inputs, dy, mask)
expect = np.array([[[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 1.],
[0., 0., 1.]]]]]).astype(np.float32)
assert (output.asnumpy() == expect).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_maxpool3d_grad_withargmax_float16():
"""
Feature: Test MaxPool3DGradWithArgmax.
Description: Test MaxPool3DGradWithArgmax with float16 inputs.
Expectation: success.
"""
attributes = {'ksize': 3, 'strides': 1, 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'data_format': 'NCDHW'}
inputs = Tensor(np.arange(3*4*3).reshape(1, 1, 3, 4, 3).astype(np.float16))
dy = Tensor(np.ones((1, 1, 1, 2, 1)).astype(np.float16))
mask = Tensor(np.array([[[[[32], [35]]]]]).astype(np.int32))
net = MaxPool3DGradWithArgmaxNet(**attributes)
output = net(inputs, dy, mask)
expect = np.array([[[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 1.],
[0., 0., 1.]]]]]).astype(np.float16)
assert (output.asnumpy() == expect).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: Test vmap.
Description: Test MaxPool3DGradWithArgmax with vmap.
Expectation: success.
"""
attributes = {'ksize': 3, 'strides': 1, 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'data_format': 'NCDHW'}
net = MaxPool3DGradWithArgmaxNet(**attributes)
nest_vmap = vmap(net, in_axes=(-1, -1, -1), out_axes=0)
inputs = Tensor(np.arange(3*4*3).reshape(1, 1, 3, 4, 3, 1).astype(np.float32))
dy = Tensor(np.ones((1, 1, 1, 2, 1, 1)).astype(np.float32))
mask = Tensor(np.array([[[[[[32]], [[35]]]]]]).astype(np.int32))
out = nest_vmap(inputs, dy, mask)
assert out.shape == (1, 1, 1, 3, 4, 3)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_maxpool3d_grad_with_argmax():
"""
Feature: MaxPool3DGradWithArgmax dynamic test.
Description: Run unique and gather ops before MaxPool3DGradWithArgmax.
Expectation: success.
"""
attributes = {'ksize': 3, 'strides': 1, 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'data_format': 'NCDHW'}
inputs = Tensor(np.arange(3*4*3).reshape(1, 1, 3, 4, 3).astype(np.float32))
dy = Tensor(np.ones((1, 1, 1, 2, 1)).astype(np.float32))
mask = Tensor(np.array([[[[[32], [35]]]]]).astype(np.int32))
indices = Tensor(np.array([0]).astype(np.int32))
net = MaxPool3DGradWithArgmaxNet(**attributes)
dy_net = DynamicShapeMaxPool3DGradWithArgmaxNet(net)
out = dy_net(inputs, dy, mask, indices)
assert out.shape == inputs.shape
| null |
4,848 |
import pytest
from kopf._kits.webhooks import ClusterDetector, WebhookAutoServer, WebhookAutoTunnel
# Reproducing the realistic environment would be costly and difficult,
# so we mock all external(!) libraries to return the results as we expect them.
# This reduces the quality of the tests, but makes them simple.
@pytest.fixture(autouse=True)
def pathmock(mocker, fake_vault, enforced_context, aresponses, hostname):
mocker.patch('ssl.get_server_certificate', return_value='')
mocker.patch('certvalidator.ValidationContext')
validator = mocker.patch('certvalidator.CertificateValidator')
pathmock = validator.return_value.validate_tls.return_value
pathmock.first.issuer.native = {}
pathmock.first.subject.native = {}
return pathmock
async def test_no_detection(hostname, aresponses):
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.2.3'})
hostname = await ClusterDetector().guess_host()
assert hostname is None
async def test_dependencies(hostname, aresponses, no_certvalidator):
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.2.3'})
with pytest.raises(ImportError) as err:
await ClusterDetector().guess_host()
assert "pip install certvalidator" in str(err.value)
async def METHOD_NAME(pathmock):
pathmock.first.issuer.native = {'common_name': 'minikubeCA'}
hostname = await ClusterDetector().guess_host()
assert hostname == 'host.minikube.internal'
async def test_minikube_via_subject_cn(pathmock):
pathmock.first.subject.native = {'common_name': 'minikube'}
hostname = await ClusterDetector().guess_host()
assert hostname == 'host.minikube.internal'
async def test_k3d_via_issuer_cn(pathmock):
pathmock.first.issuer.native = {'common_name': 'k3s-ca-server-12345'}
hostname = await ClusterDetector().guess_host()
assert hostname == 'host.k3d.internal'
async def test_k3d_via_subject_cn(pathmock):
pathmock.first.subject.native = {'common_name': 'k3s'}
hostname = await ClusterDetector().guess_host()
assert hostname == 'host.k3d.internal'
async def test_k3d_via_subject_org(pathmock):
pathmock.first.subject.native = {'organization_name': 'k3s'}
hostname = await ClusterDetector().guess_host()
assert hostname == 'host.k3d.internal'
async def test_k3d_via_version_infix(hostname, aresponses):
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.20.4+k3s1'})
hostname = await ClusterDetector().guess_host()
assert hostname == 'host.k3d.internal'
async def test_server_detects(responder, aresponses, hostname, caplog, assert_logs):
caplog.set_level(0)
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.20.4+k3s1'})
server = WebhookAutoServer(insecure=True)
async with server:
async for _ in server(responder.fn):
break # do not sleep
assert_logs(["Cluster detection found the hostname: host.k3d.internal"])
async def test_server_works(
responder, aresponses, hostname, caplog, assert_logs):
caplog.set_level(0)
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.20.4'})
server = WebhookAutoServer(insecure=True)
async with server:
async for _ in server(responder.fn):
break # do not sleep
assert_logs(["Cluster detection failed, running a simple local server"])
async def test_tunnel_detects(responder, pyngrok_mock, aresponses, hostname, caplog, assert_logs):
caplog.set_level(0)
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.20.4+k3s1'})
server = WebhookAutoTunnel()
async with server:
async for _ in server(responder.fn):
break # do not sleep
assert_logs(["Cluster detection found the hostname: host.k3d.internal"])
async def test_tunnel_works(responder, pyngrok_mock, aresponses, hostname, caplog, assert_logs):
caplog.set_level(0)
aresponses.add(hostname, '/version', 'get', {'gitVersion': 'v1.20.4'})
server = WebhookAutoTunnel()
async with server:
async for _ in server(responder.fn):
break # do not sleep
assert_logs(["Cluster detection failed, using an ngrok tunnel."])
| null |
4,849 |
import binascii
import json
import os
from .gabi.attributes import make_attribute_list
from .gabi.keys import DefaultSystemParameters
from .gabi.proofs import createChallenge
from .wrappers import challenge_response, serialize_proof_d, unserialize_proof_d
from ..primitives.structs import ipack, iunpack
from ...identity_formats import Attestation, IdentityAlgorithm
class IRMAAttestation(Attestation):
def __init__(self, sign_date, proofd, z=None):
self.sign_date = sign_date
self.proofd = proofd
self.z = z
def serialize(self):
return ipack(self.sign_date) + serialize_proof_d(self.proofd)
def serialize_private(self, PK):
return ipack(self.z) + ipack(self.sign_date) + serialize_proof_d(self.proofd)
@classmethod
def unserialize(cls, s, id_format):
sign_date, rem = iunpack(s)
return IRMAAttestation(sign_date, unserialize_proof_d(rem))
@classmethod
def unserialize_private(cls, SK, s, id_format):
z, rem = iunpack(s)
sign_date, rem = iunpack(rem)
return IRMAAttestation(sign_date, unserialize_proof_d(rem), z)
class KeyStub:
def public_key(self):
return self
def serialize(self):
return b''
@classmethod
def unserialize(cls, s):
return KeyStub()
class IRMAExactAlgorithm(IdentityAlgorithm):
def __init__(self, id_format, formats):
super().__init__(id_format, formats)
# Check algorithm match
if formats[id_format]["algorithm"] != "irmaexact":
raise RuntimeError("Identity format linked to wrong algorithm")
self.issuer_pk = formats[self.id_format]["issuer_pk"]
self.attribute_order = formats[self.id_format]["order"]
self.validity = formats[self.id_format]["validity"]
self.base_meta = {
"credential": formats[self.id_format]["credential"],
"keyCounter": formats[self.id_format]["keyCounter"],
"validity": formats[self.id_format]["validity"]
}
self.system_parameters = DefaultSystemParameters[1024]
self.challenge_count = 8
def generate_secret_key(self):
return KeyStub()
def load_secret_key(self, serialized):
return KeyStub()
def load_public_key(self, serialized):
return KeyStub()
def get_attestation_class(self):
return IRMAAttestation
def attest(self, PK, value):
raise NotImplementedError("Only import_blob is supported (now) for IRMA.")
def certainty(self, value, aggregate):
value_json = {"attributes": json.loads(value)}
value_json.update(self.base_meta)
attestation = aggregate['attestation']
attr_ints, sign_date = make_attribute_list(value_json, self.attribute_order,
(self.validity, attestation.sign_date))
reconstructed_attr_map = {}
for i in range(len(attr_ints)):
reconstructed_attr_map[i + 1] = attr_ints[i]
verified = 0.0
failure = False
for k, v in aggregate.items():
if k != 'attestation' and v:
challenge_verif, _ = iunpack(k)
p = attestation.proofd.Copy()
p.ADisclosed = reconstructed_attr_map
Ap, Zp = p.ChallengeContribution(self.issuer_pk)
p.C, _ = iunpack(v)
reconstructed_challenge = createChallenge(challenge_verif, challenge_verif, [Ap, Zp], False)
if p.VerifyWithChallenge(self.issuer_pk, reconstructed_challenge):
verified += 1.0
else:
failure = True
return 0.0 if failure else (verified / self.challenge_count)
def create_challenges(self, PK, attestation):
return [ipack(int(binascii.hexlify(os.urandom(32)), 16) % self.issuer_pk.N)
for _ in range(self.challenge_count)]
def create_challenge_response(self, SK, attestation, challenge):
return challenge_response(attestation.proofd, attestation.z, challenge)
def create_certainty_aggregate(self, attestation):
return {'attestation': attestation}
def create_honesty_challenge(self, PK, value):
raise NotImplementedError()
def process_honesty_challenge(self, value, response):
raise NotImplementedError()
def METHOD_NAME(self, aggregate, challenge, response):
aggregate[challenge] = response
def import_blob(self, blob):
blob_json = json.loads(blob)
sign_date = blob_json["sign_date"]
proofd = unserialize_proof_d(binascii.unhexlify(blob_json["proofd"]))
z = blob_json["z"]
inst = self.get_attestation_class()(sign_date, proofd, z)
return inst.serialize_private(None), None
| null |
4,850 |
"""Tests for the KernelSpecManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import copy
import json
import os
import sys
import tempfile
import unittest
from io import StringIO
from logging import StreamHandler
from os.path import join as pjoin
from subprocess import PIPE, STDOUT, Popen
from tempfile import TemporaryDirectory
import pytest
from jupyter_core import paths
from jupyter_client import kernelspec
from .utils import install_kernel, sample_kernel_json
class KernelSpecTests(unittest.TestCase):
def setUp(self):
self.sample_kernel_dir = install_kernel(
pjoin(paths.jupyter_data_dir(), "kernels"), name="sample"
)
self.ksm = kernelspec.KernelSpecManager()
td2 = TemporaryDirectory()
self.addCleanup(td2.cleanup)
self.installable_kernel = td2.name
with open(pjoin(self.installable_kernel, "kernel.json"), "w") as f:
json.dump(sample_kernel_json, f)
def test_find_kernel_specs(self):
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels["sample"], self.sample_kernel_dir)
def METHOD_NAME(self):
ksm = kernelspec.KernelSpecManager()
ksm.allowed_kernelspecs = ["foo"]
kernels = ksm.find_kernel_specs()
assert not len(kernels)
def test_deprecated_whitelist(self):
ksm = kernelspec.KernelSpecManager()
ksm.whitelist = ["bar"]
kernels = ksm.find_kernel_specs()
assert not len(kernels)
def test_get_kernel_spec(self):
ks = self.ksm.get_kernel_spec("SAMPLE") # Case insensitive
self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
self.assertEqual(ks.argv, sample_kernel_json["argv"])
self.assertEqual(ks.display_name, sample_kernel_json["display_name"])
self.assertEqual(ks.env, {})
self.assertEqual(ks.metadata, {})
def test_find_all_specs(self):
kernels = self.ksm.get_all_specs()
self.assertEqual(kernels["sample"]["resource_dir"], self.sample_kernel_dir)
self.assertIsNotNone(kernels["sample"]["spec"])
def test_kernel_spec_priority(self):
td = TemporaryDirectory()
self.addCleanup(td.cleanup)
sample_kernel = install_kernel(td.name, name="sample")
self.ksm.kernel_dirs.append(td.name)
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels["sample"], self.sample_kernel_dir)
self.ksm.kernel_dirs.insert(0, td.name)
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels["sample"], sample_kernel)
def test_install_kernel_spec(self):
self.ksm.install_kernel_spec(self.installable_kernel, kernel_name="tstinstalled", user=True)
self.assertIn("tstinstalled", self.ksm.find_kernel_specs())
# install again works
self.ksm.install_kernel_spec(self.installable_kernel, kernel_name="tstinstalled", user=True)
def test_install_kernel_spec_prefix(self):
td = TemporaryDirectory()
self.addCleanup(td.cleanup)
capture = StringIO()
handler = StreamHandler(capture)
self.ksm.log.addHandler(handler)
self.ksm.install_kernel_spec(
self.installable_kernel, kernel_name="tstinstalled", prefix=td.name
)
captured = capture.getvalue()
self.ksm.log.removeHandler(handler)
self.assertIn("may not be found", captured)
self.assertNotIn("tstinstalled", self.ksm.find_kernel_specs())
# add prefix to path, so we find the spec
self.ksm.kernel_dirs.append(pjoin(td.name, "share", "jupyter", "kernels"))
self.assertIn("tstinstalled", self.ksm.find_kernel_specs())
# Run it again, no warning this time because we've added it to the path
capture = StringIO()
handler = StreamHandler(capture)
self.ksm.log.addHandler(handler)
self.ksm.install_kernel_spec(
self.installable_kernel, kernel_name="tstinstalled", prefix=td.name
)
captured = capture.getvalue()
self.ksm.log.removeHandler(handler)
self.assertNotIn("may not be found", captured)
@pytest.mark.skipif(
not (os.name != "nt" and not os.access("/usr/local/share", os.W_OK)),
reason="needs Unix system without root privileges",
)
def test_cant_install_kernel_spec(self):
with self.assertRaises(OSError):
self.ksm.install_kernel_spec(
self.installable_kernel, kernel_name="tstinstalled", user=False
)
def test_remove_kernel_spec(self):
path = self.ksm.remove_kernel_spec("sample")
self.assertEqual(path, self.sample_kernel_dir)
def test_remove_kernel_spec_app(self):
p = Popen(
[
sys.executable,
"-m",
"jupyter_client.kernelspecapp",
"remove",
"sample",
"-f",
],
stdout=PIPE,
stderr=STDOUT,
env=os.environ,
)
out, _ = p.communicate()
self.assertEqual(p.returncode, 0, out.decode("utf8", "replace"))
def test_validate_kernel_name(self):
for good in [
"julia-0.4",
"ipython",
"R",
"python_3",
"Haskell-1-2-3",
]:
assert kernelspec._is_valid_kernel_name(good)
for bad in [
"has space",
"ünicode",
"%percent",
"question?",
]:
assert not kernelspec._is_valid_kernel_name(bad)
def test_subclass(self):
"""Test get_all_specs in subclasses that override find_kernel_specs"""
ksm = self.ksm
resource_dir = tempfile.gettempdir()
native_name = kernelspec.NATIVE_KERNEL_NAME
native_kernel = ksm.get_kernel_spec(native_name)
class MyKSM(kernelspec.KernelSpecManager):
def get_kernel_spec(self, name):
spec = copy.copy(native_kernel)
if name == "fake":
spec.name = name
spec.resource_dir = resource_dir
elif name == native_name:
pass
else:
raise KeyError(name)
return spec
def find_kernel_specs(self):
return {
"fake": resource_dir,
native_name: native_kernel.resource_dir,
}
# ensure that get_all_specs doesn't raise if only
# find_kernel_specs and get_kernel_spec are defined
myksm = MyKSM()
specs = myksm.get_all_specs()
assert sorted(specs) == ["fake", native_name]
| null |
4,851 |
import pytest
import numpy as np
from rlberry.agents.utils import replay
from rlberry.envs.finite import GridWorld
from gymnasium.wrappers import TimeLimit
def _get_filled_replay(max_replay_size):
"""runs env for ~ 2 * max_replay_size timesteps."""
env = GridWorld(terminal_states=None)
env = TimeLimit(env, max_episode_steps=200)
env.reseed(123)
rng = np.random.default_rng(456)
buffer = replay.ReplayBuffer(
max_replay_size,
rng,
max_episode_steps=env._max_episode_steps,
enable_prioritized=True,
)
buffer.setup_entry("observations", np.float32)
buffer.setup_entry("actions", np.uint32)
buffer.setup_entry("rewards", np.float32)
buffer.setup_entry("dones", bool)
# Fill the replay buffer
total_time = 0
while True:
if total_time > 2 * buffer._max_replay_size:
break
done = False
observation, info = env.reset()
while not done:
total_time += 1
action = env.action_space.sample()
next_observation, reward, terminated, truncated, info = env.step(action)
done = terminated or truncated
buffer.append(
{
"observations": observation,
"actions": action,
"rewards": reward,
"dones": done,
}
)
observation = next_observation
if done:
buffer.end_episode()
return buffer, env
def test_replay_size():
# get replay buffer
buffer, _ = _get_filled_replay(max_replay_size=500)
assert len(buffer) == 500
@pytest.mark.parametrize("sampling_mode", ["uniform", "prioritized"])
def test_replay_sampling(sampling_mode):
batch_size = 128
chunk_size = 256
# get replay buffer
buffer, _ = _get_filled_replay(max_replay_size=500)
# Sample batches, check shape and dtype
for _ in range(10):
batch = buffer.sample(
batch_size=batch_size, chunk_size=chunk_size, sampling_mode=sampling_mode
)
for tag in buffer.tags:
assert batch.data[tag].shape[:2] == (batch_size, chunk_size)
assert batch.data[tag].dtype == buffer.dtypes[tag]
assert np.array_equal(
np.array(buffer.data[tag], dtype=buffer.dtypes[tag])[
batch.info["indices"]
],
batch.data[tag],
)
def test_replay_priority_update():
# get replay buffer
buffer, _ = _get_filled_replay(max_replay_size=500)
rng = buffer._rng
# Test priority update
# Note: the test only works if all indices in indices_to_update are unique (otherwise the
# same priority index can be updated more than once, which will break the test)
indices_to_update = rng.choice(len(buffer), size=(32, 10), replace=False)
n_indices = indices_to_update.shape[0] * indices_to_update.shape[1]
new_priorities = np.arange(n_indices).reshape(indices_to_update.shape)
buffer.update_priorities(indices_to_update.copy(), new_priorities.copy())
for bb in range(indices_to_update.shape[0]):
for cc in range(indices_to_update.shape[1]):
idx = indices_to_update[bb, cc]
val1 = buffer._it_sum[idx]
val2 = buffer._it_min[idx]
assert val1 == val2 == new_priorities[bb, cc] ** buffer._alpha
@pytest.mark.parametrize("sampling_mode", ["uniform", "prioritized"])
def METHOD_NAME(sampling_mode):
batch_size = 16
chunk_size = 256
# get replay buffer
buffer, env = _get_filled_replay(max_replay_size=500)
    # add more data, sample batches and check that sampled sub-trajectories
# are not "crossing" the current position (buffer._position)
total_time = 0
while True:
if total_time > 1000:
break
done = False
obs, info = env.reset()
while not done:
total_time += 1
action = env.action_space.sample()
next_obs, reward, terminated, truncated, _ = env.step(action)
done = terminated or truncated
buffer.append(
{
"observations": obs,
"actions": action,
"rewards": reward,
"dones": done,
}
)
obs = next_obs
if done:
buffer.end_episode()
# sample and check
start_indices, end_indices, weights = buffer._sample_batch_indices(
batch_size, chunk_size, sampling_mode=sampling_mode
)
assert np.all(weights >= 0), "weights must be nonnegative"
# we need end_indices > start_indices and the difference
# to be equal to chunk_size
assert np.all((end_indices - start_indices) == chunk_size)
positive_mask = start_indices >= 0
negative_mask = ~positive_mask
# Case 1: start indices are >= 0
assert np.all(
~np.logical_and(
buffer._position > start_indices[positive_mask],
buffer._position < end_indices[positive_mask],
)
), "buffer._position cannot be in the middle of start and end indices"
# Case 2: start indices are < 0
# -> self._position cannot be between start_indices+len(buffer) and len(buffer)-1
# -> self._position cannot be between 0 and end_indices-1
assert np.all(
np.logical_and(
(start_indices[negative_mask] + len(buffer)) >= buffer._position,
end_indices[negative_mask] <= buffer._position,
)
), "buffer._position cannot be in the middle of start and end indices"
| null |
4,852 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
# Copyright 2018 Sonus Networks, Inc. (d.b.a. Ribbon Communications Operating Company)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import re
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common.osutil.default import DefaultOSUtil
from azurelinuxagent.common.utils.networkutil import NetworkInterfaceCard
class OpenWRTOSUtil(DefaultOSUtil):
def __init__(self):
super(OpenWRTOSUtil, self).__init__()
self.agent_conf_file_path = '/etc/waagent.conf'
self.dhclient_name = 'udhcpc'
self.ip_command_output = re.compile('^\d+:\s+(\w+):\s+(.*)$') # pylint: disable=W1401
self.jit_enabled = True
def eject_dvd(self, chk_err=True):
logger.warn('eject is not supported on OpenWRT')
def useradd(self, username, expiration=None, comment=None):
"""
Create user account with 'username'
"""
userentry = self.get_userentry(username)
if userentry is not None:
logger.info("User {0} already exists, skip useradd", username)
return
if expiration is not None:
cmd = ["useradd", "-m", username, "-s", "/bin/ash", "-e", expiration]
else:
cmd = ["useradd", "-m", username, "-s", "/bin/ash"]
if not os.path.exists("/home"):
os.mkdir("/home")
if comment is not None:
cmd.extend(["-c", comment])
self._run_command_raising_OSUtilError(cmd, err_msg="Failed to create user account:{0}".format(username))
def get_dhcp_pid(self):
return self._get_dhcp_pid(["pidof", self.dhclient_name])
def get_nic_state(self, as_string=False):
"""
Capture NIC state (IPv4 and IPv6 addresses plus link state).
:return: Dictionary of NIC state objects, with the NIC name as key
:rtype: dict(str,NetworkInformationCard)
"""
        if as_string:  # as_string is not supported on OpenWRT
return ''
state = {}
status, output = shellutil.run_get_output("ip -o link", chk_err=False, log_cmd=False)
if status != 0:
logger.verbose("Could not fetch NIC link info; status {0}, {1}".format(status, output))
return {}
for entry in output.splitlines():
result = self.ip_command_output.match(entry)
if result:
name = result.group(1)
state[name] = NetworkInterfaceCard(name, result.group(2))
self._update_nic_state(state, "ip -o -f inet address", NetworkInterfaceCard.add_ipv4, "an IPv4 address")
self._update_nic_state(state, "ip -o -f inet6 address", NetworkInterfaceCard.add_ipv6, "an IPv6 address")
return state
def _update_nic_state(self, state, ip_command, handler, description):
"""
Update the state of NICs based on the output of a specified ip subcommand.
:param dict(str, NetworkInterfaceCard) state: Dictionary of NIC state objects
:param str ip_command: The ip command to run
:param handler: A method on the NetworkInterfaceCard class
:param str description: Description of the particular information being added to the state
"""
status, output = shellutil.run_get_output(ip_command, chk_err=True)
if status != 0:
return
for entry in output.splitlines():
result = self.ip_command_output.match(entry)
if result:
interface_name = result.group(1)
if interface_name in state:
handler(state[interface_name], result.group(2))
else:
logger.error("Interface {0} has {1} but no link state".format(interface_name, description))
def is_dhcp_enabled(self):
pass
def start_dhcp_service(self):
pass
def stop_dhcp_service(self):
pass
    def start_network(self):
return shellutil.run("/etc/init.d/network start", chk_err=True)
def restart_ssh_service(self): # pylint: disable=R1710
        # Since Dropbear is the default SSH server on OpenWRT, let's do a sanity check
if os.path.exists("/etc/init.d/sshd"):
return shellutil.run("/etc/init.d/sshd restart", chk_err=True)
else:
logger.warn("sshd service does not exists")
def stop_agent_service(self):
return shellutil.run("/etc/init.d/{0} stop".format(self.service_name), chk_err=True)
def start_agent_service(self):
return shellutil.run("/etc/init.d/{0} start".format(self.service_name), chk_err=True)
def register_agent_service(self):
return shellutil.run("/etc/init.d/{0} enable".format(self.service_name), chk_err=True)
def unregister_agent_service(self):
return shellutil.run("/etc/init.d/{0} disable".format(self.service_name), chk_err=True)
def METHOD_NAME(self, hostname):
fileutil.write_file('/etc/hostname', hostname)
commands = [['uci', 'set', 'system.@system[0].hostname={0}'.format(hostname)], ['uci', 'commit', 'system'],
['/etc/init.d/system', 'reload']]
self._run_multiple_commands_without_raising(commands, log_error=False, continue_on_error=False)
def remove_rules_files(self, rules_files=""):
pass
| null |
4,853 |
import pickle
import pickletools
from test import support
from test.pickletester import AbstractPickleTests
import doctest
import unittest
class OptimizedPickleTests(AbstractPickleTests, unittest.TestCase):
# TODO: RUSTPYTHON, AttributeError: module 'pickle' has no attribute 'PickleBuffer'
@unittest.expectedFailure
def test_buffer_callback_error(self): # TODO: RUSTPYTHON, remove when this passes
super().test_buffer_callback_error()
# TODO: RUSTPYTHON, AttributeError: module 'pickle' has no attribute 'PickleBuffer'
@unittest.expectedFailure
def test_buffers_error(self): # TODO: RUSTPYTHON, remove when this passes
super().test_buffers_error()
def test_compat_pickle(self): # TODO: RUSTPYTHON, remove when this passes
super().test_compat_pickle()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_complex_newobj_ex(self): # TODO: RUSTPYTHON, remove when this passes
super().test_complex_newobj_ex()
# TODO: RUSTPYTHON, TypeError: cannot pickle 'method' object
@unittest.expectedFailure
def test_in_band_buffers(self): # TODO: RUSTPYTHON, remove when this passes
super().test_in_band_buffers()
def test_notimplemented(self): # TODO: RUSTPYTHON, remove when this passes
super().test_notimplemented()
# TODO: RUSTPYTHON, AttributeError: module 'pickle' has no attribute 'PickleBuffer'
@unittest.expectedFailure
def test_oob_buffers(self): # TODO: RUSTPYTHON, remove when this passes
super().test_oob_buffers()
# TODO: RUSTPYTHON, AttributeError: module 'pickle' has no attribute 'PickleBuffer'
@unittest.expectedFailure
def test_oob_buffers_writable_to_readonly(self): # TODO: RUSTPYTHON, remove when this passes
super().test_oob_buffers_writable_to_readonly()
# TODO: RUSTPYTHON, TypeError: Expected type 'bytes', not 'bytearray'
@unittest.expectedFailure
def test_optional_frames(self): # TODO: RUSTPYTHON, remove when this passes
super().test_optional_frames()
# TODO: RUSTPYTHON, AttributeError: module 'pickle' has no attribute 'PickleBuffer'
@unittest.expectedFailure
def test_picklebuffer_error(self): # TODO: RUSTPYTHON, remove when this passes
super().test_picklebuffer_error()
def dumps(self, arg, proto=None, **kwargs):
return pickletools.optimize(pickle.dumps(arg, proto, **kwargs))
def loads(self, buf, **kwds):
return pickle.loads(buf, **kwds)
# Test relies on precise output of dumps()
test_pickle_to_2x = None
# Test relies on writing by chunks into a file object.
test_framed_write_sizes_with_delayed_writer = None
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_optimize_long_binget(self):
data = [str(i) for i in range(257)]
data.append(data[-1])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(data, proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, data)
self.assertIs(unpickled[-1], unpickled[-2])
pickled2 = pickletools.optimize(pickled)
unpickled2 = pickle.loads(pickled2)
self.assertEqual(unpickled2, data)
self.assertIs(unpickled2[-1], unpickled2[-2])
self.assertNotIn(pickle.LONG_BINGET, pickled2)
self.assertNotIn(pickle.LONG_BINPUT, pickled2)
def METHOD_NAME(self):
pickled = (b'\x80\x04\x95\x15\x00\x00\x00\x00\x00\x00\x00'
b']\x94(\x8c\x04spamq\x01\x8c\x03ham\x94h\x02e.')
# 0: \x80 PROTO 4
# 2: \x95 FRAME 21
# 11: ] EMPTY_LIST
# 12: \x94 MEMOIZE
# 13: ( MARK
# 14: \x8c SHORT_BINUNICODE 'spam'
# 20: q BINPUT 1
# 22: \x8c SHORT_BINUNICODE 'ham'
# 27: \x94 MEMOIZE
# 28: h BINGET 2
# 30: e APPENDS (MARK at 13)
# 31: . STOP
self.assertIn(pickle.BINPUT, pickled)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, ['spam', 'ham', 'ham'])
self.assertIs(unpickled[1], unpickled[2])
pickled2 = pickletools.optimize(pickled)
unpickled2 = pickle.loads(pickled2)
self.assertEqual(unpickled2, ['spam', 'ham', 'ham'])
self.assertIs(unpickled2[1], unpickled2[2])
self.assertNotIn(pickle.BINPUT, pickled2)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'bytes_types',
'UP_TO_NEWLINE', 'TAKEN_FROM_ARGUMENT1',
'TAKEN_FROM_ARGUMENT4', 'TAKEN_FROM_ARGUMENT4U',
'TAKEN_FROM_ARGUMENT8U', 'ArgumentDescriptor',
'read_uint1', 'read_uint2', 'read_int4', 'read_uint4',
'read_uint8', 'read_stringnl', 'read_stringnl_noescape',
'read_stringnl_noescape_pair', 'read_string1',
'read_string4', 'read_bytes1', 'read_bytes4',
'read_bytes8', 'read_bytearray8', 'read_unicodestringnl',
'read_unicodestring1', 'read_unicodestring4',
'read_unicodestring8', 'read_decimalnl_short',
'read_decimalnl_long', 'read_floatnl', 'read_float8',
'read_long1', 'read_long4',
'uint1', 'uint2', 'int4', 'uint4', 'uint8', 'stringnl',
'stringnl_noescape', 'stringnl_noescape_pair', 'string1',
'string4', 'bytes1', 'bytes4', 'bytes8', 'bytearray8',
'unicodestringnl', 'unicodestring1', 'unicodestring4',
'unicodestring8', 'decimalnl_short', 'decimalnl_long',
'floatnl', 'float8', 'long1', 'long4',
'StackObject',
'pyint', 'pylong', 'pyinteger_or_bool', 'pybool', 'pyfloat',
'pybytes_or_str', 'pystring', 'pybytes', 'pybytearray',
'pyunicode', 'pynone', 'pytuple', 'pylist', 'pydict',
'pyset', 'pyfrozenset', 'pybuffer', 'anyobject',
'markobject', 'stackslice', 'OpcodeInfo', 'opcodes',
'code2op',
}
support.check__all__(self, pickletools, not_exported=not_exported)
def load_tests(loader, tests, pattern):
# TODO: RUSTPYTHON
# tests.addTest(doctest.DocTestSuite(pickletools))
return tests
if __name__ == "__main__":
unittest.main()
| null |
4,854 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
class OpNetWrapperBitwise(nn.Cell):
"""OpNetWrapperBitwise"""
def __init__(self, op):
"""__init__"""
super(OpNetWrapperBitwise, self).__init__()
self.op = op
def construct(self, *inputs):
"""construct"""
return self.op(*inputs)
support_type_list = [np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]
mode_list = [context.PYNATIVE_MODE, context.GRAPH_MODE]
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
@pytest.mark.parametrize('dtype', support_type_list)
@pytest.mark.parametrize('mode', mode_list)
def test_bitwise_and(shape, dtype, mode):
"""
Feature: BitwiseAnd gpu kernel.
    Description: test the correctness of the BitwiseAnd gpu kernel.
Expectation: Success.
"""
context.set_context(mode=mode, device_target='GPU')
op = P.BitwiseAnd()
op_wrapper = OpNetWrapperBitwise(op)
prop = 100 if np.random.random() > 0.5 else -100
x_np = (np.random.randn(*shape) * prop).astype(dtype)
y_np = (np.random.randn(*shape) * prop).astype(dtype)
outputs = op_wrapper(Tensor(x_np), Tensor(y_np))
outputs_func = F.bitwise_and(Tensor(x_np), Tensor(y_np))
expect = np.bitwise_and(x_np, y_np)
assert np.allclose(outputs.asnumpy(), expect)
assert np.allclose(outputs_func.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
@pytest.mark.parametrize('dtype', support_type_list)
@pytest.mark.parametrize('mode', mode_list)
def METHOD_NAME(shape, dtype, mode):
"""
Feature: BitwiseOr gpu kernel.
    Description: test the correctness of the BitwiseOr gpu kernel.
Expectation: Success.
"""
context.set_context(mode=mode, device_target='GPU')
op = P.BitwiseOr()
op_wrapper = OpNetWrapperBitwise(op)
prop = 100 if np.random.random() > 0.5 else -100
x_np = (np.random.randn(*shape) * prop).astype(dtype)
y_np = (np.random.randn(*shape) * prop).astype(dtype)
outputs = op_wrapper(Tensor(x_np), Tensor(y_np))
outputs_func = F.bitwise_or(Tensor(x_np), Tensor(y_np))
expect = np.bitwise_or(x_np, y_np)
assert np.allclose(outputs.asnumpy(), expect)
assert np.allclose(outputs_func.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('shape', [(2,), (4, 5), (3, 4, 5, 6), (3, 4, 5, 6, 2)])
@pytest.mark.parametrize('dtype', support_type_list)
@pytest.mark.parametrize('mode', mode_list)
def test_bitwise_xor(shape, dtype, mode):
"""
Feature: BitwiseXor gpu kernel.
    Description: test the correctness of the BitwiseXor gpu kernel.
Expectation: Success.
"""
context.set_context(mode=mode, device_target='GPU')
op = P.BitwiseXor()
op_wrapper = OpNetWrapperBitwise(op)
prop = 100 if np.random.random() > 0.5 else -100
x_np = (np.random.randn(*shape) * prop).astype(dtype)
y_np = (np.random.randn(*shape) * prop).astype(dtype)
outputs = op_wrapper(Tensor(x_np), Tensor(y_np))
outputs_func = F.bitwise_xor(Tensor(x_np), Tensor(y_np))
expect = np.bitwise_xor(x_np, y_np)
assert np.allclose(outputs.asnumpy(), expect)
assert np.allclose(outputs_func.asnumpy(), expect)
class NetBitwiseGPU(nn.Cell):
"""NetBitwiseGPU"""
def construct(self, input_x, input_y):
"""construct"""
out_and = input_x & input_y
out_or = input_x | input_y
out_xor = input_x ^ input_y
return out_and, out_or, out_xor
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', mode_list)
def test_bitwise_bool(mode):
"""
Feature: Bitwise gpu kernel.
    Description: test the correctness of Bitwise cpu kernel tensor operations.
Expectation: Success.
"""
context.set_context(mode=mode, device_target='CPU')
input_x = ms.Tensor([True, False], dtype=ms.bool_)
input_y = ms.Tensor([True, True], dtype=ms.bool_)
net = NetBitwiseGPU()
out = net(input_x, input_y)
expect_and_gpu = np.array([True, False])
expect_or_gpu = np.array([True, True])
expect_xor_gpu = np.array([False, True])
assert np.allclose(out[0].asnumpy(), expect_and_gpu)
assert np.allclose(out[1].asnumpy(), expect_or_gpu)
assert np.allclose(out[2].asnumpy(), expect_xor_gpu)
res_and = input_x & input_y
res_or = input_x | input_y
res_xor = input_x ^ input_y
assert np.allclose(res_and.asnumpy(), expect_and_gpu)
assert np.allclose(res_or.asnumpy(), expect_or_gpu)
assert np.allclose(res_xor.asnumpy(), expect_xor_gpu)
| null |
4,855 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import argparse
import concurrent.futures
def generate_report(module_path, report_orig_path, report_out_path):
is_dir = os.path.isdir(module_path)
if not is_dir:
print("{} is not a directory".format(module_path))
return
os.makedirs(report_out_path, exist_ok=True)
try:
print("Generating report for {}".format(module_path))
# This command prints lots of false errors. Here, we redirect stdout and stderr to avoid them.
command = "mvn -Ddependency.locations.enabled=false -Ddependency.details.enabled=false project-info-reports:dependencies"
subprocess.check_output(command, cwd=module_path, shell=True)
command = "cp -r {} {}".format(report_orig_path, report_out_path)
subprocess.check_output(command, cwd=module_path, shell=True)
print("Generated report for {} in {}".format(module_path, report_out_path))
except subprocess.CalledProcessError as e:
print("Encountered error [{}] with the following output when generating report for {}".format(e, module_path))
print(e.output.decode('utf-8'))
except Exception as e:
print("Encountered error [{}] when generating report for {}".format(e, module_path))
def METHOD_NAME(druid_path, tmp_path, exclude_ext, num_threads):
tmp_path = os.path.abspath(tmp_path)
license_report_root = os.path.join(tmp_path, "license-reports")
license_core_path = os.path.join(license_report_root, "core")
license_ext_path = os.path.join(license_report_root, "ext")
shutil.rmtree(license_report_root, ignore_errors=True)
os.makedirs(license_core_path)
os.makedirs(license_ext_path)
druid_path = os.path.abspath(druid_path)
script_args = [(druid_path, os.path.join(druid_path, "distribution", "target", "site"), license_core_path)]
if not exclude_ext:
extensions_core_path = os.path.join(druid_path, "extensions-core")
command = "mvn -Dexec.executable='echo' -Dexec.args='${basedir}' exec:exec -q | grep extensions-core | grep -o '[^/]*$'"
extension_dirs = subprocess.check_output(command, cwd=druid_path, shell=True).decode().split('\n')[:-1]
print("Found {} extensions".format(len(extension_dirs)))
for extension_dir in extension_dirs:
print("extension dir: {}".format(extension_dir))
extension_path = os.path.join(extensions_core_path, extension_dir)
if not os.path.isdir(extension_path):
print("{} is not a directory".format(extension_path))
continue
extension_report_dir = "{}/{}".format(license_ext_path, extension_dir)
script_args.append((extension_path, os.path.join(extension_path, "target", "site"), extension_report_dir))
print("Generating dependency reports")
if num_threads > 1:
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
for module_path, report_orig_path, report_out_path in script_args:
executor.submit(generate_report, module_path, report_orig_path, report_out_path)
else:
for module_path, report_orig_path, report_out_path in script_args:
generate_report(module_path, report_orig_path, report_out_path)
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description='Generating dependency reports.')
parser.add_argument('druid_path', metavar='<Druid source path>', type=str)
parser.add_argument('tmp_path', metavar='<Full tmp path>', type=str)
parser.add_argument('--exclude-extension', dest='exclude_ext', action='store_const', const=True, default=False, help="Exclude extension report")
parser.add_argument('--clean-maven-artifact-transfer', dest='clean_mvn_artifact_transfer', action='store_const', const=True, default=False, help="Clean maven-artifact-transfer before generating dependency reports")
parser.add_argument('--parallel', dest='num_threads', type=int, default=1, help='Number of threads for generating reports')
args = parser.parse_args()
# The default maven-artifact-transfer in Travis is currently corrupted. Set the below argument properly to remove the corrupted one.
if args.clean_mvn_artifact_transfer:
command = "rm -rf ~/.m2/repository/org/apache/maven/shared/maven-artifact-transfer"
subprocess.check_call(command, shell=True)
METHOD_NAME(args.druid_path, args.tmp_path, args.exclude_ext, args.num_threads)
except KeyboardInterrupt:
print('Interrupted, closing.')
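# Illustrative invocations (script name and paths are hypothetical):
#   python3 generate_dependency_reports.py /path/to/druid /tmp/license-work --parallel 4
#   python3 generate_dependency_reports.py /path/to/druid /tmp/license-work --exclude-extension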
| null |
4,856 |
import unittest
from unittest import mock
from dateutil.relativedelta import relativedelta
from bot.constants import Emojis
from bot.exts.moderation.slowmode import Slowmode
from tests.helpers import MockBot, MockContext, MockTextChannel
class SlowmodeTests(unittest.IsolatedAsyncioTestCase):
def METHOD_NAME(self) -> None:
self.bot = MockBot()
self.cog = Slowmode(self.bot)
self.ctx = MockContext()
async def test_get_slowmode_no_channel(self) -> None:
"""Get slowmode without a given channel."""
self.ctx.channel = MockTextChannel(name="python-general", slowmode_delay=5)
await self.cog.get_slowmode(self.cog, self.ctx, None)
self.ctx.send.assert_called_once_with("The slowmode delay for #python-general is 5 seconds.")
async def test_get_slowmode_with_channel(self) -> None:
"""Get slowmode with a given channel."""
text_channel = MockTextChannel(name="python-language", slowmode_delay=2)
await self.cog.get_slowmode(self.cog, self.ctx, text_channel)
self.ctx.send.assert_called_once_with("The slowmode delay for #python-language is 2 seconds.")
async def test_set_slowmode_no_channel(self) -> None:
"""Set slowmode without a given channel."""
test_cases = (
("helpers", 23, True, f"{Emojis.check_mark} The slowmode delay for #helpers is now 23 seconds."),
("mods", 76526, False, f"{Emojis.cross_mark} The slowmode delay must be between 0 and 6 hours."),
("admins", 97, True, f"{Emojis.check_mark} The slowmode delay for #admins is now 1 minute and 37 seconds.")
)
for channel_name, seconds, edited, result_msg in test_cases:
with self.subTest(
channel_mention=channel_name,
seconds=seconds,
edited=edited,
result_msg=result_msg
):
self.ctx.channel = MockTextChannel(name=channel_name)
await self.cog.set_slowmode(self.cog, self.ctx, None, relativedelta(seconds=seconds))
if edited:
self.ctx.channel.edit.assert_awaited_once_with(slowmode_delay=float(seconds))
else:
self.ctx.channel.edit.assert_not_called()
self.ctx.send.assert_called_once_with(result_msg)
self.ctx.reset_mock()
async def test_set_slowmode_with_channel(self) -> None:
"""Set slowmode with a given channel."""
test_cases = (
("bot-commands", 12, True, f"{Emojis.check_mark} The slowmode delay for #bot-commands is now 12 seconds."),
("mod-spam", 21, True, f"{Emojis.check_mark} The slowmode delay for #mod-spam is now 21 seconds."),
("admin-spam", 4323598, False, f"{Emojis.cross_mark} The slowmode delay must be between 0 and 6 hours.")
)
for channel_name, seconds, edited, result_msg in test_cases:
with self.subTest(
channel_mention=channel_name,
seconds=seconds,
edited=edited,
result_msg=result_msg
):
text_channel = MockTextChannel(name=channel_name)
await self.cog.set_slowmode(self.cog, self.ctx, text_channel, relativedelta(seconds=seconds))
if edited:
text_channel.edit.assert_awaited_once_with(slowmode_delay=float(seconds))
else:
text_channel.edit.assert_not_called()
self.ctx.send.assert_called_once_with(result_msg)
self.ctx.reset_mock()
async def test_reset_slowmode_sets_delay_to_zero(self) -> None:
"""Reset slowmode with a given channel."""
text_channel = MockTextChannel(name="meta", slowmode_delay=1)
self.cog.set_slowmode = mock.AsyncMock()
await self.cog.reset_slowmode(self.cog, self.ctx, text_channel)
self.cog.set_slowmode.assert_awaited_once_with(
self.ctx, text_channel, relativedelta(seconds=0)
)
@mock.patch("bot.exts.moderation.slowmode.has_any_role")
@mock.patch("bot.exts.moderation.slowmode.MODERATION_ROLES", new=(1, 2, 3))
async def test_cog_check(self, role_check):
"""Role check is called with `MODERATION_ROLES`"""
role_check.return_value.predicate = mock.AsyncMock()
await self.cog.cog_check(self.ctx)
role_check.assert_called_once_with(*(1, 2, 3))
role_check.return_value.predicate.assert_awaited_once_with(self.ctx)
| null |
4,857 |
from django.conf import settings
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_frontpage.views import frontpage
class TestFrontpage(TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = frontpage.FrontpageView
def test_title(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertEqual('Devilry frontpage',
mockresponse.selector.one('title').alltext_normalized)
def METHOD_NAME(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertEqual('Choose your role',
mockresponse.selector.one('h1').alltext_normalized)
def test_user_is_student(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Candidate',
relatedstudent__user=testuser,
assignment_group__parentnode=baker.make_recipe('devilry.apps.core.assignment_activeperiod_start'))
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertTrue(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-student'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-examiner'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-anyadmin'))
def test_user_is_examiner(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner',
relatedexaminer__user=testuser,
assignmentgroup__parentnode=baker.make_recipe('devilry.apps.core.assignment_activeperiod_start'))
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-student'))
self.assertTrue(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-examiner'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-anyadmin'))
def test_user_is_superuser(self):
testuser = baker.make(settings.AUTH_USER_MODEL, is_superuser=True)
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-student'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-examiner'))
self.assertTrue(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-anyadmin'))
self.assertTrue(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))
def test_user_is_departmentadmin(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser', user=testuser,
permissiongroup=baker.make(
'devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=PermissionGroup.GROUPTYPE_DEPARTMENTADMIN).permissiongroup)
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-student'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-examiner'))
self.assertTrue(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-anyadmin'))
self.assertFalse(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))
def test_user_is_subjectadmin(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser', user=testuser,
permissiongroup=baker.make(
'devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN).permissiongroup)
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-student'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-examiner'))
self.assertTrue(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-anyadmin'))
self.assertFalse(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))
def test_user_is_periodadmin(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('devilry_account.PermissionGroupUser', user=testuser,
permissiongroup=baker.make('devilry_account.PeriodPermissionGroup').permissiongroup)
mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-student'))
self.assertFalse(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-examiner'))
self.assertTrue(
mockresponse.selector.exists('.devilry-frontpage-listbuilder-roleselect-itemvalue-anyadmin'))
self.assertFalse(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))
| null |
4,858 |
"""Contains code for PIN2DMD."""
import logging
import threading
from mpf.core.utility_functions import Util
from mpf.platforms.interfaces.dmd_platform import DmdPlatformInterface
from mpf.core.platform import RgbDmdPlatform
# pylint: disable-msg=ungrouped-imports
try:
import usb.core
except ImportError as e:
IMPORT_FAILED = e
else:
IMPORT_FAILED = None # type: ignore
GAMMA_TABLE = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
11, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 13, 14, 14,
14, 14, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18,
19, 19, 19, 20, 20, 20, 21, 21, 21, 22, 22, 22, 23, 23, 23, 24,
24, 24, 25, 25, 25, 26, 26, 27, 27, 27, 28, 28, 29, 29, 29, 30,
30, 31, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 35, 36, 36, 37,
37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45,
45, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 52, 52, 53, 53, 54,
55, 55, 56, 56, 57, 58, 58, 59, 60, 60, 61, 62, 62, 63, 63, 63]
class Pin2DmdHardwarePlatform(RgbDmdPlatform):
"""PIN2DMD RGB DMD hardware."""
__slots__ = ["device", "config"]
def __init__(self, machine):
"""Initialise PIN2DMD."""
super().__init__(machine)
self.features['tickless'] = True
self.config = self.machine.config_validator.validate_config("pin2dmd", self.machine.config.get('pin2dmd', {}))
self._configure_device_logging_and_debug('PIN2DMD', self.config)
self.log.debug("Configuring PIN2DMD hardware interface.")
self.device = Pin2DmdDevice(machine, self.debug, self.config['resolution'], self.config['panel'])
if IMPORT_FAILED:
raise AssertionError('Failed to load pyusb. Did you install pyusb? '
'Try: "pip3 install pyusb".') from IMPORT_FAILED
async def initialize(self):
"""Initialise platform."""
await self.device.connect()
def stop(self):
"""Stop platform."""
self.device.stop()
self.device = None
def __repr__(self):
"""Return string representation."""
return '<Platform.Pin2Dmd>'
def configure_rgb_dmd(self, name: str):
"""Configure rgb dmd."""
if name != "default":
self.raise_config_error("Use dmd name 'default' for PIN2DMD.", 1)
return self.device
class Pin2DmdDevice(DmdPlatformInterface):
"""A PIN2DMD device."""
__slots__ = ["writer", "current_frame", "new_frame_event", "machine", "log", "device", "brightness",
"debug", "resolution", "panel"]
def __init__(self, machine, debug, resolution, panel):
"""Initialise smart matrix device."""
self.writer = None
self.current_frame = None
self.new_frame_event = None
self.machine = machine
self.device = None
self.brightness = 255
self.log = logging.getLogger('Pin2DmdDevice')
self.debug = debug
self.resolution = resolution
self.panel = panel
def _send_brightness(self, brightness):
data = [0x00] * 2052
data[0] = 0x81
data[1] = 0xc3
data[2] = 0xe7
data[3] = 0xff
data[4] = 0x08
data[17] = brightness
if self.debug:
self.log.debug("Writing 0x01, %s", "".join(" 0x%02x" % b for b in data))
self.device.write(0x01, data)
def _send_frame(self, buffer):
if self.resolution == "128x32":
elements = 2048
else:
elements = 6144
output_buffer = [0] * (elements * 6 + 4)
output_buffer[0] = 0x81
output_buffer[1] = 0xC3
output_buffer[2] = 0xE9
output_buffer[3] = 18
for i in range(0, elements):
idx = i * 3
if self.panel == "rgb":
# use these mappings for RGB panels
pixel_r = buffer[idx]
pixel_g = buffer[idx + 1]
pixel_b = buffer[idx + 2]
# lower half of display
pixel_rl = buffer[elements * 3 + idx]
pixel_gl = buffer[elements * 3 + idx + 1]
pixel_bl = buffer[elements * 3 + idx + 2]
else:
# use these mappings for RBG panels
pixel_r = buffer[idx]
pixel_g = buffer[idx + 2]
pixel_b = buffer[idx + 1]
# lower half of display
pixel_rl = buffer[elements * 3 + idx]
pixel_gl = buffer[elements * 3 + idx + 2]
pixel_bl = buffer[elements * 3 + idx + 1]
# color correction
pixel_r = GAMMA_TABLE[pixel_r]
pixel_g = GAMMA_TABLE[pixel_g]
pixel_b = GAMMA_TABLE[pixel_b]
pixel_rl = GAMMA_TABLE[pixel_rl]
pixel_gl = GAMMA_TABLE[pixel_gl]
pixel_bl = GAMMA_TABLE[pixel_bl]
target_idx = i + 4
for _ in range(0, 6):
output_buffer[target_idx] = ((pixel_gl & 1) << 5) | ((pixel_bl & 1) << 4) | ((pixel_rl & 1) << 3) |\
((pixel_g & 1) << 2) | ((pixel_b & 1) << 1) | ((pixel_r & 1) << 0)
pixel_r >>= 1
pixel_g >>= 1
pixel_b >>= 1
pixel_rl >>= 1
pixel_gl >>= 1
pixel_bl >>= 1
target_idx += elements
if self.debug:
self.log.debug("Writing 0x01, %s, 1000", "".join(" 0x%02x" % b for b in output_buffer))
self.device.write(0x01, output_buffer, 1000)
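    # A rough worked example of the bit-plane packing above (made-up values,
    # not from real frame data): for an upper-half pixel with r=0b101, g=0b000,
    # b=0b001 and a black lower-half pixel, the first plane byte is
    # ((g & 1) << 2) | ((b & 1) << 1) | (r & 1) == 0b011; the channel values are
    # then shifted right and the next plane is written 'elements' bytes further on.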
def _feed_hardware(self):
"""Feed hardware in separate thread.
Wait for new_frame_event and send the last frame. If no event happened for 1s refresh the last frame.
"""
current_brightness = None
while not self.machine.thread_stopper.is_set():
# wait for new frame or timeout
self.new_frame_event.wait(1)
# clear event
self.new_frame_event.clear()
# set brightness if it changed
if self.brightness != current_brightness:
current_brightness = self.brightness
self._send_brightness(current_brightness)
# do not crash on missing frame
if self.current_frame is None:
continue
# send frame
self._send_frame(self.current_frame)
async def connect(self):
"""Connect to Pin2Dmd device."""
self.log.info("Connecting to Pin2DMD RGB DMD")
self.device = usb.core.find(idVendor=0x0314, idProduct=0xE457)
if self.device is None:
raise AssertionError('Pin2Dmd USB device not found')
self.new_frame_event = threading.Event()
self.writer = self.machine.clock.loop.run_in_executor(None, self._feed_hardware)
self.writer.add_done_callback(Util.raise_exceptions)
self.log.info("Connected to Pin2DMD")
def METHOD_NAME(self, brightness: float):
"""Set brightness."""
if brightness < 0.0 or brightness > 1.0:
raise AssertionError("Brightness has to be between 0 and 1.")
self.brightness = int(brightness * 255)
def stop(self):
"""Stop platform."""
def update(self, data):
"""Update DMD data."""
self.current_frame = bytearray(data)
self.new_frame_event.set()
| null |
4,859 |
import unittest
from pyexcel import Sheet
from ._compact import OrderedDict
class TestSheetColumn(unittest.TestCase):
def setUp(self):
self.data = [
["Column 1", "Column 2", "Column 3"],
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]
def test_negative_row_index(self):
s = Sheet(self.data, "test")
data = s.column[-1]
self.assertEqual(data, ["Column 3", 3, 6, 9])
def test_formatter_by_named_column(self):
"""Test one named column"""
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
s.column.format("Column 1", str)
self.assertEqual(s.column["Column 1"], ["1", "4", "7"])
def test_formatter_by_named_columns(self):
"""Test multiple named columns"""
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
s.column.format(["Column 1", "Column 3"], str)
self.assertEqual(s.column["Column 1"], ["1", "4", "7"])
self.assertEqual(s.column["Column 3"], ["3", "6", "9"])
def test_add(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
data = OrderedDict({"Column 4": [10, 11, 12]})
s1 = s.column + data
self.assertEqual(s1.column.Column_4, [10, 11, 12])
# check that we don't have the column in the original sheet
with self.assertRaises((AttributeError)):
self.assertEqual(s.column.Column_4, [10, 11, 12])
def test_iadd(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
data = OrderedDict({"Column 4": [10, 11, 12]})
s.column += data
self.assertEqual(s.column.Column_4, [10, 11, 12])
def test_add_wrong_type(self):
"""Add string type"""
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
with self.assertRaises((TypeError)):
s = s.column + "string type" # bang
def test_delete_named_column(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
del s.column["Column 2"]
assert s.number_of_columns() == 2
with self.assertRaises((ValueError)):
s.column["Column 2"] # bang
def test_delete_indexed_column1(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
del s.column[1]
assert s.number_of_columns() == 2
with self.assertRaises((ValueError)):
s.column["Column 2"] # access it after deletion, bang
def test_delete_indexed_column2(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
del s.column["Column 2"]
assert s.number_of_columns() == 2
with self.assertRaises((ValueError)):
s.column["Column 2"] # access it after deletion, bang
def test_delete_indexed_column(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(0)
s.delete_named_column_at(1)
assert s.number_of_columns() == 2
with self.assertRaises((ValueError)):
s.column["Column 2"] # access it after deletion, bang
def test_delete_column(self):
s = Sheet(self.data, "test")
del s.column[1, 2]
assert s.number_of_columns() == 1
with self.assertRaises((ValueError)):
s.column["Column 2"] # access it after deletion, bang
class TestSheetColumn2(unittest.TestCase):
def setUp(self):
self.data = [
[1, 2, 3],
[4, 5, 6],
["Column 1", "Column 2", "Column 3"],
[7, 8, 9],
]
def test_series(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
self.assertEqual(s.colnames, ["Column 1", "Column 2", "Column 3"])
custom_columns = ["C1", "C2", "C3"]
s.colnames = custom_columns
self.assertEqual(s.colnames, custom_columns)
def test_series2(self):
custom_columns = ["C1", "C2", "C3"]
s = Sheet(self.data, "test", colnames=custom_columns)
self.assertEqual(s.colnames, custom_columns)
def test_series3(self):
custom_columns = ["C1", "C2", "C3"]
with self.assertRaises((NotImplementedError)):
Sheet(
self.data,
"test",
colnames=custom_columns,
name_columns_by_row=0,
)
def test_formatter_by_named_column(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
s.column.format("Column 1", str)
self.assertEqual(s.column["Column 1"], ["1", "4", "7"])
def test_add(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
data = OrderedDict({"Column 4": [10, 11, 12]})
s1 = s.column + data
self.assertEqual(s1.column["Column 4"], [10, 11, 12])
self.assertNotIn("Column 4", s.column)
def test_iadd(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
data = OrderedDict({"Column 4": [10, 11, 12]})
s.column += data
self.assertEqual(s.column["Column 4"], [10, 11, 12])
def test_dot_notation(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
self.assertEqual(s.column.Column_3, [3, 6, 9])
def test_delete_named_column(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
del s.column["Column 2"]
assert s.number_of_columns() == 2
with self.assertRaises((ValueError)):
s.column["Column 2"] # bang
def METHOD_NAME(self):
s = Sheet(self.data, "test")
s.name_columns_by_row(2)
s.row[0] = [10000, 1, 11]
self.assertEqual(s.row[0], [10000, 1, 11])
| null |
4,860 |
import os
import subprocess
import numpy as np
from libensemble.message_numbers import EVAL_GEN_TAG, FINISHED_PERSISTENT_GEN_TAG, PERSIS_STOP, STOP_TAG
from libensemble.tools.persistent_support import PersistentSupport
def METHOD_NAME(x_f_pairs, gen_specs, noise_h_mat):
U = gen_specs["user"]
x0 = U["x0"]
# This function constructs H0 to contain points to be sent back to the
# manager to be evaluated
n = len(x0)
E = np.eye(n)
nf = U["nf"]
H0 = np.zeros(len(x_f_pairs) * nf, dtype=gen_specs["out"])
ind = 0
for i, j in x_f_pairs:
for k in range(nf + 1):
if k != nf // 2:
H0["x"][ind] = x0 + (k - nf / 2) * noise_h_mat[i, j] * E[i]
H0["x_ind"][ind] = i
H0["f_ind"][ind] = j
H0["n_ind"][ind] = k
ind += 1
return H0
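# A rough worked example of the stencil built by the helper above (sizes are
# made up): with nf=2, each (x_ind=i, f_ind=j) pair expands to the two offset
# points x0 - noise_h_mat[i, j] * E[i] and x0 + noise_h_mat[i, j] * E[i]; the
# k == nf//2 midpoint is skipped because f(x0) is already known, so H0 holds
# len(x_f_pairs) * nf rows in total.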
def fd_param_finder(H, persis_info, gen_specs, libE_info):
"""
This generation function loops through a set of suitable finite difference
parameters for a mapping F from R^n to R^m.
.. seealso::
        `test_persistent_fd_param_finder.py <https://github.com/Libensemble/libensemble/blob/develop/libensemble/tests/regression_tests/test_persistent_fd_param_finder.py>`_  # noqa
"""
U = gen_specs["user"]
p = U["p"]
x0 = U["x0"]
nf = U["nf"]
noise_h_mat = U["noise_h_mat"]
inform = np.zeros_like(noise_h_mat)
Fnoise = np.zeros_like(noise_h_mat)
maxnoiseits = U["maxnoiseits"]
ps = PersistentSupport(libE_info, EVAL_GEN_TAG)
n = len(x0)
Fhist0 = np.zeros((n, p, nf + 1))
tag = None
# # Request evaluations of the base point x0 at all p f_inds
# H0 = np.zeros(p, dtype=gen_specs["out"])
# for j in range(p):
# H0["x"][j] = x0
# H0["x_ind"][j] = -1 # Marking these to know they are the basepoint
# H0["f_ind"][j] = j
# H0["n_ind"][j] = nf/2
# tag, Work, calc_in = sendrecv_mgr_worker_msg(comm, H0)
for i in range(n):
for j in range(p):
# Fhist0[i, j, nf//2] = calc_in["f_val"][calc_in["f_ind"]==j]
Fhist0[i, j, nf // 2] = U["f0"][j]
    x_f_pairs = np.array(np.meshgrid(range(n), range(p))).T.reshape(-1, 2)
H0 = METHOD_NAME(x_f_pairs, gen_specs, noise_h_mat)
iters = np.ones_like(noise_h_mat)
tag, Work, calc_in = ps.send_recv(H0)
# import matlab.engine
# eng = matlab.engine.start_matlab()
# Send nf points for each (x_ind, f_ind) pair
while tag not in [STOP_TAG, PERSIS_STOP]:
x_f_pairs = np.unique(calc_in[["x_ind", "f_ind"]])
x_f_pairs_new = []
# Update Fhist0
for i, j in x_f_pairs:
for k in range(nf + 1):
if k != nf / 2:
logical_conds = (calc_in["x_ind"] == i, calc_in["f_ind"] == j, calc_in["n_ind"] == k)
Fhist0[i, j, k] = calc_in["f_val"][np.logical_and.reduce(logical_conds)][0]
# Compute noise for (i, j):
# [Fnoise(i, j), ~, inform(i, j)] = ECnoise(nf-1, Fhist0(i, j, 2:nf));
# t = eng.ECnoise(nf+1, matlab.double(Fhist0[i, j, :nf+1]), nargout=3)
# # Optional: check to see what would get with 2 fewer evals (requires nf>=4):
# [Fnoise2(i, j), ~, inform2(i, j)] = ECnoise(nf-1, Fhist0(i, j, 2:nf));
# cmd = ["/home/jlarson/software/MATLAB/R2019a/bin/matlab", "-batch",
cmd = [
"octave",
"--no-window-system",
"--eval",
"F=[" + " ".join([f"{x:18.18f}" for x in Fhist0[i, j, : nf + 1]]) + "];"
"nf=" + str(nf) + "';"
"[fnoise, ~, inform] = ECnoise(nf+1, F);"
"dlmwrite('fnoise.out', fnoise, 'delimiter', ' ', 'precision', 16);"
"dlmwrite('inform.out', inform, 'delimiter', ' ', 'precision', 16);"
"exit",
]
p = subprocess.call(cmd, shell=False, stdout=subprocess.DEVNULL)
inform[i, j] = np.loadtxt("inform.out")
if inform[i, j] >= 2:
# Mark as needing more points for this noise_h_mat value
if iters[i, j] < maxnoiseits:
iters[i, j] += 1
x_f_pairs_new.append((i, j))
if inform[i, j] == 3:
noise_h_mat[i, j] = noise_h_mat[i, j] / 100
else:
noise_h_mat[i, j] = noise_h_mat[i, j] * 100
else:
# We have successfully identified the Fnoise
Fnoise[i, j] = np.loadtxt("fnoise.out")
os.remove("inform.out")
os.remove("fnoise.out")
if np.all(inform == 1):
break
H0 = METHOD_NAME(x_f_pairs_new, gen_specs, noise_h_mat)
tag, Work, calc_in = ps.send_recv(H0)
persis_info["Fnoise"] = Fnoise
return H0, persis_info, FINISHED_PERSISTENT_GEN_TAG
| null |
4,861 |
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from fastapi.utils import match_pydantic_error_url
@pytest.fixture(name="client")
def get_client():
from docs_src.body_multiple_params.tutorial001_an import app
client = TestClient(app)
return client
def test_post_body_q_bar_content(client: TestClient):
response = client.put("/items/5?q=bar", json={"name": "Foo", "price": 50.5})
assert response.status_code == 200
assert response.json() == {
"item_id": 5,
"item": {
"name": "Foo",
"price": 50.5,
"description": None,
"tax": None,
},
"q": "bar",
}
def test_post_no_body_q_bar(client: TestClient):
response = client.put("/items/5?q=bar", json=None)
assert response.status_code == 200
assert response.json() == {"item_id": 5, "q": "bar"}
def METHOD_NAME(client: TestClient):
response = client.put("/items/5", json=None)
assert response.status_code == 200
assert response.json() == {"item_id": 5}
def test_post_id_foo(client: TestClient):
response = client.put("/items/foo", json=None)
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "int_parsing",
"loc": ["path", "item_id"],
"msg": "Input should be a valid integer, unable to parse string as an integer",
"input": "foo",
"url": match_pydantic_error_url("int_parsing"),
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["path", "item_id"],
"msg": "value is not a valid integer",
"type": "type_error.integer",
}
]
}
)
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {
"title": "The ID of the item to get",
"maximum": 1000.0,
"minimum": 0.0,
"type": "integer",
},
"name": "item_id",
"in": "path",
},
{
"required": False,
"schema": IsDict(
{
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Q",
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Q", "type": "string"}
),
"name": "q",
"in": "query",
},
],
"requestBody": {
"content": {
"application/json": {
"schema": IsDict(
{
"anyOf": [
{"$ref": "#/components/schemas/Item"},
{"type": "null"},
],
"title": "Item",
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"$ref": "#/components/schemas/Item"}
)
}
}
},
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"description": IsDict(
{
"title": "Description",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Description", "type": "string"}
),
"price": {"title": "Price", "type": "number"},
"tax": IsDict(
{
"title": "Tax",
"anyOf": [{"type": "number"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Tax", "type": "number"}
),
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
| null |
4,862 |
# Status: ported.
# Base revision: 45462
#
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Implements scanners: objects that compute implicit dependencies for
# files, such as includes in C++.
#
# A scanner has a regular expression used to find dependencies, some
# data needed to interpret those dependencies (for example, include
# paths), and code which actually establishes the needed relationships
# between actual jam targets.
#
# Scanner objects are created by actions when they try to actualize
# virtual targets, are passed to the 'virtual-target.actualize' method,
# and are then associated with actual targets. It is possible to use
# several scanners for a virtual target. For example, a single source
# might be used by two compile actions with different include paths.
# In this case, two different actual targets will be created, each
# having a scanner of its own.
#
# Typically, scanners are created from the target type and the action's
# properties, using the rule 'get' in this module. Directly creating
# scanners is not recommended, because it might create many equivalent
# but different instances, and lead to unneeded duplication of
# actual targets. However, actions can also create scanners in a special
# way, instead of relying on just the target type.
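#
# A rough usage sketch (the scanner class name is illustrative only, not part
# of this module): a scanner class is registered once together with the
# property features it depends on; an instance for a concrete property set is
# later obtained through the 'get' rule and cached per relevant property
# values, e.g.:
#
#   register(MyIncludeScanner, "include")   # hypothetical Scanner subclass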
import property
import bjam
import os
from b2.manager import get_manager
from b2.util import is_iterable_typed
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __scanners, __rv_cache, __scanner_cache
# Maps registered scanner classes to relevant properties
__scanners = {}
# A cache of scanners.
# The key is: class_name.properties_tag, where properties_tag is the concatenation
# of all relevant properties, separated by '-'
__scanner_cache = {}
reset ()
def register(scanner_class, relevant_properties):
""" Registers a new generator class, specifying a set of
properties relevant to this scanner. Ctor for that class
should have one parameter: list of properties.
"""
assert issubclass(scanner_class, Scanner)
assert isinstance(relevant_properties, basestring)
__scanners[str(scanner_class)] = relevant_properties
def registered(scanner_class):
""" Returns true iff a scanner of that class is registered
"""
return str(scanner_class) in __scanners
def METHOD_NAME(scanner_class, properties):
""" Returns an instance of previously registered scanner
with the specified properties.
"""
assert issubclass(scanner_class, Scanner)
assert is_iterable_typed(properties, basestring)
scanner_name = str(scanner_class)
if not registered(scanner_name):
raise BaseException ("attempt to get unregistered scanner: %s" % scanner_name)
relevant_properties = __scanners[scanner_name]
r = property.select(relevant_properties, properties)
scanner_id = scanner_name + '.' + '-'.join(r)
if scanner_id not in __scanner_cache:
__scanner_cache[scanner_id] = scanner_class(r)
return __scanner_cache[scanner_id]
class Scanner:
""" Base scanner class.
"""
def __init__ (self):
pass
def pattern (self):
""" Returns a pattern to use for scanning.
"""
raise BaseException ("method must be overridden")
def process (self, target, matches, binding):
""" Establish necessary relationship between targets,
given actual target being scanned, and a list of
pattern matches in that file.
"""
raise BaseException ("method must be overridden")
# Common scanner class, which can be used when there's only one
# kind of includes (unlike C, where "" and <> includes have different
# search paths).
class CommonScanner(Scanner):
def __init__ (self, includes):
Scanner.__init__(self)
self.includes = includes
def process(self, target, matches, binding):
target_path = os.path.normpath(os.path.dirname(binding[0]))
bjam.call("mark-included", target, matches)
get_manager().engine().set_target_variable(matches, "SEARCH",
[target_path] + self.includes)
get_manager().scanners().propagate(self, matches)
class ScannerRegistry:
def __init__ (self, manager):
self.manager_ = manager
self.count_ = 0
self.exported_scanners_ = {}
def install (self, scanner, target, vtarget):
""" Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized.
"""
assert isinstance(scanner, Scanner)
assert isinstance(target, basestring)
assert isinstance(vtarget, basestring)
engine = self.manager_.engine()
engine.set_target_variable(target, "HDRSCAN", scanner.pattern())
if scanner not in self.exported_scanners_:
exported_name = "scanner_" + str(self.count_)
self.count_ = self.count_ + 1
self.exported_scanners_[scanner] = exported_name
bjam.import_rule("", exported_name, scanner.process)
else:
exported_name = self.exported_scanners_[scanner]
engine.set_target_variable(target, "HDRRULE", exported_name)
        # The scanner reflects differences in properties affecting the binding
        # of 'target'; when includes are processed for it, this information
        # tells us how to interpret quoted includes.
engine.set_target_variable(target, "HDRGRIST", str(id(scanner)))
pass
def propagate(self, scanner, targets):
assert isinstance(scanner, Scanner)
assert is_iterable_typed(targets, basestring) or isinstance(targets, basestring)
engine = self.manager_.engine()
engine.set_target_variable(targets, "HDRSCAN", scanner.pattern())
engine.set_target_variable(targets, "HDRRULE",
self.exported_scanners_[scanner])
engine.set_target_variable(targets, "HDRGRIST", str(id(scanner)))
| null |
4,863 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from mantid.plots.legend import LegendProperties
from mantid.plots.utility import convert_color_to_hex
from workbench.plotting.plotscriptgenerator.utils import convert_args_to_string
# Default values of all options that are accessible via the legend tab in the plot settings.
mpl_default_kwargs = {
"visible": True,
"title": "",
"background_color": convert_color_to_hex(rcParams["axes.facecolor"]), # inherits from axes by default
"edge_color": convert_color_to_hex(rcParams["legend.edgecolor"]),
"transparency": rcParams["legend.framealpha"],
"entries_font": "DejaVu Sans",
"entries_size": rcParams["legend.fontsize"],
"entries_color": "#000000",
"title_font": "DejaVu Sans",
"title_size": rcParams["axes.labelsize"], # Uses axes size by default
"title_color": "#000000",
"marker_size": rcParams["legend.handlelength"],
"box_visible": rcParams["legend.frameon"],
"shadow": rcParams["legend.shadow"],
"round_edges": rcParams["legend.fancybox"],
"columns": 1,
"column_spacing": rcParams["legend.columnspacing"],
"label_spacing": rcParams["legend.labelspacing"],
"marker_position": "Left of Entries",
"markers": rcParams["legend.numpoints"],
"border_padding": rcParams["legend.borderpad"],
"marker_label_padding": rcParams["legend.handletextpad"],
}
# Dictionary to convert from the mantid legend interface to matplotlib legend argument names.
MANTID_TO_MPL = {
"background_color": "facecolor",
"edge_color": "edgecolor",
"transparency": "framealpha",
"entries_size": "fontsize",
"columns": "ncol",
"markers": "numpoints",
"marker_position": "markerfirst",
"box_visible": "frameon",
"round_edges": "fancybox",
"shadow": "shadow",
"title": "title",
"border_padding": "borderpad",
"label_spacing": "labelspacing",
"marker_size": "handlelength",
"marker_label_padding": "handletextpad",
"column_spacing": "columnspacing",
}
def generate_legend_commands(legend):
"""
Generates a string containing a comma separated list of kwargs to set legend properties.
"""
kwargs = get_legend_command_kwargs(legend)
return convert_args_to_string([], kwargs)
def generate_title_font_commands(legend, legend_object_var):
"""
Generate commands for setting properties for the legend title font.
"""
title_commands = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if "title_font" in kwargs:
title_commands.append(legend_object_var + ".get_title().set_fontname('" + kwargs["title_font"] + "')")
if "title_color" in kwargs:
title_commands.append(legend_object_var + ".get_title().set_color('" + kwargs["title_color"] + "')")
if "title_size" in kwargs:
title_commands.append(legend_object_var + ".get_title().set_fontsize('" + str(kwargs["title_size"]) + "')")
return title_commands
def generate_label_font_commands(legend, legend_object_var):
"""
Generate python commands for setting the legend text label properties. The size is not present here because it is
already included in the list of legend properties.
"""
label_commands = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if "entries_font" in kwargs:
label_commands.append("[label.set_fontname('" + kwargs["entries_font"] + "') for label in " + legend_object_var + ".get_texts()]")
if "entries_color" in kwargs:
label_commands.append("[label.set_color('" + kwargs["entries_color"] + "') for label in " + legend_object_var + ".get_texts()]")
return label_commands
def METHOD_NAME(legend, legend_object_var):
"""
Returns a command to set the visibility of the legend if it's different to the default value.
It's returned as a list for convenience, so it can be added to the end of a list without checking if it's empty.
"""
visible_command = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if "visible" in kwargs:
visible_command.append(legend_object_var + ".set_visible(" + str(kwargs["visible"]) + ")")
return visible_command
def get_legend_command_kwargs(legend):
"""
Returns a list of matplotlib legend kwargs, removing any that are default values.
"""
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
# Convert the kwargs to the matplotlib ones.
return get_mpl_kwargs(kwargs)
def get_mpl_kwargs(kwargs):
"""
Keep only matplotlib legend kwargs, and convert the keys to matplotlib compatible ones.
"""
mpl_kwargs = {}
for key, value in kwargs.items():
if key in MANTID_TO_MPL:
mpl_kwargs[MANTID_TO_MPL[key]] = value
# The markerfirst kwarg is a boolean in matplotlib, so need to convert it.
if "markerfirst" in mpl_kwargs:
mpl_kwargs["markerfirst"] = mpl_kwargs["markerfirst"] == "Left of Entries"
return mpl_kwargs
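# A rough illustration of the conversion above (values are made up): a
# Mantid-side dict such as
#   {"background_color": "#ffffff", "columns": 2, "marker_position": "Left of Entries"}
# would come out as
#   {"facecolor": "#ffffff", "ncol": 2, "markerfirst": True}.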
def _remove_kwargs_if_default(kwargs):
"""
Remove kwargs from the given dict if they're the default values
"""
for kwarg, default_value in mpl_default_kwargs.items():
if kwargs[kwarg] == default_value:
kwargs.pop(kwarg)
# Font size defaults are string values (e.g. 'medium', 'large', 'x-large'), so we need to convert the defaults to
# point sizes before comparing.
if "title_size" in kwargs:
if convert_to_point_size(kwargs["title_size"]) == convert_to_point_size(mpl_default_kwargs["title_size"]):
kwargs.pop("title_size")
if "entries_size" in kwargs:
if convert_to_point_size(kwargs["entries_size"]) == convert_to_point_size(mpl_default_kwargs["entries_size"]):
kwargs.pop("entries_size")
# Hex values of colours may not be the same case, so convert to lower before comparing.
if "background_color" in kwargs:
if kwargs["background_color"].lower() == mpl_default_kwargs["background_color"].lower():
kwargs.pop("background_color")
if "edge_color" in kwargs:
if kwargs["edge_color"].lower() == mpl_default_kwargs["edge_color"].lower():
kwargs.pop("edge_color")
def convert_to_point_size(font_size):
"""
Convert font size (may be int or string, e.g. 'medium', 'large', ...) to point size.
"""
font = FontProperties()
font.set_size(font_size)
return font.get_size_in_points()
| null |
4,864 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from pgadmin.utils import server_utils
from . import utils as columns_utils
class ColumnAddTestCase(BaseTestGenerator):
"""This class will add new column under table node."""
url = '/browser/column/obj/'
# Generates scenarios
scenarios = utils.generate_scenarios("column_create",
columns_utils.test_cases)
def setUp(self):
super().setUp()
# Load test data
self.data = self.test_data
# Create db
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
# Check DB version
if "server_min_version" in self.data:
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add "
"a table.")
if server_con["data"]["version"] < \
self.data["server_min_version"]:
self.skipTest(self.data["skip_msg"])
# Create db connection
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a table.")
# Create schema
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a table.")
# Create table
self.table_name = "table_column_%s" % (str(uuid.uuid4())[1:8])
self.table_id = tables_utils.create_table(self.server, self.db_name,
self.schema_name,
self.table_name)
def METHOD_NAME(self):
"""This function will add column under table node."""
if "name" in self.data:
self.data["name"] = self.data["name"] + (str(uuid.uuid4())[1:8])
if self.is_positive_test:
response = columns_utils.api_create(self)
# Assert response
utils.assert_status_code(self, response)
# Verify in backend
self.assertIsNotNone(columns_utils.verify_column
(self.server, self.db_name,
self.data["name"]),
"Column not found")
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
response = columns_utils.api_create(self)
else:
if 'table_id' in self.data:
self.table_id = self.data['table_id']
response = columns_utils.api_create(self)
# Assert response
utils.assert_status_code(self, response)
utils.assert_error_message(self, response)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
| null |
4,865 |
# Copyright (C) 2015 Canonical Ltd.
# Copyright (C) 2015 VMware Inc.
#
# Author: Sankar Tanguturi <[email protected]>
#
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.sources.helpers.vmware.imc.nic import Nic
class Config:
"""
Stores the Contents specified in the Customization
Specification file.
"""
CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME"
DNS = "DNS|NAMESERVER|"
DOMAINNAME = "NETWORK|DOMAINNAME"
HOSTNAME = "NETWORK|HOSTNAME"
MARKERID = "MISC|MARKER-ID"
PASS = "PASSWORD|-PASS"
RESETPASS = "PASSWORD|RESET"
SUFFIX = "DNS|SUFFIX|"
TIMEZONE = "DATETIME|TIMEZONE"
UTC = "DATETIME|UTC"
POST_GC_STATUS = "MISC|POST-GC-STATUS"
DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
CLOUDINIT_INSTANCE_ID = "CLOUDINIT|INSTANCE-ID"
def __init__(self, configFile):
self._configFile = configFile
@property
def host_name(self):
"""Return the hostname."""
return self._configFile.get(Config.HOSTNAME, None)
@property
def domain_name(self):
"""Return the domain name."""
return self._configFile.get(Config.DOMAINNAME, None)
@property
def timezone(self):
"""Return the timezone."""
return self._configFile.get(Config.TIMEZONE, None)
@property
def admin_password(self):
"""Return the root password to be set."""
return self._configFile.get(Config.PASS, None)
@property
def name_servers(self):
"""Return the list of DNS servers."""
res = []
cnt = self._configFile.get_count_with_prefix(Config.DNS)
for i in range(1, cnt + 1):
key = Config.DNS + str(i)
res.append(self._configFile[key])
return res
@property
def dns_suffixes(self):
"""Return the list of DNS Suffixes."""
res = []
cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
for i in range(1, cnt + 1):
key = Config.SUFFIX + str(i)
res.append(self._configFile[key])
return res
@property
def nics(self):
"""Return the list of associated NICs."""
res = []
nics = self._configFile["NIC-CONFIG|NICS"]
for nic in nics.split(","):
res.append(Nic(nic, self._configFile))
return res
@property
def reset_password(self):
"""Retreives if the root password needs to be reset."""
resetPass = self._configFile.get(Config.RESETPASS, "no")
resetPass = resetPass.lower()
if resetPass not in ("yes", "no"):
raise ValueError("ResetPassword value should be yes/no")
return resetPass == "yes"
@property
def METHOD_NAME(self):
"""Returns marker id."""
return self._configFile.get(Config.MARKERID, None)
@property
def custom_script_name(self):
"""Return the name of custom (pre/post) script."""
return self._configFile.get(Config.CUSTOM_SCRIPT, None)
@property
def post_gc_status(self):
"""Return whether to post guestinfo.gc.status VMX property."""
postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no")
postGcStatus = postGcStatus.lower()
if postGcStatus not in ("yes", "no"):
raise ValueError("PostGcStatus value should be yes/no")
return postGcStatus == "yes"
@property
def default_run_post_script(self):
"""
Return enable-custom-scripts default value if enable-custom-scripts
is absent in VM Tools configuration
"""
defaultRunPostScript = self._configFile.get(
Config.DEFAULT_RUN_POST_SCRIPT, "no"
)
defaultRunPostScript = defaultRunPostScript.lower()
if defaultRunPostScript not in ("yes", "no"):
raise ValueError("defaultRunPostScript value should be yes/no")
return defaultRunPostScript == "yes"
@property
def meta_data_name(self):
"""Return the name of cloud-init meta data."""
return self._configFile.get(Config.CLOUDINIT_META_DATA, None)
@property
def user_data_name(self):
"""Return the name of cloud-init user data."""
return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
@property
def instance_id(self):
"""Return instance id"""
return self._configFile.get(Config.CLOUDINIT_INSTANCE_ID, None)
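# A hedged usage sketch (ConfigFile below is assumed to be the companion
# parser from this package; it is not imported by this module and the path is
# illustrative):
#
#   conf = Config(ConfigFile("cust.cfg"))
#   print(conf.host_name, conf.domain_name, conf.timezone)
#   for nic in conf.nics:
#       ...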
# vi: ts=4 expandtab
| null |
4,866 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDashboardResult',
'AwaitableGetDashboardResult',
'get_dashboard',
'get_dashboard_output',
]
@pulumi.output_type
class GetDashboardResult:
"""
A collection of values returned by getDashboard.
"""
def __init__(__self__, dashboard_properties=None, display_name=None, id=None, location=None, name=None, resource_group_name=None, METHOD_NAME=None):
if dashboard_properties and not isinstance(dashboard_properties, str):
raise TypeError("Expected argument 'dashboard_properties' to be a str")
pulumi.set(__self__, "dashboard_properties", dashboard_properties)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
@property
@pulumi.getter(name="dashboardProperties")
def dashboard_properties(self) -> str:
"""
JSON data representing dashboard body.
"""
return pulumi.get(self, "dashboard_properties")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region where the shared Azure Portal dashboard exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def METHOD_NAME(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the shared Azure Portal dashboard.
"""
return pulumi.get(self, "tags")
class AwaitableGetDashboardResult(GetDashboardResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDashboardResult(
dashboard_properties=self.dashboard_properties,
display_name=self.display_name,
id=self.id,
location=self.location,
name=self.name,
resource_group_name=self.resource_group_name,
METHOD_NAME=self.METHOD_NAME)
def get_dashboard(dashboard_properties: Optional[str] = None,
display_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDashboardResult:
"""
Use this data source to access information about an existing shared dashboard in the Azure Portal. This is the data source of the `portal.Dashboard` resource.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.portal.get_dashboard(name="existing-dashboard",
resource_group_name="dashboard-rg")
pulumi.export("id", data["azurerm_dashboard"]["example"]["id"])
```
:param str dashboard_properties: JSON data representing dashboard body.
:param str display_name: Specifies the display name of the shared Azure Portal Dashboard.
:param str name: Specifies the name of the shared Azure Portal Dashboard.
:param str resource_group_name: Specifies the name of the resource group the shared Azure Portal Dashboard is located in.
"""
__args__ = dict()
__args__['dashboardProperties'] = dashboard_properties
__args__['displayName'] = display_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:portal/getDashboard:getDashboard', __args__, opts=opts, typ=GetDashboardResult).value
return AwaitableGetDashboardResult(
dashboard_properties=pulumi.get(__ret__, 'dashboard_properties'),
display_name=pulumi.get(__ret__, 'display_name'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
METHOD_NAME=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_dashboard)
def get_dashboard_output(dashboard_properties: Optional[pulumi.Input[Optional[str]]] = None,
display_name: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDashboardResult]:
"""
Use this data source to access information about an existing shared dashboard in the Azure Portal. This is the data source of the `portal.Dashboard` resource.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.portal.get_dashboard(name="existing-dashboard",
resource_group_name="dashboard-rg")
pulumi.export("id", data["azurerm_dashboard"]["example"]["id"])
```
:param str dashboard_properties: JSON data representing dashboard body.
:param str display_name: Specifies the display name of the shared Azure Portal Dashboard.
:param str name: Specifies the name of the shared Azure Portal Dashboard.
:param str resource_group_name: Specifies the name of the resource group the shared Azure Portal Dashboard is located in.
"""
...
| null |
4,867 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import context, Tensor
import mindspore.ops.operations.image_ops as P
from mindspore import nn
class NetCropAndResizeGradBoxes(nn.Cell):
def __init__(self, method_="bilinear"):
super(NetCropAndResizeGradBoxes, self).__init__()
self.op = P.CropAndResizeGradBoxes(method_)
def construct(self, grads, images, boxes, box_index):
return self.op(grads, images, boxes, box_index)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("image_type", [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64])
def METHOD_NAME(image_type):
"""
Feature: Test CropAndResizeGradBoxes.
Description: grads, boxes type is float32, output type is float32.
Expectation: Check it by expected_output variable.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
others_type = np.float32
batch_size = 2
image_height = 32
image_width = 18
channels = 3
crop_height = 8
crop_width = 9
num_boxes = 2
total_values_1 = num_boxes * crop_height * crop_width * channels
input_grads = 1e-5 * np.arange(0, total_values_1).reshape((num_boxes, crop_height, crop_width, channels))
total_values_2 = batch_size * image_height * image_width * channels
input_image_tmp = np.arange(0, 256)
div = total_values_2 // 256
mod = total_values_2 % 256
input_image = np.append(np.repeat(input_image_tmp, div), input_image_tmp[:mod]).reshape(
(batch_size, image_height, image_width, channels))
input_boxes = np.array([[0.1, 0.5, 0.5, 0.0], [0.1, 0, 0.75, 1.75]])
input_box_index = np.array([1, 0]).astype(np.int32)
input_grads_tensor = Tensor(input_grads.astype(others_type))
input_image_tensor = Tensor(input_image.astype(image_type))
input_boxes_tensor = Tensor(input_boxes.astype(others_type))
input_box_index_tensor = Tensor(input_box_index)
net = NetCropAndResizeGradBoxes()
output = net(input_grads_tensor, input_image_tensor,
input_boxes_tensor, input_box_index_tensor)
output_ms = output.asnumpy()
expected_output = np.array([[9.326791763305664, 0.4429844617843628, 20.578969955444336, 0.3551655411720276],
[21.320859909057617, 0.7584426403045654, 27.210113525390625,
0.38604485988616943]]).astype(others_type)
error = np.ones(shape=[num_boxes, 4]) * 1.0e-4
diff = output_ms - expected_output
assert np.all(abs(diff) < error)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("image_type", [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64])
def test_crop_and_resize_grad_boxes_float64(image_type):
"""
Feature: Test CropAndResizeGradBoxes.
Description: grads, boxes type is float64, output type is float64.
Expectation: Check it by expected_output variable.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
others_type = np.float64
batch_size = 2
image_height = 34
image_width = 34
channels = 3
crop_height = 7
crop_width = 7
num_boxes = 2
total_values_1 = num_boxes * crop_height * crop_width * channels
input_grads = 1e-5 * np.arange(0, total_values_1).reshape((num_boxes, crop_height, crop_width, channels))
total_values_2 = batch_size * image_height * image_width * channels
input_image_tmp = np.arange(0, 256)
div = total_values_2 // 256
mod = total_values_2 % 256
input_image = np.append(np.repeat(input_image_tmp, div), input_image_tmp[:mod]).reshape(
(batch_size, image_height, image_width, channels))
input_boxes = np.array([[0.1, 0.5, 0.5, 0.7], [0.1, 0, 0.75, 0.85]])
input_box_index = np.array([0, 1]).astype(np.int32)
input_grads_tensor = Tensor(input_grads.astype(others_type))
input_image_tensor = Tensor(input_image.astype(image_type))
input_boxes_tensor = Tensor(input_boxes.astype(others_type))
input_box_index_tensor = Tensor(input_box_index)
net = NetCropAndResizeGradBoxes()
output = net(input_grads_tensor, input_image_tensor,
input_boxes_tensor, input_box_index_tensor)
output_ms = output.asnumpy()
expected_output = np.array([[4.165656089782715, 0.12503701448440552, 9.360515594482422, 0.20364297926425934],
[18.26944351196289, 0.6215707063674927, 23.362707138061523,
1.013537049293518]]).astype(others_type)
error = np.ones(shape=[num_boxes, 4]) * 1.0e-5
diff = output_ms - expected_output
assert np.all(abs(diff) < error)
| null |
4,868 |
#!/usr/bin/env python3
# This file is part of cloud-init. See LICENSE file for license information.
"""Debug network config format conversions."""
import argparse
import json
import os
import sys
import yaml
from cloudinit import distros, log, safeyaml
from cloudinit.net import (
eni,
netplan,
network_manager,
network_state,
networkd,
sysconfig,
)
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources.helpers import openstack
from cloudinit.sources.helpers.vmware.imc import guestcust_util
NAME = "net-convert"
def METHOD_NAME(parser=None):
"""Build or extend and arg parser for net-convert utility.
@param parser: Optional existing ArgumentParser instance representing the
subcommand which will be extended to support the args of this utility.
@returns: ArgumentParser with proper argument configuration.
"""
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
"-p",
"--network-data",
type=open,
metavar="PATH",
required=True,
help="The network configuration to read",
)
parser.add_argument(
"-k",
"--kind",
choices=[
"eni",
"network_data.json",
"yaml",
"azure-imds",
"vmware-imc",
],
required=True,
help="The format of the given network config",
)
parser.add_argument(
"-d",
"--directory",
metavar="PATH",
help="directory to place output in",
required=True,
)
parser.add_argument(
"-D",
"--distro",
choices=[
item for sublist in distros.OSFAMILIES.values() for item in sublist
],
required=True,
)
parser.add_argument(
"-m",
"--mac",
metavar="name,mac",
action="append",
help="interface name to mac mapping",
)
parser.add_argument(
"--debug", action="store_true", help="enable debug logging to stderr."
)
parser.add_argument(
"-O",
"--output-kind",
choices=["eni", "netplan", "networkd", "sysconfig", "network-manager"],
required=True,
help="The network config format to emit",
)
return parser
def handle_args(name, args):
if not args.directory.endswith("/"):
args.directory += "/"
if not os.path.isdir(args.directory):
os.makedirs(args.directory)
if args.debug:
log.setupBasicLogging(level=log.DEBUG)
else:
log.setupBasicLogging(level=log.WARN)
if args.mac:
known_macs = {}
for item in args.mac:
iface_name, iface_mac = item.split(",", 1)
known_macs[iface_mac] = iface_name
else:
known_macs = None
net_data = args.network_data.read()
if args.kind == "eni":
pre_ns = eni.convert_eni_data(net_data)
elif args.kind == "yaml":
pre_ns = safeyaml.load(net_data)
if "network" in pre_ns:
pre_ns = pre_ns.get("network")
if args.debug:
sys.stderr.write(
"\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""])
)
elif args.kind == "network_data.json":
pre_ns = openstack.convert_net_json(
json.loads(net_data), known_macs=known_macs
)
elif args.kind == "azure-imds":
pre_ns = azure.generate_network_config_from_instance_network_metadata(
json.loads(net_data)["network"]
)
elif args.kind == "vmware-imc":
config = guestcust_util.Config(
guestcust_util.ConfigFile(args.network_data.name)
)
pre_ns = guestcust_util.get_network_data_from_vmware_cust_cfg(
config, False
)
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
if args.output_kind == "eni":
r_cls = eni.Renderer
config = distro.renderer_configs.get("eni")
elif args.output_kind == "netplan":
r_cls = netplan.Renderer
config = distro.renderer_configs.get("netplan", {})
# don't run netplan generate/apply
config["postcmds"] = False
# trim leading slash
config["netplan_path"] = config["netplan_path"][1:]
# enable some netplan features
config["features"] = ["dhcp-use-domains", "ipv6-mtu"]
elif args.output_kind == "networkd":
r_cls = networkd.Renderer
config = distro.renderer_configs.get("networkd")
elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
config = distro.renderer_configs.get("sysconfig")
elif args.output_kind == "network-manager":
r_cls = network_manager.Renderer
config = distro.renderer_configs.get("network-manager")
else:
raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
ns = network_state.parse_net_config_data(pre_ns, renderer=r)
if args.debug:
sys.stderr.write("\n".join(["", "Internal State", yaml.dump(ns), ""]))
sys.stderr.write(
"".join(
[
"Read input format '%s' from '%s'.\n"
% (args.kind, args.network_data.name),
"Wrote output format '%s' to '%s'\n"
% (args.output_kind, args.directory),
]
)
+ "\n"
)
r.render_network_state(network_state=ns, target=args.directory)
if __name__ == "__main__":
args = METHOD_NAME().parse_args()
handle_args(NAME, args)
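# Illustrative example of the expected flags (paths and distro are placeholders,
# not taken from this module):
#   net-convert -p ./net_data.json -k network_data.json -d ./out -D ubuntu -O netplan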
| null |
4,869 |
import os
import shutil
from pathlib import Path
from typing import List, Optional
import git
from opal_common.security.tarsafe import TarSafe
from pydantic.error_wrappers import ValidationError
class TarFileToLocalGitExtractor:
"""This class takes tar file from remote api source and extract it to local
git, so we could manage update to opal clients.
Args:
local_clone_path(str): path for the local git to manage policies
tmp_bundle_path(Path): path to download bundle from api source
"""
def __init__(
self,
local_clone_path: str,
tmp_bundle_path: Path,
policy_bundle_git_add_pattern="*",
):
self.local_clone_path = local_clone_path
self.tmp_bundle_path = tmp_bundle_path
self.policy_bundle_git_add_pattern = policy_bundle_git_add_pattern
def METHOD_NAME(
self, init_commit_msg: str = "Init", should_init: bool = False
):
"""
        Commit the first version of the bundle, or the updates that come after it.
Args:
init_commit_msg(str): text of the commit msg
            should_init(bool): whether to init a new repo or reuse the existing one
"""
if should_init:
local_git = git.Repo.init(self.local_clone_path)
else:
local_git = git.Repo(self.local_clone_path)
prev_commit = None
if len(local_git.index.repo.heads):
prev_commit = local_git.index.repo.head.commit
local_git.index.add(self.policy_bundle_git_add_pattern)
new_commit = local_git.index.commit(init_commit_msg)
return local_git, prev_commit, new_commit
def create_local_git(self):
"""Extract bundle create local git and commit this initial state."""
self.extract_bundle_tar()
local_git = TarFileToLocalGitExtractor.is_git_repo(self.local_clone_path)
if not local_git or len(local_git.heads) == 0:
local_git = self.METHOD_NAME(should_init=True)
return local_git
def extract_bundle_to_local_git(self, commit_msg: str):
"""
Update local git with new bundle
Args:
commit_msg(str): text of the commit msg
"""
tmp_path = f"{self.local_clone_path}.bak"
os.rename(self.local_clone_path, tmp_path)
try:
self.extract_bundle_tar()
shutil.move(
os.path.join(tmp_path, ".git"),
os.path.join(self.local_clone_path, ".git"),
)
finally:
shutil.rmtree(tmp_path)
local_git, prev_commit, new_commit = self.METHOD_NAME(commit_msg)
return local_git, prev_commit, new_commit
    def extract_bundle_tar(self, mode: str = "r:gz") -> None:
"""
        Extract the bundle tar located at self.tmp_bundle_path.
        Uses TarSafe, which checks that the bundle file doesn't contain vulnerabilities such as path traversal.
        Args:
            mode(str): mode for TarSafe, defaults to "r:gz" which can open tar.gz files
"""
with TarSafe.open(self.tmp_bundle_path, mode=mode) as tar_file:
tar_file_names = tar_file.getnames()
TarFileToLocalGitExtractor.validate_tar_or_throw(tar_file_names)
tar_file.extractall(path=self.local_clone_path)
@staticmethod
def is_git_repo(path) -> Optional[git.Repo]:
"""
        Checks whether this path is a git repo; if it is, returns the Repo obj.
        Return:
            Repo obj if the path is a git repo, otherwise None
"""
local_git = False
try:
local_git = git.Repo(path)
_ = local_git.git_dir
return local_git
except Exception:
return None
@staticmethod
def validate_tar_or_throw(
tar_file_names: List[str], forbidden_filename: str = ".git"
):
if len(tar_file_names) == 0:
raise ValidationError("No files in bundle")
if forbidden_filename and forbidden_filename in tar_file_names:
raise ValidationError(
"No {forbidden_filename} files are allowed in OPAL api bundle".format(
forbidden_filename=forbidden_filename
)
)
| null |
4,870 |
# -*- coding: utf-8 -*-
"""elegant lattice parser.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
import re
from sirepo.template.line_parser import LineParser
# a map of old elegant names to the new name
_FIELD_ALIAS = PKDict(bmax="b_max")
def METHOD_NAME(lattice_text, rpn_variables, maxId=0):
parser = LineParser(maxId)
lines = lattice_text.replace("\r", "").split("\n")
prev_line = ""
models = PKDict(
beamlines=[],
elements=[],
default_beamline_name=None,
rpnVariables=PKDict(),
)
for line in lines:
parser.increment_line_number()
if re.search(r"^\s*\!", line):
continue
if re.search(r"\&\s*$", line):
prev_line += re.sub(r"(\s*\&\s*)$", "", line)
continue
if not _parse_line(parser, prev_line + line, models):
break
prev_line = ""
models["rpnVariables"] = [
PKDict(name=k, value=v) for k, v in models.rpnVariables.items()
] + rpn_variables
return models
def _parse_beamline(parser, name):
parser.assert_char("=")
return PKDict(
name=name,
id=parser.next_id(),
items=_parse_beamline_items(parser),
)
def _parse_beamline_items(parser):
parser.assert_char("(")
items = []
while True:
value = parser.parse_value()
if not value:
if parser.peek_char() == ",":
parser.assert_char(",")
continue
parser.raise_error("expecting beamline element")
if re.search(r"^[0-9]+$", value):
repeat_count = int(value)
parser.assert_char("*")
if parser.peek_char() == "(":
repeat_items = _parse_beamline_items(parser)
else:
repeat_items = [parser.parse_value()]
for _ in range(repeat_count):
for item in repeat_items:
items.append(item)
else:
items.append(value)
if parser.peek_char() == ",":
parser.assert_char(",")
else:
break
parser.assert_char(")")
return items
def _parse_element(parser, name, type):
el = PKDict(
_id=parser.next_id(),
type=type,
name=name,
)
while parser.peek_char() == ",":
parser.assert_char(",")
field = parser.parse_value()
if not field:
parser.assert_end_of_line()
if parser.peek_char() == "=":
parser.assert_char("=")
f = field.lower()
if f in _FIELD_ALIAS:
f = _FIELD_ALIAS[f]
el[f] = parser.parse_value()
return el
def _parse_line(parser, line, models):
line = line.lstrip()
# strip comments
line = re.sub(r"\s!.*$", "", line)
# ignore end of line ';'
line = re.sub(r";\s*$", "", line)
parser.set_line(line)
name = ""
while parser.peek_char() == ":":
# need to strip leading ':' and add to name, used as value break below
parser.assert_char(":")
name += ":"
name += parser.parse_value(r"[:\s,=)*]")
if re.search(r"^\%", name):
# rpn value
line = re.sub(r"\s*%\s*", "", line)
line = re.sub(r"\s+", " ", line)
_save_rpn_variables(line, models["rpnVariables"])
return True
if not name or not re.search(r"[:0-9A-Z]", name[0], re.IGNORECASE):
if name and name.upper() == "#INCLUDE":
parser.raise_error("#INCLUDE files not supported")
return True
if parser.peek_char() != ":":
if name.upper() == "USE" and parser.peek_char() == ",":
parser.assert_char(",")
models["default_beamline_name"] = parser.parse_value()
return True
if name.upper() == "RETURN":
return False
# ignore non-definition lines
return True
parser.assert_char(":")
type = parser.parse_value()
if not type:
parser.raise_error("expected type")
if type.upper() == "LINE":
models["beamlines"].append(_parse_beamline(parser, name))
else:
models["elements"].append(_parse_element(parser, name, type))
parser.assert_end_of_line()
return True
def _save_rpn_variables(line, rpn_variables):
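    # elegant RPN assignments have the form "<expression> sto <name>"; recurse on
    # the left-hand side so nested store operations are captured as well.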
m = re.match(r"(.*) sto ((\S+).*)", line)
if m:
val = _save_rpn_variables(m.group(1), rpn_variables)
var = m.group(3)
rpn_variables[var] = val
return m.group(2)
return line
| null |
4,871 |
"""
Test basic functionality of olevba[3]
"""
import unittest
import os
from os.path import join, splitext
import re
import json
# Directory with test data, independent of current working directory
from tests.test_utils import DATA_BASE_DIR, call_and_capture
class TestOlevbaBasic(unittest.TestCase):
"""Tests olevba basic functionality"""
def test_text_behaviour(self):
"""Test behaviour of olevba when presented with pure text file."""
self.do_test_behaviour('text')
def test_empty_behaviour(self):
"""Test behaviour of olevba when presented with pure text file."""
self.do_test_behaviour('empty')
def do_test_behaviour(self, filename):
"""Helper for test_{text,empty}_behaviour."""
input_file = join(DATA_BASE_DIR, 'basic', filename)
output, _ = call_and_capture('olevba', args=(input_file, ))
# check output
self.assertTrue(re.search(r'^Type:\s+Text\s*$', output, re.MULTILINE),
msg='"Type: Text" not found in output:\n' + output)
self.assertTrue(re.search(r'^No suspicious .+ found.$', output,
re.MULTILINE),
msg='"No suspicous...found" not found in output:\n' + \
output)
self.assertNotIn('error', output.lower())
# check warnings
for line in output.splitlines():
if line.startswith('WARNING ') and 'encrypted' in line:
continue # encryption warnings are ok
elif 'warn' in line.lower():
                self.fail('Found "warn" in output line: "{}"'
                          .format(line.rstrip()))
# TODO: I disabled this test because we do not log "not encrypted" as warning anymore
# to avoid other issues.
# If we really want to test this, then the test should be run with log level INFO:
# self.assertIn('not encrypted', output)
def test_rtf_behaviour(self):
"""Test behaviour of olevba when presented with an rtf file."""
input_file = join(DATA_BASE_DIR, 'msodde', 'RTF-Spec-1.7.rtf')
output, ret_code = call_and_capture('olevba', args=(input_file, ),
accept_nonzero_exit=True)
# check that return code is olevba.RETURN_OPEN_ERROR
self.assertEqual(ret_code, 5)
# check output:
self.assertIn('FileOpenError', output)
self.assertIn('is RTF', output)
self.assertIn('rtfobj', output)
# TODO: I disabled this test because we do not log "not encrypted" as warning anymore
# to avoid other issues.
# If we really want to test this, then the test should be run with log level INFO:
# self.assertIn('not encrypted', output)
# check warnings
for line in output.splitlines():
if line.startswith('WARNING ') and 'encrypted' in line:
continue # encryption warnings are ok
elif 'warn' in line.lower():
                self.fail('Found "warn" in output line: "{}"'
                          .format(line.rstrip()))
def METHOD_NAME(self):
"""
Test that encrypted files give a certain return code.
Currently, only the encryption applied by Office 2010 (CryptoApi RC4
Encryption) is tested.
"""
CRYPT_DIR = join(DATA_BASE_DIR, 'encrypted')
CRYPT_RETURN_CODE = 9
ADD_ARGS = [], ['-d', ], ['-a', ], ['-j', ], ['-t', ] # only 1st file
EXCEPTIONS = ['autostart-encrypt-standardpassword.xls', # These ...
'autostart-encrypt-standardpassword.xlsm', # files ...
'autostart-encrypt-standardpassword.xlsb', # are ...
'dde-test-encrypt-standardpassword.xls', # automati...
'dde-test-encrypt-standardpassword.xlsx', # ...cally...
'dde-test-encrypt-standardpassword.xlsm', # decrypted.
'dde-test-encrypt-standardpassword.xlsb']
for filename in os.listdir(CRYPT_DIR):
if filename in EXCEPTIONS:
continue
full_name = join(CRYPT_DIR, filename)
for args in ADD_ARGS:
_, ret_code = call_and_capture('olevba',
args=[full_name, ] + args,
accept_nonzero_exit=True)
self.assertEqual(ret_code, CRYPT_RETURN_CODE,
msg='Wrong return code {} for args {}'\
.format(ret_code, args + [filename, ]))
# test only first file with all arg combinations, others just
# without arg (test takes too long otherwise
ADD_ARGS = ([], )
def test_xlm(self):
"""Test that xlm macros are found."""
XLM_DIR = join(DATA_BASE_DIR, 'excel4-macros')
ADD_ARGS = ['-j']
for filename in os.listdir(XLM_DIR):
full_name = join(XLM_DIR, filename)
suffix = splitext(filename)[1]
out_str, ret_code = call_and_capture('olevba',
args=[full_name, ] + ADD_ARGS,
accept_nonzero_exit=True)
output = json.loads(out_str)
self.assertEqual(len(output), 2)
self.assertEqual(output[0]['type'], 'MetaInformation')
self.assertEqual(output[0]['script_name'], 'olevba')
result = output[1]
self.assertTrue(result['json_conversion_successful'])
if suffix in ('.xlsb', '.xltm', '.xlsm'):
# TODO: cannot extract xlm macros for these types yet
self.assertEqual(result['macros'], [])
else:
code = result['macros'][0]['code']
if suffix == '.slk':
self.assertIn('Excel 4 macros extracted', code)
else:
self.assertIn('Excel 4.0 macro sheet', code)
self.assertIn('Auto_Open', code)
if 'excel5' not in filename: # TODO: is not found in excel5
self.assertIn('ALERT(', code)
self.assertIn('HALT()', code)
self.assertIn(len(result['analysis']), (2, 3))
types = [entry['type'] for entry in result['analysis']]
keywords = [entry['keyword'] for entry in result['analysis']]
self.assertIn('Auto_Open', keywords)
self.assertIn('XLM macro', keywords)
self.assertIn('AutoExec', types)
self.assertIn('Suspicious', types)
# just in case somebody calls this file as a script
if __name__ == '__main__':
unittest.main()
| null |
4,872 |
"""Test program for the fcntl C module.
"""
import platform
import os
import struct
import sys
import unittest
from multiprocessing import Process
from test.support import verbose, cpython_only
from test.support.import_helper import import_module
from test.support.os_helper import TESTFN, unlink
# Skip test if no fcntl module.
fcntl = import_module('fcntl')
def get_lockdata():
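    # Build a platform-specific "struct flock" requesting an exclusive write lock
    # (F_WRLCK) over the whole file: start=0, len=0 means "from start to EOF".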
try:
os.O_LARGEFILE
except AttributeError:
start_len = "ll"
else:
start_len = "qq"
if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd'))
or sys.platform == 'darwin'):
if struct.calcsize('l') == 8:
off_t = 'l'
pid_t = 'i'
else:
off_t = 'lxxxx'
pid_t = 'l'
lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
fcntl.F_WRLCK, 0)
elif sys.platform.startswith('gnukfreebsd'):
lockdata = struct.pack('qqihhi', 0, 0, 0, fcntl.F_WRLCK, 0, 0)
elif sys.platform in ['hp-uxB', 'unixware7']:
lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
if verbose:
print('struct.pack: ', repr(lockdata))
return lockdata
lockdata = get_lockdata()
class BadFile:
def __init__(self, fn):
self.fn = fn
def fileno(self):
return self.fn
def try_lockf_on_other_process_fail(fname, cmd):
f = open(fname, 'wb+')
try:
fcntl.lockf(f, cmd)
except BlockingIOError:
pass
finally:
f.close()
def try_lockf_on_other_process(fname, cmd):
f = open(fname, 'wb+')
fcntl.lockf(f, cmd)
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
class TestFcntl(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f and not self.f.closed:
self.f.close()
unlink(TESTFN)
def METHOD_NAME(self):
# the example from the library docs
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_file_descriptor(self):
# again, but pass the file rather than numeric descriptor
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_bad_file(self):
with self.assertRaises(ValueError):
fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK)
@cpython_only
def test_fcntl_bad_file_overflow(self):
from _testcapi import INT_MAX, INT_MIN
# Issue 15989
with self.assertRaises(OverflowError):
fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK)
@unittest.skipIf(
platform.machine().startswith('arm') and platform.system() == 'Linux',
"ARM Linux returns EINVAL for F_NOTIFY DN_MULTISHOT")
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
try:
cmd = fcntl.F_NOTIFY
# This flag is larger than 2**31 in 64-bit builds
flags = fcntl.DN_MULTISHOT
except AttributeError:
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
fcntl.fcntl(fd, cmd, flags)
finally:
os.close(fd)
def test_flock(self):
# Solaris needs readable file for shared lock
self.f = open(TESTFN, 'wb+')
fileno = self.f.fileno()
fcntl.flock(fileno, fcntl.LOCK_SH)
fcntl.flock(fileno, fcntl.LOCK_UN)
fcntl.flock(self.f, fcntl.LOCK_SH | fcntl.LOCK_NB)
fcntl.flock(self.f, fcntl.LOCK_UN)
fcntl.flock(fileno, fcntl.LOCK_EX)
fcntl.flock(fileno, fcntl.LOCK_UN)
self.assertRaises(ValueError, fcntl.flock, -1, fcntl.LOCK_SH)
self.assertRaises(TypeError, fcntl.flock, 'spam', fcntl.LOCK_SH)
# TODO RustPython
@unittest.skipUnless(sys.platform == 'linux', 'test requires Linux')
@unittest.skipIf(platform.system() == "AIX", "AIX returns PermissionError")
def test_lockf_exclusive(self):
self.f = open(TESTFN, 'wb+')
cmd = fcntl.LOCK_EX | fcntl.LOCK_NB
fcntl.lockf(self.f, cmd)
p = Process(target=try_lockf_on_other_process_fail, args=(TESTFN, cmd))
p.start()
p.join()
fcntl.lockf(self.f, fcntl.LOCK_UN)
self.assertEqual(p.exitcode, 0)
# TODO RustPython
@unittest.skipUnless(sys.platform == 'linux', 'test requires Linux')
@unittest.skipIf(platform.system() == "AIX", "AIX returns PermissionError")
def test_lockf_share(self):
self.f = open(TESTFN, 'wb+')
cmd = fcntl.LOCK_SH | fcntl.LOCK_NB
fcntl.lockf(self.f, cmd)
p = Process(target=try_lockf_on_other_process, args=(TESTFN, cmd))
p.start()
p.join()
fcntl.lockf(self.f, fcntl.LOCK_UN)
self.assertEqual(p.exitcode, 0)
@cpython_only
def test_flock_overflow(self):
import _testcapi
self.assertRaises(OverflowError, fcntl.flock, _testcapi.INT_MAX+1,
fcntl.LOCK_SH)
@unittest.skipIf(sys.platform != 'darwin', "F_GETPATH is only available on macos")
def test_fcntl_f_getpath(self):
self.f = open(TESTFN, 'wb')
expected = os.path.abspath(TESTFN).encode('utf-8')
res = fcntl.fcntl(self.f.fileno(), fcntl.F_GETPATH, bytes(len(expected)))
self.assertEqual(expected, res)
@unittest.skipUnless(
hasattr(fcntl, "F_SETPIPE_SZ") and hasattr(fcntl, "F_GETPIPE_SZ"),
"F_SETPIPE_SZ and F_GETPIPE_SZ are not available on all platforms.")
def test_fcntl_f_pipesize(self):
test_pipe_r, test_pipe_w = os.pipe()
try:
# Get the default pipesize with F_GETPIPE_SZ
pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
pipesize = pipesize_default // 2 # A new value to detect change.
if pipesize < 512: # the POSIX minimum
                raise unittest.SkipTest(
'default pipesize too small to perform test.')
fcntl.fcntl(test_pipe_w, fcntl.F_SETPIPE_SZ, pipesize)
self.assertEqual(fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ),
pipesize)
finally:
os.close(test_pipe_r)
os.close(test_pipe_w)
if __name__ == '__main__':
unittest.main()
| null |
4,873 |
# SPDX-FileCopyrightText: Copyright DB Netz AG and the capellambse contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import contextlib
import logging
import os
import pathlib
import subprocess
import typing as t
from capellambse import helpers
from capellambse.loader.modelinfo import ModelInfo
from . import abc
LOGGER = logging.getLogger(__name__)
class LocalFileHandler(abc.FileHandler):
def __init__(
self,
path: str | os.PathLike,
*,
subdir: str | pathlib.PurePosixPath = "/",
) -> None:
path = pathlib.Path(path, helpers.normalize_pure_path(subdir))
super().__init__(path)
self.__transaction: set[pathlib.PurePosixPath] | None = None
assert isinstance(self.path, pathlib.Path)
def open(
self,
filename: str | pathlib.PurePosixPath,
mode: t.Literal["r", "rb", "w", "wb"] = "rb",
) -> t.BinaryIO:
assert isinstance(self.path, pathlib.Path)
normpath = helpers.normalize_pure_path(filename)
if "w" not in mode or self.__transaction is None:
path = self.path / normpath
return t.cast(t.BinaryIO, path.open(mode))
if normpath in self.__transaction:
raise RuntimeError(
f"File already written in this transaction: {normpath}"
)
self.__transaction.add(normpath)
tmppath = _tmpname(normpath)
return t.cast(t.BinaryIO, (self.path / tmppath).open(mode))
def get_model_info(self) -> ModelInfo:
assert isinstance(self.path, pathlib.Path)
if (self.path / ".git").exists():
return ModelInfo(
branch=self.__git_rev_parse("--abbrev-ref", "HEAD"),
title=self.path.name,
url=self.__git_get_remote_url(),
rev_hash=self.__git_rev_parse("HEAD"),
)
return ModelInfo(title=self.path.name)
@contextlib.contextmanager
def METHOD_NAME(
self, *, dry_run: bool = False, **kw: t.Any
) -> t.Generator[t.Mapping[str, t.Any], None, None]:
"""Start a write transaction.
During the transaction, file writes are redirected to temporary
files next to the target files, and if the transaction ends
successfully they are moved to their destinations all at once.
Parameters
----------
dry_run
Discard the temporary files after a successful transaction
instead of committing them to their destinations.
"""
assert isinstance(self.path, pathlib.Path)
with super().METHOD_NAME(**kw) as unused_kw:
if self.__transaction is not None:
raise RuntimeError("Another transaction is already open")
self.__transaction = set()
try:
yield unused_kw
except:
LOGGER.debug("Aborting transaction due to exception")
dry_run = True
raise
finally:
for file in self.__transaction:
tmpname = _tmpname(file)
if dry_run:
LOGGER.debug("Removing temporary file %s", tmpname)
(self.path / tmpname).unlink()
else:
LOGGER.debug("Committing file %s to %s", tmpname, file)
(self.path / tmpname).replace(self.path / file)
self.__transaction = None
@property
def rootdir(self) -> LocalFilePath:
return LocalFilePath(self, pathlib.PurePosixPath("/"))
def iterdir(
self, subdir: str | pathlib.PurePosixPath = "."
) -> t.Iterator[LocalFilePath]:
assert isinstance(self.path, pathlib.Path)
subdir = helpers.normalize_pure_path(subdir)
for p in self.path.joinpath(subdir).iterdir():
yield LocalFilePath(
self,
pathlib.PurePosixPath(p.relative_to(self.path)),
)
def __git_rev_parse(self, *options: str) -> str | None:
assert isinstance(self.path, pathlib.Path)
try:
return (
subprocess.run(
["git", "rev-parse", *options],
cwd=self.path,
check=True,
capture_output=True,
)
.stdout.decode("utf-8")
.strip()
)
except Exception as err:
LOGGER.debug(
"Git rev-parse with options %s failed: %s: %s",
options,
type(err).__name__,
err,
)
return None
def __git_get_remote_url(self) -> str | None:
assert isinstance(self.path, pathlib.Path)
try:
remotes = (
subprocess.run(
["git", "remote"],
cwd=self.path,
check=True,
capture_output=True,
)
.stdout.decode("utf-8")
.splitlines()
)
return (
subprocess.run(
["git", "remote", "get-url", remotes[0]],
cwd=self.path,
check=True,
capture_output=True,
)
.stdout.decode("utf-8")
.strip()
)
except (IndexError, subprocess.CalledProcessError):
return None
def _tmpname(filename: pathlib.PurePosixPath) -> pathlib.PurePosixPath:
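    # Produce a hidden ".<name>.tmp" sibling, truncating the stem so the result
    # stays within a typical 255-character filename limit.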
prefix = "."
suffix = ".tmp"
name = filename.name[0 : 255 - (len(prefix) + len(suffix))]
return filename.with_name(f"{prefix}{name}{suffix}")
class LocalFilePath(abc.AbstractFilePath[LocalFileHandler]):
def is_dir(self) -> bool:
base = t.cast(pathlib.Path, self._parent.path)
path = base.joinpath(self._path).resolve()
return path.is_dir()
def is_file(self) -> bool:
base = t.cast(pathlib.Path, self._parent.path)
path = base.joinpath(self._path).resolve()
return path.is_file()
| null |
4,874 |
import warnings
import random
from typing import Any, Iterator, List, Optional
import torch
from torch import Tensor
from avalanche.benchmarks.utils import make_classification_dataset
from avalanche.benchmarks.utils.data import AvalancheDataset
from avalanche.benchmarks.utils.data_loader import (
GroupBalancedInfiniteDataLoader,
)
from avalanche.models import avalanche_forward
from avalanche.training.plugins.strategy_plugin import SupervisedPlugin
class AGEMPlugin(SupervisedPlugin):
"""Average Gradient Episodic Memory Plugin.
AGEM projects the gradient on the current minibatch by using an external
episodic memory of patterns from previous experiences. If the dot product
between the current gradient and the (average) gradient of a randomly
sampled set of memory examples is negative, the gradient is projected.
This plugin does not use task identities.
"""
def __init__(self, patterns_per_experience: int, sample_size: int):
"""
:param patterns_per_experience: number of patterns per experience in the
memory.
:param sample_size: number of patterns in memory sample when computing
reference gradient.
"""
super().__init__()
self.patterns_per_experience = int(patterns_per_experience)
self.sample_size = int(sample_size)
# One AvalancheDataset for each experience
self.buffers: List[AvalancheDataset] = []
self.buffer_dataloader: Optional[GroupBalancedInfiniteDataLoader] = None
# Placeholder iterator to avoid typing issues
self.buffer_dliter: Iterator[Any] = iter([])
# Placeholder Tensor to avoid typing issues
self.reference_gradients: Tensor = torch.empty(0)
def before_training_iteration(self, strategy, **kwargs):
"""
Compute reference gradient on memory sample.
"""
if len(self.buffers) > 0:
strategy.model.train()
strategy.optimizer.zero_grad()
mb = self.sample_from_memory()
xref, yref, tid = mb[0], mb[1], mb[-1]
xref, yref = xref.to(strategy.device), yref.to(strategy.device)
out = avalanche_forward(strategy.model, xref, tid)
loss = strategy._criterion(out, yref)
loss.backward()
# gradient can be None for some head on multi-headed models
reference_gradients_list = [
p.grad.view(-1)
if p.grad is not None
else torch.zeros(p.numel(), device=strategy.device)
for n, p in strategy.model.named_parameters()
]
self.reference_gradients = torch.cat(reference_gradients_list)
strategy.optimizer.zero_grad()
@torch.no_grad()
def after_backward(self, strategy, **kwargs):
"""
Project gradient based on reference gradients
"""
if len(self.buffers) > 0:
current_gradients_list = [
p.grad.view(-1)
if p.grad is not None
else torch.zeros(p.numel(), device=strategy.device)
for n, p in strategy.model.named_parameters()
]
current_gradients = torch.cat(current_gradients_list)
assert (
current_gradients.shape == self.reference_gradients.shape
), "Different model parameters in AGEM projection"
dotg = torch.dot(current_gradients, self.reference_gradients)
if dotg < 0:
alpha2 = dotg / torch.dot(
self.reference_gradients, self.reference_gradients
)
grad_proj = current_gradients - self.reference_gradients * alpha2
count = 0
for n, p in strategy.model.named_parameters():
n_param = p.numel()
if p.grad is not None:
p.grad.copy_(grad_proj[count : count + n_param].view_as(p))
count += n_param
def METHOD_NAME(self, strategy, **kwargs):
"""Update replay memory with patterns from current experience."""
self.update_memory(strategy.experience.dataset, **kwargs)
def sample_from_memory(self):
"""
Sample a minibatch from memory.
Return a tuple of patterns (tensor), targets (tensor).
"""
return next(self.buffer_dliter)
@torch.no_grad()
def update_memory(self, dataset, num_workers=0, **kwargs):
"""
Update replay memory with patterns from current experience.
"""
if num_workers > 0:
warnings.warn(
"Num workers > 0 is known to cause heavy" "slowdowns in AGEM."
)
removed_els = len(dataset) - self.patterns_per_experience
if removed_els > 0:
indices = list(range(len(dataset)))
random.shuffle(indices)
dataset = dataset.subset(indices[: self.patterns_per_experience])
self.buffers.append(dataset)
persistent_workers = num_workers > 0
self.buffer_dataloader = GroupBalancedInfiniteDataLoader(
self.buffers,
batch_size=(self.sample_size // len(self.buffers)),
num_workers=num_workers,
pin_memory=False,
persistent_workers=persistent_workers,
)
self.buffer_dliter = iter(self.buffer_dataloader)
| null |
4,875 |
from __future__ import annotations
import itertools
from collections import defaultdict
from typing import Iterator, Optional, Sequence, TYPE_CHECKING
from game.ato.closestairfields import ObjectiveDistanceCache
from game.dcs.aircrafttype import AircraftType
from .squadrondefloader import SquadronDefLoader
from ..campaignloader.squadrondefgenerator import SquadronDefGenerator
from ..factions.faction import Faction
from ..theater import ControlPoint, MissionTarget
if TYPE_CHECKING:
from game.game import Game
from ..ato.flighttype import FlightType
from .squadron import Squadron
class AirWing:
def __init__(self, player: bool, game: Game, faction: Faction) -> None:
self.player = player
self.squadrons: dict[AircraftType, list[Squadron]] = defaultdict(list)
self.squadron_defs = SquadronDefLoader(game, faction).load()
self.squadron_def_generator = SquadronDefGenerator(faction)
self.settings = game.settings
def unclaim_squadron_def(self, squadron: Squadron) -> None:
if squadron.aircraft in self.squadron_defs:
for squadron_def in self.squadron_defs[squadron.aircraft]:
if squadron_def.claimed and squadron_def.name == squadron.name:
squadron_def.claimed = False
def add_squadron(self, squadron: Squadron) -> None:
self.squadrons[squadron.aircraft].append(squadron)
def squadrons_for(self, aircraft: AircraftType) -> Sequence[Squadron]:
return self.squadrons[aircraft]
def can_auto_plan(self, task: FlightType) -> bool:
try:
next(self.auto_assignable_for_task(task))
return True
except StopIteration:
return False
def best_squadrons_for(
self, location: MissionTarget, task: FlightType, size: int, this_turn: bool
) -> list[Squadron]:
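        # Collect capable squadrons from friendly airfields (nearest first), then rank
        # them by whether this is their primary task and by distance to the target.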
airfield_cache = ObjectiveDistanceCache.get_closest_airfields(location)
best_aircraft = AircraftType.priority_list_for_task(task)
ordered: list[Squadron] = []
for control_point in airfield_cache.operational_airfields:
if control_point.captured != self.player:
continue
capable_at_base = []
for squadron in control_point.squadrons:
if squadron.can_auto_assign_mission(location, task, size, this_turn):
capable_at_base.append(squadron)
if squadron.aircraft not in best_aircraft:
# If it is not already in the list it should be the last one
best_aircraft.append(squadron.aircraft)
ordered.extend(
sorted(
capable_at_base,
key=lambda s: best_aircraft.index(s.aircraft),
)
)
return sorted(
ordered,
key=lambda s: (
# This looks like the opposite of what we want because False sorts
# before True.
s.primary_task != task,
s.location.distance_to(location),
),
)
def best_squadron_for(
self, location: MissionTarget, task: FlightType, size: int, this_turn: bool
) -> Optional[Squadron]:
for squadron in self.best_squadrons_for(location, task, size, this_turn):
return squadron
return None
def best_available_aircrafts_for(self, task: FlightType) -> list[AircraftType]:
"""Returns an ordered list of available aircrafts for the given task"""
aircrafts = []
best_aircraft_for_task = AircraftType.priority_list_for_task(task)
for aircraft, squadrons in self.squadrons.items():
for squadron in squadrons:
if squadron.untasked_aircraft and squadron.capable_of(task):
aircrafts.append(aircraft)
if aircraft not in best_aircraft_for_task:
best_aircraft_for_task.append(aircraft)
break
# Sort the list ordered by the best capability
return sorted(
aircrafts,
key=lambda ac: best_aircraft_for_task.index(ac),
)
def auto_assignable_for_task(self, task: FlightType) -> Iterator[Squadron]:
for squadron in self.iter_squadrons():
if squadron.can_auto_assign(task):
yield squadron
def auto_assignable_for_task_at(
self, task: FlightType, base: ControlPoint
) -> Iterator[Squadron]:
for squadron in self.iter_squadrons():
if squadron.can_auto_assign(task) and squadron.location == base:
yield squadron
def squadron_for(self, aircraft: AircraftType) -> Squadron:
return self.squadrons_for(aircraft)[0]
def iter_squadrons(self) -> Iterator[Squadron]:
return itertools.chain.from_iterable(self.squadrons.values())
def METHOD_NAME(self, index: int) -> Squadron:
return list(self.iter_squadrons())[index]
def populate_for_turn_0(self, squadrons_start_full: bool) -> None:
for squadron in self.iter_squadrons():
squadron.populate_for_turn_0(squadrons_start_full)
def end_turn(self) -> None:
for squadron in self.iter_squadrons():
squadron.end_turn()
def reset(self) -> None:
for squadron in self.iter_squadrons():
squadron.return_all_pilots_and_aircraft()
@property
def size(self) -> int:
return sum(len(s) for s in self.squadrons.values())
| null |
4,876 |
#!/usr/bin/env python3
# Copyright 2021 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test integrating the sequence processor into a simple test pipeline.
"""
import os
import sys
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.testproc import base
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.sequence import SequenceProc
class FakeExecutionProc(base.TestProc):
"""Simulates the pipeline sink consuming and running the tests.
Test execution is simulated for each test by calling run().
"""
def __init__(self):
super(FakeExecutionProc, self).__init__()
self.tests = []
def next_test(self, test):
self.tests.append(test)
return True
def run(self):
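        # Pop the most recently queued test (LIFO) and report its id back as the result.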
test = self.tests.pop()
self._send_result(test, test.n)
class FakeResultObserver(base.TestProcObserver):
"""Observer to track all results sent back through the pipeline."""
def __init__(self):
super(FakeResultObserver, self).__init__()
self.tests = set([])
def _on_result_for(self, test, result):
self.tests.add(test.n)
class FakeTest(object):
"""Simple test representation to differentiate light/heavy tests."""
def __init__(self, n, is_heavy):
self.n = n
self.is_heavy = is_heavy
self.keep_output = False
class TestSequenceProc(unittest.TestCase):
def _test(self, tests, batch_size, max_heavy):
# Set up a simple processing pipeline:
# Loader -> observe results -> sequencer -> execution.
loader = LoadProc(iter(tests))
results = FakeResultObserver()
sequence_proc = SequenceProc(max_heavy)
execution = FakeExecutionProc()
loader.connect_to(results)
results.connect_to(sequence_proc)
sequence_proc.connect_to(execution)
# Fill the execution queue (with the number of tests potentially
# executed in parallel).
loader.load_initial_tests(batch_size)
# Simulate the execution test by test.
while execution.tests:
# Assert the invariant of maximum heavy tests executed simultaneously.
self.assertLessEqual(
sum(int(test.is_heavy) for test in execution.tests), max_heavy)
# As in the real pipeline, running a test and returning its result
# will add another test into the pipeline.
execution.run()
# Ensure that all tests are processed and deliver results.
self.assertEqual(set(test.n for test in tests), results.tests)
def test_wrong_usage(self):
with self.assertRaises(Exception):
SequenceProc(0)
def METHOD_NAME(self):
self._test([], 1, 1)
def test_large_batch_light(self):
self._test([
FakeTest(0, False),
FakeTest(1, False),
FakeTest(2, False),
], 4, 1)
def test_small_batch_light(self):
self._test([
FakeTest(0, False),
FakeTest(1, False),
FakeTest(2, False),
], 2, 1)
def test_large_batch_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
], 4, 1)
def test_small_batch_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
], 2, 1)
def test_large_batch_mixed(self):
self._test([
FakeTest(0, True),
FakeTest(1, False),
FakeTest(2, True),
FakeTest(3, False),
], 4, 1)
def test_small_batch_mixed(self):
self._test([
FakeTest(0, True),
FakeTest(1, False),
FakeTest(2, True),
FakeTest(3, False),
], 2, 1)
def test_large_batch_more_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(4, True),
FakeTest(5, True),
FakeTest(6, False),
], 4, 2)
def test_small_batch_more_heavy(self):
self._test([
FakeTest(0, True),
FakeTest(1, True),
FakeTest(2, True),
FakeTest(3, False),
FakeTest(4, True),
FakeTest(5, True),
FakeTest(6, False),
], 2, 2)
if __name__ == '__main__':
unittest.main()
| null |
4,877 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
OUTFILE_HEADER = """#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# create-sys-script.py
#
# © 2017 Canonical Ltd.
# Author: Dan Streetman <[email protected]>
"""
# Use this only to (re-)create the test/sys-script.py script,
# after adding or modifying anything in the test/sys/ directory
import os, sys
import stat
import tempfile
import filecmp
import subprocess
OUTFILE_MODE = 0o775
OUTFILE_FUNCS = r"""
import os, sys
import shutil
def d(path, mode):
os.mkdir(path, mode)
def l(path, src):
os.symlink(src, path)
def f(path, mode, contents):
with open(path, "wb") as f:
f.write(contents)
os.chmod(path, mode)
"""
OUTFILE_MAIN = """
if len(sys.argv) < 2:
exit("Usage: {} <target dir>".format(sys.argv[0]))
if not os.path.isdir(sys.argv[1]):
exit("Target dir {} not found".format(sys.argv[1]))
os.chdir(sys.argv[1])
if os.path.exists('sys'):
shutil.rmtree('sys')
"""
def METHOD_NAME(outfile, path):
m = os.lstat(path).st_mode & 0o777
outfile.write(f"d('{path}', {m:#o})\n")
def handle_link(outfile, path):
src = os.readlink(path)
outfile.write(f"l('{path}', '{src}')\n")
def escape_single_quotes(b):
# remove the b'' wrapping each line repr
r = repr(b)[2:-1]
# python escapes all ' only if there are ' and " in the string
if '"' not in r:
r = r.replace("'", r"\'")
# return line with all ' escaped
return r
def handle_file(outfile, path):
m = os.lstat(path).st_mode & 0o777
with open(path, "rb") as f:
b = f.read()
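    # Contents with multiple newlines become a triple-quoted bytes literal so the
    # generated script stays readable; otherwise a plain repr() is used.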
if b.count(b"\n") > 1:
r = "\n".join( escape_single_quotes(l) for l in b.split(b"\n") )
r = f"b'''{r}'''"
else:
r = repr(b)
outfile.write(f"f('{path}', {m:#o}, {r})\n")
def process_sysdir(outfile):
for (dirpath, dirnames, filenames) in os.walk('sys'):
METHOD_NAME(outfile, dirpath)
for d in dirnames:
path = os.path.join(dirpath, d)
if stat.S_ISLNK(os.lstat(path).st_mode):
handle_link(outfile, path)
for f in filenames:
path = os.path.join(dirpath, f)
mode = os.lstat(path).st_mode
if stat.S_ISLNK(mode):
handle_link(outfile, path)
elif stat.S_ISREG(mode):
handle_file(outfile, path)
def verify_dir(tmpd, path_a):
path_b = os.path.join(tmpd, path_a)
mode_a = os.lstat(path_a).st_mode
mode_b = os.lstat(path_b).st_mode
if not stat.S_ISDIR(mode_b):
raise Exception("Not directory")
if (mode_a & 0o777) != (mode_b & 0o777):
raise Exception("Permissions mismatch")
def verify_link(tmpd, path_a):
path_b = os.path.join(tmpd, path_a)
if not stat.S_ISLNK(os.lstat(path_b).st_mode):
raise Exception("Not symlink")
if os.readlink(path_a) != os.readlink(path_b):
raise Exception("Symlink dest mismatch")
def verify_file(tmpd, path_a):
path_b = os.path.join(tmpd, path_a)
mode_a = os.lstat(path_a).st_mode
mode_b = os.lstat(path_b).st_mode
if not stat.S_ISREG(mode_b):
raise Exception("Not file")
if (mode_a & 0o777) != (mode_b & 0o777):
raise Exception("Permissions mismatch")
if not filecmp.cmp(path_a, path_b, shallow=False):
raise Exception("File contents mismatch")
def verify_script(tmpd):
any = False
for (dirpath, dirnames, filenames) in os.walk("sys"):
any = True
try:
path = dirpath
verify_dir(tmpd, path)
for d in dirnames:
path = os.path.join(dirpath, d)
if stat.S_ISLNK(os.lstat(path).st_mode):
verify_link(tmpd, path)
for f in filenames:
path = os.path.join(dirpath, f)
mode = os.lstat(path).st_mode
if stat.S_ISLNK(mode):
verify_link(tmpd, path)
elif stat.S_ISREG(mode):
verify_file(tmpd, path)
except Exception:
print(f'FAIL on "{path}"', file=sys.stderr)
raise
if not any:
exit('Nothing found!')
if __name__ == "__main__":
if len(sys.argv) < 2:
exit('Usage: create-sys-script.py /path/to/test/')
outfile = os.path.abspath(os.path.dirname(sys.argv[0]) + '/sys-script.py')
print(f'Creating {outfile} using contents of {sys.argv[1]}/sys')
os.chdir(sys.argv[1])
with open(outfile, "w") as f:
os.chmod(outfile, OUTFILE_MODE)
f.write(OUTFILE_HEADER.replace(os.path.basename(sys.argv[0]),
os.path.basename(outfile)))
f.write(OUTFILE_FUNCS)
f.write(OUTFILE_MAIN)
process_sysdir(f)
with tempfile.TemporaryDirectory() as tmpd:
print(f'Recreating sys/ using {outfile} at {tmpd}')
subprocess.check_call([outfile, tmpd])
verify_script(tmpd)
print(f'Verification successful, {outfile} is correct')
| null |
4,878 |
import tempfile
import pytest
from policyengine_core.parameters import (
ParameterNode,
ParameterNodeAtInstant,
ParameterNotFoundError,
load_parameter_file,
)
def test_get_at_instant(tax_benefit_system):
parameters = tax_benefit_system.parameters
assert isinstance(parameters, ParameterNode), parameters
parameters_at_instant = parameters("2016-01-01")
assert isinstance(
parameters_at_instant, ParameterNodeAtInstant
), parameters_at_instant
assert parameters_at_instant.taxes.income_tax_rate == 0.15
assert parameters_at_instant.benefits.basic_income == 600
def test_param_values(tax_benefit_system):
dated_values = {
"2015-01-01": 0.15,
"2014-01-01": 0.14,
"2013-01-01": 0.13,
"2012-01-01": 0.16,
}
for date, value in dated_values.items():
assert (
tax_benefit_system.get_parameters_at_instant(
date
).taxes.income_tax_rate
== value
)
def test_param_before_it_is_defined(tax_benefit_system):
with pytest.raises(ParameterNotFoundError):
tax_benefit_system.get_parameters_at_instant(
"1997-12-31"
).taxes.income_tax_rate
# The placeholder should have no effect on the parameter computation
def test_param_with_placeholder(tax_benefit_system):
assert (
tax_benefit_system.get_parameters_at_instant(
"2018-01-01"
).taxes.income_tax_rate
== 0.15
)
def test_stopped_parameter_before_end_value(tax_benefit_system):
assert (
tax_benefit_system.get_parameters_at_instant(
"2011-12-31"
).benefits.housing_allowance
== 0.25
)
def test_stopped_parameter_after_end_value(tax_benefit_system):
with pytest.raises(ParameterNotFoundError):
tax_benefit_system.get_parameters_at_instant(
"2016-12-01"
).benefits.housing_allowance
def test_parameter_for_period(tax_benefit_system):
income_tax_rate = tax_benefit_system.parameters.taxes.income_tax_rate
assert income_tax_rate("2015") == income_tax_rate("2015-01-01")
def METHOD_NAME(tax_benefit_system):
income_tax_rate = tax_benefit_system.parameters.taxes.income_tax_rate
with pytest.raises(ValueError):
income_tax_rate("test")
def test_parameter_repr(tax_benefit_system):
parameters = tax_benefit_system.parameters
tf = tempfile.NamedTemporaryFile(delete=False)
tf.write(repr(parameters).encode("utf-8"))
tf.close()
tf_parameters = load_parameter_file(file_path=tf.name)
assert repr(parameters) == repr(tf_parameters)
def test_parameters_metadata(tax_benefit_system):
parameter = tax_benefit_system.parameters.benefits.basic_income
assert (
parameter.metadata["reference"]
== "https://law.gov.example/basic-income/amount"
)
assert parameter.metadata["unit"] == "currency-EUR"
assert (
parameter.values_list[0].metadata["reference"]
== "https://law.gov.example/basic-income/amount/2015-12"
)
assert parameter.values_list[0].metadata["unit"] == "currency-EUR"
scale = tax_benefit_system.parameters.taxes.social_security_contribution
assert scale.metadata["threshold_unit"] == "currency-EUR"
assert scale.metadata["rate_unit"] == "/1"
def test_parameter_node_metadata(tax_benefit_system):
parameter = tax_benefit_system.parameters.benefits
assert parameter.description == "Social benefits"
parameter_2 = tax_benefit_system.parameters.taxes.housing_tax
assert parameter_2.description == "Housing tax"
def test_parameter_documentation(tax_benefit_system):
parameter = tax_benefit_system.parameters.benefits.housing_allowance
assert (
parameter.documentation
== "A fraction of the rent.\nFrom the 1st of Dec 2016, the housing allowance no longer exists.\n"
)
def test_get_descendants(tax_benefit_system):
all_parameters = {
parameter.name
for parameter in tax_benefit_system.parameters.get_descendants()
}
assert all_parameters.issuperset(
{"taxes", "taxes.housing_tax", "taxes.housing_tax.minimal_amount"}
)
def test_name():
parameter_data = {
"description": "Parameter indexed by a numeric key",
"2010": {"values": {"2006-01-01": 0.0075}},
}
parameter = ParameterNode("root", data=parameter_data)
assert parameter.children["2010"].name == "root.2010"
| null |
4,879 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2023 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import (
FindSXPeaksConvolve,
LoadParameterFile,
AnalysisDataService,
SortPeaksWorkspace,
)
from testhelpers import WorkspaceCreationHelper
from numpy import array, sqrt
XML_PARAMS = """
<?xml version="1.0" encoding="UTF-8" ?>
<parameter-file instrument = "basic_rect" valid-from = "2013-11-06T00:00:00">
<component-link name = "bank1">
<parameter name="BackToBackExponential:A" type="fitting">
<formula eq="2" unit="TOF" result-unit="1/TOF" /> <fixed />
</parameter>
<parameter name="BackToBackExponential:B" type="fitting">
<formula eq="2" unit="TOF" result-unit="1/TOF" /> <fixed />
</parameter>
<parameter name="BackToBackExponential:S" type="fitting">
<formula eq="0.1" unit="TOF" result-unit="TOF" />
</parameter>
</component-link>
</parameter-file>
"""
class FindSXPeaksConvolveTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# load empty instrument with RectangularDetector banks and create a peak table
cls.ws = WorkspaceCreationHelper.create2DWorkspaceWithRectangularInstrument(1, 7, 13) # nbanks, npix, nbins
AnalysisDataService.addOrReplace("ws_rect", cls.ws)
axis = cls.ws.getAxis(0)
axis.setUnit("TOF")
# fake peak centred on ispec=30 (detid=79) and TOF=5 - near middle of bank
peak_1D = array([0, 0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0, 0])
cls.ws.setY(30, cls.ws.readY(30) + peak_1D)
for ispec in [23, 29, 30, 31, 37]:
cls.ws.setY(ispec, cls.ws.readY(ispec) + peak_1D)
cls.ws.setE(ispec, sqrt(cls.ws.readY(ispec)))
# fake peak centred on ispec=12 (detid=61) and TOF=7 - near detector edge
cls.ws.setY(12, cls.ws.readY(12) + peak_1D[::-1])
for ispec in [5, 11, 12, 13, 19]:
cls.ws.setY(ispec, cls.ws.readY(ispec) + peak_1D[::-1])
cls.ws.setE(ispec, sqrt(cls.ws.readY(ispec)))
# Add back-to-back exponential params
LoadParameterFile(cls.ws, ParameterXML=XML_PARAMS)
@classmethod
def tearDownClass(cls):
AnalysisDataService.clear()
def _assert_found_correct_peaks(self, peak_ws):
self.assertEqual(peak_ws.getNumberPeaks(), 2)
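        # Sort descending by DetID so the mid-bank peak (detid 79) comes first and
        # the edge peak (detid 61) second.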
peak_ws = SortPeaksWorkspace(
InputWorkspace=peak_ws, OutputWorkspace=peak_ws.name(), ColumnNameToSortBy="DetID", SortAscending=False
)
pk = peak_ws.getPeak(0)
self.assertEqual(pk.getDetectorID(), 79)
self.assertAlmostEqual(pk.getTOF(), 5.0, delta=1e-8)
self.assertAlmostEqual(pk.getIntensityOverSigma(), 6.0622, delta=1e-4)
pk = peak_ws.getPeak(1)
self.assertEqual(pk.getDetectorID(), 61)
self.assertAlmostEqual(pk.getTOF(), 7.0, delta=1e-8)
self.assertAlmostEqual(pk.getIntensityOverSigma(), 5.3981, delta=1e-4)
def METHOD_NAME(self):
out = FindSXPeaksConvolve(
InputWorkspace=self.ws, PeaksWorkspace="peaks1", NRows=3, NCols=3, NBins=3, ThresholdIoverSigma=3.0, MinFracSize=0.02
)
self._assert_found_correct_peaks(out)
def test_exec_get_nbins_from_back_to_back_params(self):
out = FindSXPeaksConvolve(
InputWorkspace=self.ws,
PeaksWorkspace="peaks1",
NRows=3,
NCols=3,
GetNBinsFromBackToBackParams=True,
ThresholdIoverSigma=3.0,
MinFracSize=0.02,
)
self._assert_found_correct_peaks(out)
def test_exec_IoverSigma_threshold(self):
out = FindSXPeaksConvolve(InputWorkspace=self.ws, PeaksWorkspace="peaks3", NRows=3, NCols=3, NBins=3, ThresholdIoverSigma=100.0)
self.assertEqual(out.getNumberPeaks(), 0)
def test_exec_remove_on_edge(self):
out = FindSXPeaksConvolve(
InputWorkspace=self.ws,
PeaksWorkspace="peaks1",
NRows=3,
NCols=3,
NBins=3,
ThresholdIoverSigma=3.0,
MinFracSize=0.02,
RemoveOnEdge=True,
)
self.assertEqual(out.getNumberPeaks(), 1)
# check it's the correct peak
pk = out.getPeak(0)
self.assertEqual(pk.getDetectorID(), 79)
def test_exec_min_frac_size(self):
out = FindSXPeaksConvolve(
InputWorkspace=self.ws, PeaksWorkspace="peaks1", NRows=3, NCols=3, NBins=5, ThresholdIoverSigma=3.0, MinFracSize=0.5
)
self.assertEqual(out.getNumberPeaks(), 0)
if __name__ == "__main__":
unittest.main()
| null |
4,880 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import sys
import traceback
from urllib.parse import urlencode
from regression.python_test_utils import test_utils as utils
import os
import json
# Load test data from json file.
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
with open(CURRENT_PATH + "/index_constraint_test_data.json") as data_file:
test_cases = json.load(data_file)
# api method call
def api_create(self):
return self.tester.post("{0}{1}/{2}/{3}/{4}/{5}/".
format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id, self.table_id),
data=json.dumps(self.data),
content_type='html/json'
)
def api_delete(self, index_constraint_id=None):
if index_constraint_id is None:
index_constraint_id = self.index_constraint_id
return self.tester.delete("{0}{1}/{2}/{3}/{4}/{5}/{6}".
format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id,
self.table_id,
index_constraint_id),
data=json.dumps(self.data),
follow_redirects=True
)
def api_get(self, index_constraint_id=None):
if index_constraint_id is None:
index_constraint_id = self.index_constraint_id
return self.tester.get("{0}{1}/{2}/{3}/{4}/{5}/{6}".
format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id,
self.table_id,
index_constraint_id),
data=json.dumps(self.data),
follow_redirects=True
)
def METHOD_NAME(self, url_encode_data):
return self.tester.get("{0}{1}/{2}/{3}/{4}/{5}/{6}?{7}".
format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id,
self.table_id,
self.index_constraint_id,
urlencode(url_encode_data)),
data=json.dumps(self.data),
follow_redirects=True
)
def api_put(self):
return self.tester.put("{0}{1}/{2}/{3}/{4}/{5}/{6}".
format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id, self.table_id,
self.index_constraint_id
), data=json.dumps(self.data),
follow_redirects=True
)
def create_index_constraint(server, db_name, schema_name, table_name,
key_name, key_type):
"""
This function creates a index constraint(PK or UK) under provided table.
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:param schema_name: schema name
:type schema_name: str
:param table_name: table name
:type table_name: str
:param key_name: test name for primary or unique key
:type key_name: str
:param key_type: key type i.e. primary or unique key
:type key_type: str
:return oid: key constraint id
:rtype: int
"""
try:
connection = utils.get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
old_isolation_level = connection.isolation_level
utils.set_isolation_level(connection, 0)
pg_cursor = connection.cursor()
query = "ALTER TABLE %s.%s ADD CONSTRAINT %s %s (id)" % \
(schema_name, table_name, key_name, key_type)
pg_cursor.execute(query)
utils.set_isolation_level(connection, old_isolation_level)
connection.commit()
# Get oid of newly added index constraint
pg_cursor.execute("SELECT conindid FROM pg_catalog.pg_constraint "
"where conname='%s'" % key_name)
index_constraint = pg_cursor.fetchone()
connection.close()
oid = index_constraint[0]
return oid
except Exception:
traceback.print_exc(file=sys.stderr)
def verify_index_constraint(server, db_name, constraint_name):
"""
This function verifies that index constraint(PK or UK) is exists or not.
:param constraint_name:
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:return index_constraint: index constraint record from database
:rtype: tuple
"""
try:
connection = utils.get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
pg_cursor.execute("SELECT oid FROM pg_catalog.pg_constraint "
"where conname='%s'" % constraint_name)
index_constraint = pg_cursor.fetchone()
connection.close()
return index_constraint
except Exception:
traceback.print_exc(file=sys.stderr)
def create_unique_index(server, db_name, schema_name, table_name,
index_name, column_name):
"""
This function creates a unique index for provided table.
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:param schema_name: schema name
:type schema_name: str
:param table_name: table name
:type table_name: str
:param index_name: index name
:type index_name: str
:param column_name: column on which index to be created
:type column_name: str
"""
try:
connection = utils.get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
old_isolation_level = connection.isolation_level
utils.set_isolation_level(connection, 0)
pg_cursor = connection.cursor()
query = "CREATE UNIQUE INDEX CONCURRENTLY %s ON %s.%s (%s)" % \
(index_name, schema_name, table_name, column_name)
pg_cursor.execute(query)
utils.set_isolation_level(connection, old_isolation_level)
connection.commit()
connection.close()
except Exception:
traceback.print_exc(file=sys.stderr)
| null |
4,881 |
import hyperion, time, random
# Convert x/y (0.0 - 1.0) point to proper int values based on Hyperion image width/height
# Or get a random value
# @param bool rand Randomize point if true
# @param float x Point at the x axis between 0.0-1.0
# @param float y Point at the y axis between 0.0-1.0
# @return Tuple with (x,y) as Integer
def getPoint(rand = True ,x = 0.5, y = 0.5):
if rand:
x = random.uniform(0.0, 1.0)
y = random.uniform(0.0, 1.0)
x = int(round(x*hyperion.imageWidth()))
y = int(round(y*hyperion.imageHeight()))
return (x,y)
# Returns the required sleep time for an interval function based on rotation time and steps
# Also adapts to the LED device latchTime if required
# @param float rt RotationTime in seconds (time for one rotation, based on steps)
# @param int steps The number of steps used to calculate the rotation time
# @return Float sleep time per step in seconds
def getSTime(rt, steps = 360):
rt = float(rt)
sleepTime = max(0.1, rt) / steps
# adapt sleeptime to hardware
minStepTime= float(hyperion.latchTime)/1000.0
if minStepTime == 0: minStepTime = 0.001
if minStepTime > sleepTime:
sleepTime = minStepTime
return sleepTime
# Creates a position+RGBA bytearray gradient from the provided colors (RGB or RGBA; channels 0-255, alpha 0-1). The color stop positions are calculated from the color count. Requires at least 2 colors!
# @param tuple cc Colors in a tuple of RGB or RGBA
# @param bool closeCircle If True use last color as first color
# @return bytearray A bytearray of RGBA for hyperion.image*Gradient functions
def buildGradient(cc, closeCircle = True):
if len(cc) > 1:
withAlpha = False
posfac = int(255/len(cc))
ba = bytearray()
pos = 0
if len(cc[0]) == 4:
withAlpha = True
for c in cc:
if withAlpha:
alpha = int(c[3]*255)
else:
alpha = 255
pos += posfac
ba += bytearray([pos,c[0],c[1],c[2],alpha])
if closeCircle:
# last color as first color
lC = cc[-1]
if withAlpha:
alpha = int(lC[3]*255)
else:
alpha = 255
ba += bytearray([0,lC[0],lC[1],lC[2],alpha])
return ba
return bytearray()
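# Worked example (illustrative, not part of the original effect): for two RGB
# colours, buildGradient(((255, 0, 0), (0, 0, 255))) produces [pos, r, g, b, a]
# records spaced by int(255 / len(colors)):
#   127, 255,   0,   0, 255,   # first colour at position 127
#   254,   0,   0, 255, 255,   # second colour at position 254
#     0,   0,   0, 255, 255,   # closeCircle=True repeats the last colour at 0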
def METHOD_NAME( increment = 1):
global angle
angle += increment
if angle > 360: angle=0
if angle < 0: angle=360
return angle
def rotateAngle2( increment = 1):
global angle2
angle2 += increment
if angle2 > 360: angle2=0
if angle2 < 0: angle2=360
return angle2
# set minimum image size - must be done asap
hyperion.imageMinSize(64,64)
iW = hyperion.imageWidth()
iH = hyperion.imageHeight()
# Get the parameters
rotationTime = float(hyperion.args.get('rotation-time', 10.0))
reverse = bool(hyperion.args.get('reverse', False))
centerX = float(hyperion.args.get('center_x', 0.5))
centerY = float(hyperion.args.get('center_y', 0.5))
randomCenter = bool(hyperion.args.get('random-center', False))
custColors = hyperion.args.get('custom-colors', ((255,0,0),(0,255,0),(0,0,255)))
enableSecond = bool(hyperion.args.get('enable-second', False))
#rotationTime2 = float(hyperion.args.get('rotation-time2', 5.0))
reverse2 = bool(hyperion.args.get('reverse2', True))
centerX2 = float(hyperion.args.get('center_x2', 0.5))
centerY2 = float(hyperion.args.get('center_y2', 0.5))
randomCenter2 = bool(hyperion.args.get('random-center2', False))
custColors2 = hyperion.args.get('custom-colors2', ((255,255,255,0),(0,255,255,0),(255,255,255,1),(0,255,255,0),(0,255,255,0),(0,255,255,0),(255,255,255,1),(0,255,255,0),(0,255,255,0),(0,255,255,0),(255,255,255,1),(0,255,255,0)))
# process parameters
pointS1 = getPoint(randomCenter ,centerX, centerY)
pointS2 = getPoint(randomCenter2 ,centerX2, centerY2)
sleepTime = getSTime(rotationTime)
#sleepTime2 = getSTime(rotationTime2)
angle = 0
angle2 = 0
S2 = False
increment = -1 if reverse else 1
increment2 = -1 if reverse2 else 1
if len(custColors) > 1:
baS1 = buildGradient(custColors)
else:
baS1 = bytearray([
0 ,255,0 ,0, 255,
25 ,255,230,0, 255,
63 ,255,255,0, 255,
100,0 ,255,0, 255,
127,0 ,255,200, 255,
159,0 ,255,255, 255,
191,0 ,0 ,255, 255,
224,255,0 ,255, 255,
255,255,0 ,127, 255,
])
# check if the second swirl should be built
if enableSecond and len(custColors2) > 1:
S2 = True
baS2 = buildGradient(custColors2)
# effect loop
while not hyperion.abort():
angle += increment
if angle > 360: angle=0
if angle < 0: angle=360
angle2 += increment2
if angle2 > 360: angle2=0
if angle2 < 0: angle2=360
hyperion.imageConicalGradient(pointS1[0], pointS1[1], angle, baS1)
if S2:
hyperion.imageConicalGradient(pointS2[0], pointS2[1], angle2, baS2)
hyperion.imageShow()
time.sleep(sleepTime)
| null |
4,882 |
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import logging
from pathlib import Path
from random import random
import pytest
from cylc.flow.async_util import (
pipe,
asyncqgen,
scandir,
)
LOG = logging.getLogger('test')
@pipe()
async def a_range(n):
for num in range(n):
LOG.info(f'a_range({n})')
yield num
@pipe
async def even(x):
LOG.info(f'even({x})')
return x % 2 == 0
@pipe
async def mult(x, y, kwarg='useless kwarg'):
LOG.info(f'mult{x, y}')
return x * y
@pipe
async def sleepy(x):
"""A filter which waits a while then passes."""
LOG.info(f'sleepy({x})')
await asyncio.sleep(0.1)
return True
@pytest.mark.parametrize('preserve_order', (True, False))
async def test_pipe(preserve_order):
"""It passes values through the pipe."""
pipe = a_range(5) | even | mult(2)
pipe.preserve_order = preserve_order
result = []
async for num in pipe:
result.append(num)
assert result == [
0,
4,
8,
]
@pytest.mark.parametrize('preserve_order', (True, False))
async def test_pipe_single(preserve_order):
    """It allows single-step pipes."""
pipe = a_range(5)
pipe.preserve_order = preserve_order
result = []
async for num in pipe:
result.append(num)
assert result == [
0,
1,
2,
3,
4
]
@pytest.mark.parametrize('preserve_order', (True, False))
async def METHOD_NAME(preserve_order):
"""It can be re-used once depleted."""
pipe = a_range(5) | even | mult(2)
pipe.preserve_order = preserve_order
for _ in range(5):
result = []
async for num in pipe:
result.append(num)
assert result == [
0,
4,
8,
]
@pytest.mark.parametrize('preserve_order', (True, False))
async def test_pipe_filter_stop(preserve_order):
"""It yields values early with the filter_stop argument."""
pipe = a_range(5) | even(filter_stop=False)
pipe |= mult(10)
pipe.preserve_order = preserve_order
result = []
async for num in pipe:
result.append(num)
# the even numbers should be multiplied by 10
# the odd numbers should be yielded early (so don't get multiplied)
assert result == [
0,
1,
20,
3,
40,
]
@pipe
async def one(x):
await asyncio.sleep(random() / 5)
return x
@pytest.mark.parametrize('preserve_order', (True, False))
async def test_pipe_preserve_order(preserve_order):
"""It should control result order according to pipe configuration."""
n = 50
pipe = a_range(n) | one | one | one
pipe.preserve_order = preserve_order
result = []
async for item in pipe:
result.append(item)
# the odds of getting 50 items in order by chance are pretty slim
assert (result == list(range(n))) is preserve_order
@pytest.mark.parametrize('preserve_order', (True, False))
async def test_pipe_concurrent(caplog, preserve_order):
"""It runs pipes concurrently.
    It is easy to make something which appears to be concurrent; this
test is intended to ensure that it actually IS concurrent.
"""
pipe = a_range(5) | even | sleepy | mult(2)
pipe.preserve_order = preserve_order
caplog.set_level(logging.INFO, 'test')
async for num in pipe:
pass
order = [
# a list of the log messages generated by each step of the pipe
# as it processes an item
x[2].split('(')[0]
for x in caplog.record_tuples
]
assert 'mult' in order
assert len(order) == 4 * 4 # 4 steps * 4 items yielded by a_range
# ensure that the steps aren't completed in order (as sync code would)
# the sleep should ensure this
# NOTE: not the best test but better than nothing
assert order != [
'a_range',
'even',
'sleepy',
'mult'
] * 4
def test_pipe_str():
"""It has helpful textual representations."""
pipe = a_range(5) | even(filter_stop=False) | mult(10, kwarg=42)
assert str(pipe) == 'a_range(5)'
assert repr(pipe) == 'a_range(5) | even() | mult(10, kwarg=42)'
@pipe() # NOTE: these brackets are what the next function is testing
async def div(x, y):
return x / y
def test_pipe_brackets():
"""Ensure that pipe functions can be declared with or without brackets."""
pipe = a_range(5) | div
assert repr(pipe) == 'a_range(5) | div()'
@pipe
async def documented(x):
"""The docstring for the pipe function."""
pass
def test_documentation():
"""It should preserve the docstring of pipe functions."""
assert documented.__doc__ == 'The docstring for the pipe function.'
def test_rewind():
    """It should be possible to move through the pipe stages."""
pipe = a_range | mult | even
assert pipe.fastforward().rewind() == pipe
async def test_asyncqgen():
"""It should provide an async gen interface to an async queue."""
queue = asyncio.Queue()
gen = asyncqgen(queue)
await queue.put(1)
await queue.put(2)
await queue.put(3)
ret = []
async for item in gen:
ret.append(item)
assert ret == [1, 2, 3]
async def test_scandir(tmp_path: Path):
"""It should list directory contents (including symlinks)."""
(tmp_path / 'a').touch()
(tmp_path / 'b').touch()
(tmp_path / 'c').symlink_to(tmp_path / 'b')
assert sorted(await scandir(tmp_path)) == [
Path(tmp_path, 'a'),
Path(tmp_path, 'b'),
Path(tmp_path, 'c')
]
async def test_scandir_non_exist(tmp_path: Path):
"""scandir() should raise FileNotFoundError if called on a path that
doesn't exist."""
with pytest.raises(FileNotFoundError):
await scandir(tmp_path / 'HORSE')
| null |
4,883 |
import asyncio
import dataclasses
import gc
import warnings
import pyngrok.conf
import pyngrok.ngrok
import pytest
from kopf._cogs.structs.references import Insights, Resource
from kopf._cogs.structs.reviews import CreateOptions, Request, RequestKind, RequestPayload, \
RequestResource, UserInfo, WebhookFn
from kopf._core.engines.indexing import OperatorIndexers
from kopf._kits.webhooks import WebhookServer
# TODO: LATER: Fix this issue some day later.
@pytest.fixture()
def no_serverside_resource_warnings():
"""
Hide an irrelevant ResourceWarning on the server side:
It happens when a client disconnects from the webhook server,
and the server closes the transport for that client. The garbage
    collector calls ``__del__()`` on the SSL proto object even though
    it has not been closed by that moment.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
category=ResourceWarning,
module='asyncio.sslproto',
message='unclosed transport')
yield
# Provoke the garbage collection of SSL sockets to trigger the warnings.
# Otherwise, in PyPy, these warnings leak to other tests due to delayed gc.
gc.collect()
# TODO: LATER: Fix this issue after aiohttp 4.0.0 is used.
@pytest.fixture()
async def METHOD_NAME():
"""
Hide an irrelevant ResourceWarning on the client side.
https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
"""
yield
await asyncio.sleep(0.100)
@pytest.fixture()
async def no_sslproto_warnings(no_serverside_resource_warnings, METHOD_NAME):
pass
# cert generation is somewhat slow (~1s)
@pytest.fixture(scope='module')
def certpkey():
cert, pkey = WebhookServer.build_certificate(['localhost', '127.0.0.1'])
return cert, pkey
@pytest.fixture()
def certfile(tmpdir, certpkey):
path = tmpdir.join('cert.pem')
path.write_binary(certpkey[0])
return str(path)
@pytest.fixture()
def pkeyfile(tmpdir, certpkey):
path = tmpdir.join('pkey.pem')
path.write_binary(certpkey[1])
return str(path)
@pytest.fixture()
def adm_request(resource, namespace):
return Request(
apiVersion='admission.k8s.io/v1',
kind='AdmissionReview',
request=RequestPayload(
uid='uid1',
kind=RequestKind(group=resource.group, version=resource.version, kind=resource.kind),
resource=RequestResource(group=resource.group, version=resource.version, resource=resource.plural),
subResource=None,
requestKind=RequestKind(group=resource.group, version=resource.version, kind=resource.kind),
requestResource=RequestResource(group=resource.group, version=resource.version, resource=resource.plural),
requestSubResource=None,
userInfo=UserInfo(username='user1', uid='useruid1', groups=['group1']),
name='name1',
namespace=namespace,
operation='CREATE',
options=CreateOptions(apiVersion='meta.k8s.io/v1', kind='CreateOptions'),
object={'spec': {'field': 'value'}},
oldObject=None,
dryRun=False,
))
@dataclasses.dataclass(frozen=True)
class Responder:
fn: WebhookFn
fut: asyncio.Future # asyncio.Future[Response]
@pytest.fixture()
async def responder() -> Responder:
fut = asyncio.Future()
async def fn(*_, **__):
return await fut
return Responder(fn=fn, fut=fut)
@pytest.fixture()
async def insights(settings, resource):
val_resource = Resource('admissionregistration.k8s.io', 'v1', 'validatingwebhookconfigurations')
mut_resource = Resource('admissionregistration.k8s.io', 'v1', 'mutatingwebhookconfigurations')
insights = Insights()
insights.watched_resources.add(resource)
insights.webhook_resources.add(resource)
await insights.backbone.fill(resources=[val_resource, mut_resource])
insights.ready_resources.set()
return insights
@pytest.fixture()
def indices():
indexers = OperatorIndexers()
return indexers.indices
@pytest.fixture(autouse=True)
def pyngrok_mock(mocker):
mocker.patch.object(pyngrok.conf, 'get_default')
mocker.patch.object(pyngrok.ngrok, 'set_auth_token')
mocker.patch.object(pyngrok.ngrok, 'connect')
mocker.patch.object(pyngrok.ngrok, 'disconnect')
pyngrok.ngrok.connect.return_value.public_url = 'https://nowhere'
return pyngrok
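# Illustrative use of the fixtures above in a test body (an assumed sketch, not
# part of the original conftest; the admission response payload is a placeholder):
#
#   async def test_webhook_roundtrip(responder, adm_request):
#       responder.fut.set_result({'allowed': True})
#       response = await responder.fn(adm_request)
#       assert response == {'allowed': True}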
| null |
4,884 |
from collections import OrderedDict
from conans.client.graph.graph import RECIPE_SYSTEM_TOOL
from conans.errors import ConanException
from conans.model.recipe_ref import RecipeReference
from conans.model.conanfile_interface import ConanFileInterface
class UserRequirementsDict(object):
""" user facing dict to allow access of dependencies by name
"""
def __init__(self, data, require_filter=None):
self._data = data # dict-like
self._require_filter = require_filter # dict {trait: value} for requirements
def filter(self, require_filter):
def filter_fn(require):
for k, v in require_filter.items():
if getattr(require, k) != v:
return False
return True
data = OrderedDict((k, v) for k, v in self._data.items() if filter_fn(k))
return UserRequirementsDict(data, require_filter)
def __bool__(self):
return bool(self._data)
def METHOD_NAME(self, ref, build=None, **kwargs):
return self._get(ref, build, **kwargs)[1]
def _get(self, ref, build=None, **kwargs):
if build is None:
current_filters = self._require_filter or {}
if "build" not in current_filters:
# By default we search in the "host" context
kwargs["build"] = False
else:
kwargs["build"] = build
data = self.filter(kwargs)
ret = []
if "/" in ref:
# FIXME: Validate reference
ref = RecipeReference.loads(ref)
for require, value in data.items():
if require.ref == ref:
ret.append((require, value))
else:
name = ref
for require, value in data.items():
if require.ref.name == name:
ret.append((require, value))
if len(ret) > 1:
current_filters = data._require_filter or "{}"
requires = "\n".join(["- {}".format(require) for require, _ in ret])
raise ConanException("There are more than one requires matching the specified filters:"
" {}\n{}".format(current_filters, requires))
if not ret:
raise KeyError("'{}' not found in the dependency set".format(ref))
key, value = ret[0]
return key, value
def __getitem__(self, name):
return self.METHOD_NAME(name)
def __delitem__(self, name):
r, _ = self._get(name)
del self._data[r]
def items(self):
return self._data.items()
def values(self):
return self._data.values()
class ConanFileDependencies(UserRequirementsDict):
@staticmethod
def from_node(node):
d = OrderedDict((require, ConanFileInterface(transitive.node.conanfile))
for require, transitive in node.transitive_deps.items())
return ConanFileDependencies(d)
def filter(self, require_filter, remove_system_tools=False):
        # FIXME: Copy of the above, to return ConanFileDependencies class object
def filter_fn(require):
for k, v in require_filter.items():
if getattr(require, k) != v:
return False
return True
data = OrderedDict((k, v) for k, v in self._data.items() if filter_fn(k))
if remove_system_tools:
data = OrderedDict((k, v) for k, v in data.items()
# TODO: Make "recipe" part of ConanFileInterface model
if v._conanfile._conan_node.recipe != RECIPE_SYSTEM_TOOL)
return ConanFileDependencies(data, require_filter)
def transitive_requires(self, other):
"""
:type other: ConanFileDependencies
"""
data = OrderedDict()
for k, v in self._data.items():
for otherk, otherv in other._data.items():
if v == otherv:
data[k] = v
return ConanFileDependencies(data)
@property
def topological_sort(self):
# Return first independent nodes, final ones are the more direct deps
result = OrderedDict()
opened = self._data.copy()
while opened:
opened_values = set(opened.values())
new_opened = OrderedDict()
for req, conanfile in opened.items():
deps_in_opened = any(d in opened_values for d in conanfile.dependencies.values())
if deps_in_opened:
new_opened[req] = conanfile # keep it for next iteration
else:
result[req] = conanfile # No dependencies in open set!
opened = new_opened
return ConanFileDependencies(result)
@property
def direct_host(self):
return self.filter({"build": False, "direct": True, "test": False, "skip": False})
@property
def direct_build(self):
return self.filter({"build": True, "direct": True}, remove_system_tools=True)
@property
def host(self):
return self.filter({"build": False, "test": False, "skip": False})
@property
def test(self):
        # A direct_test property is not needed because test requires are visible=False,
        # so only the direct consumer will have them in the graph
return self.filter({"build": False, "test": True, "skip": False})
@property
def build(self):
return self.filter({"build": True}, remove_system_tools=True)
def get_transitive_requires(consumer, dependency):
    """ the transitive requires that we need are the consumer ones, not the current dependency
ones, so we get the current ones, then look for them in the consumer, and return those
"""
pkg_deps = dependency.dependencies.filter({"direct": True})
result = consumer.dependencies.transitive_requires(pkg_deps)
result = result.filter({"skip": False})
return result
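# Illustrative usage sketch (assumed, not part of the original module; the
# conanfile object and the "zlib" reference are placeholders): generators and
# conanfile methods typically consume the filtered views defined above.
#
#   for require, dep in conanfile.dependencies.host.items():
#       print(require.ref, dep.ref)
#   zlib = conanfile.dependencies["zlib"]       # __getitem__ searches the host context
#   tool_requires = conanfile.dependencies.direct_build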
| null |
4,885 |
from time import perf_counter
import pyglet.gl as pgl
from sympy.plotting.pygletplot.managed_window import ManagedWindow
from sympy.plotting.pygletplot.plot_camera import PlotCamera
from sympy.plotting.pygletplot.plot_controller import PlotController
class PlotWindow(ManagedWindow):
def __init__(self, plot, antialiasing=True, ortho=False,
invert_mouse_zoom=False, linewidth=1.5, caption="SymPy Plot",
**kwargs):
"""
Named Arguments
===============
antialiasing = True
True OR False
ortho = False
True OR False
invert_mouse_zoom = False
True OR False
"""
self.plot = plot
self.camera = None
self._calculating = False
self.antialiasing = antialiasing
self.ortho = ortho
self.invert_mouse_zoom = invert_mouse_zoom
self.linewidth = linewidth
self.title = caption
self.last_caption_update = 0
self.caption_update_interval = 0.2
self.drawing_first_object = True
super().__init__(**kwargs)
def setup(self):
self.camera = PlotCamera(self, ortho=self.ortho)
self.controller = PlotController(self,
invert_mouse_zoom=self.invert_mouse_zoom)
self.push_handlers(self.controller)
pgl.glClearColor(1.0, 1.0, 1.0, 0.0)
pgl.glClearDepth(1.0)
pgl.glDepthFunc(pgl.GL_LESS)
pgl.glEnable(pgl.GL_DEPTH_TEST)
pgl.glEnable(pgl.GL_LINE_SMOOTH)
pgl.glShadeModel(pgl.GL_SMOOTH)
pgl.glLineWidth(self.linewidth)
pgl.glEnable(pgl.GL_BLEND)
pgl.glBlendFunc(pgl.GL_SRC_ALPHA, pgl.GL_ONE_MINUS_SRC_ALPHA)
if self.antialiasing:
pgl.glHint(pgl.GL_LINE_SMOOTH_HINT, pgl.GL_NICEST)
pgl.glHint(pgl.GL_POLYGON_SMOOTH_HINT, pgl.GL_NICEST)
self.camera.setup_projection()
def on_resize(self, w, h):
super().on_resize(w, h)
if self.camera is not None:
self.camera.setup_projection()
def update(self, dt):
self.controller.update(dt)
def METHOD_NAME(self):
self.plot._render_lock.acquire()
self.camera.apply_transformation()
calc_verts_pos, calc_verts_len = 0, 0
calc_cverts_pos, calc_cverts_len = 0, 0
should_update_caption = (perf_counter() - self.last_caption_update >
self.caption_update_interval)
if len(self.plot._functions.values()) == 0:
self.drawing_first_object = True
iterfunctions = iter(self.plot._functions.values())
for r in iterfunctions:
if self.drawing_first_object:
self.camera.set_rot_preset(r.default_rot_preset)
self.drawing_first_object = False
pgl.glPushMatrix()
r._draw()
pgl.glPopMatrix()
# might as well do this while we are
# iterating and have the lock rather
# than locking and iterating twice
# per frame:
if should_update_caption:
try:
if r.calculating_verts:
calc_verts_pos += r.calculating_verts_pos
calc_verts_len += r.calculating_verts_len
if r.calculating_cverts:
calc_cverts_pos += r.calculating_cverts_pos
calc_cverts_len += r.calculating_cverts_len
except ValueError:
pass
for r in self.plot._pobjects:
pgl.glPushMatrix()
r._draw()
pgl.glPopMatrix()
if should_update_caption:
self.update_caption(calc_verts_pos, calc_verts_len,
calc_cverts_pos, calc_cverts_len)
self.last_caption_update = perf_counter()
if self.plot._screenshot:
self.plot._screenshot._execute_saving()
self.plot._render_lock.release()
def update_caption(self, calc_verts_pos, calc_verts_len,
calc_cverts_pos, calc_cverts_len):
caption = self.title
if calc_verts_len or calc_cverts_len:
caption += " (calculating"
if calc_verts_len > 0:
p = (calc_verts_pos / calc_verts_len) * 100
caption += " vertices %i%%" % (p)
if calc_cverts_len > 0:
p = (calc_cverts_pos / calc_cverts_len) * 100
caption += " colors %i%%" % (p)
caption += ")"
if self.caption != caption:
self.set_caption(caption)
| null |
4,886 |
"""Tests for computing Galois groups. """
from sympy.abc import x
from sympy.combinatorics.galois import (
S1TransitiveSubgroups, S2TransitiveSubgroups, S3TransitiveSubgroups,
S4TransitiveSubgroups, S5TransitiveSubgroups, S6TransitiveSubgroups,
)
from sympy.polys.domains.rationalfield import QQ
from sympy.polys.numberfields.galoisgroups import (
tschirnhausen_transformation,
galois_group,
_galois_group_degree_4_root_approx,
_galois_group_degree_5_hybrid,
)
from sympy.polys.numberfields.subfield import field_isomorphism
from sympy.polys.polytools import Poly
from sympy.testing.pytest import raises
def test_tschirnhausen_transformation():
for T in [
Poly(x**2 - 2),
Poly(x**2 + x + 1),
Poly(x**4 + 1),
Poly(x**4 - x**3 + x**2 - x + 1),
]:
_, U = tschirnhausen_transformation(T)
assert U.degree() == T.degree()
assert U.is_monic
assert U.is_irreducible
K = QQ.alg_field_from_poly(T)
L = QQ.alg_field_from_poly(U)
assert field_isomorphism(K.ext, L.ext) is not None
# Test polys are from:
# Cohen, H. *A Course in Computational Algebraic Number Theory*.
test_polys_by_deg = {
# Degree 1
1: [
(x, S1TransitiveSubgroups.S1, True)
],
# Degree 2
2: [
(x**2 + x + 1, S2TransitiveSubgroups.S2, False)
],
# Degree 3
3: [
(x**3 + x**2 - 2*x - 1, S3TransitiveSubgroups.A3, True),
(x**3 + 2, S3TransitiveSubgroups.S3, False),
],
# Degree 4
4: [
(x**4 + x**3 + x**2 + x + 1, S4TransitiveSubgroups.C4, False),
(x**4 + 1, S4TransitiveSubgroups.V, True),
(x**4 - 2, S4TransitiveSubgroups.D4, False),
(x**4 + 8*x + 12, S4TransitiveSubgroups.A4, True),
(x**4 + x + 1, S4TransitiveSubgroups.S4, False),
],
# Degree 5
5: [
(x**5 + x**4 - 4*x**3 - 3*x**2 + 3*x + 1, S5TransitiveSubgroups.C5, True),
(x**5 - 5*x + 12, S5TransitiveSubgroups.D5, True),
(x**5 + 2, S5TransitiveSubgroups.M20, False),
(x**5 + 20*x + 16, S5TransitiveSubgroups.A5, True),
(x**5 - x + 1, S5TransitiveSubgroups.S5, False),
],
# Degree 6
6: [
(x**6 + x**5 + x**4 + x**3 + x**2 + x + 1, S6TransitiveSubgroups.C6, False),
(x**6 + 108, S6TransitiveSubgroups.S3, False),
(x**6 + 2, S6TransitiveSubgroups.D6, False),
(x**6 - 3*x**2 - 1, S6TransitiveSubgroups.A4, True),
(x**6 + 3*x**3 + 3, S6TransitiveSubgroups.G18, False),
(x**6 - 3*x**2 + 1, S6TransitiveSubgroups.A4xC2, False),
(x**6 - 4*x**2 - 1, S6TransitiveSubgroups.S4p, True),
(x**6 - 3*x**5 + 6*x**4 - 7*x**3 + 2*x**2 + x - 4, S6TransitiveSubgroups.S4m, False),
(x**6 + 2*x**3 - 2, S6TransitiveSubgroups.G36m, False),
(x**6 + 2*x**2 + 2, S6TransitiveSubgroups.S4xC2, False),
(x**6 + 10*x**5 + 55*x**4 + 140*x**3 + 175*x**2 + 170*x + 25, S6TransitiveSubgroups.PSL2F5, True),
(x**6 + 10*x**5 + 55*x**4 + 140*x**3 + 175*x**2 - 3019*x + 25, S6TransitiveSubgroups.PGL2F5, False),
(x**6 + 6*x**4 + 2*x**3 + 9*x**2 + 6*x - 4, S6TransitiveSubgroups.G36p, True),
(x**6 + 2*x**4 + 2*x**3 + x**2 + 2*x + 2, S6TransitiveSubgroups.G72, False),
(x**6 + 24*x - 20, S6TransitiveSubgroups.A6, True),
(x**6 + x + 1, S6TransitiveSubgroups.S6, False),
],
}
def test_galois_group():
"""
Try all the test polys.
"""
for deg in range(1, 7):
polys = test_polys_by_deg[deg]
for T, G, alt in polys:
assert galois_group(T, by_name=True) == (G, alt)
def test_galois_group_degree_out_of_bounds():
raises(ValueError, lambda: galois_group(Poly(0, x)))
raises(ValueError, lambda: galois_group(Poly(1, x)))
raises(ValueError, lambda: galois_group(Poly(x ** 7 + 1)))
def test_galois_group_not_by_name():
"""
Check at least one polynomial of each supported degree, to see that
conversion from name to group works.
"""
for deg in range(1, 7):
T, G_name, _ = test_polys_by_deg[deg][0]
G, _ = galois_group(T)
assert G == G_name.get_perm_group()
def test_galois_group_not_monic_over_ZZ():
"""
Check that we can work with polys that are not monic over ZZ.
"""
for deg in range(1, 7):
T, G, alt = test_polys_by_deg[deg][0]
assert galois_group(T/2, by_name=True) == (G, alt)
def test__galois_group_degree_4_root_approx():
for T, G, alt in test_polys_by_deg[4]:
assert _galois_group_degree_4_root_approx(Poly(T)) == (G, alt)
def METHOD_NAME():
for T, G, alt in test_polys_by_deg[5]:
assert _galois_group_degree_5_hybrid(Poly(T)) == (G, alt)
def test_AlgebraicField_galois_group():
k = QQ.alg_field_from_poly(Poly(x**4 + 1))
G, _ = k.galois_group(by_name=True)
assert G == S4TransitiveSubgroups.V
k = QQ.alg_field_from_poly(Poly(x**4 - 2))
G, _ = k.galois_group(by_name=True)
assert G == S4TransitiveSubgroups.D4
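# Quick reference example (illustrative, grounded in the table above): the 5th
# cyclotomic polynomial has the cyclic Galois group C4, which is not contained
# in the alternating group, so
#   galois_group(Poly(x**4 + x**3 + x**2 + x + 1), by_name=True)
# returns (S4TransitiveSubgroups.C4, False).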
| null |
4,887 |
import abc
import io
import os
from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
from typing import runtime_checkable, Protocol
from typing import Union
StrPath = Union[str, os.PathLike[str]]
__all__ = ["ResourceReader", "Traversable", "TraversableResources"]
class ResourceReader(metaclass=abc.ABCMeta):
"""Abstract base class for loaders to provide resource reading support."""
@abc.abstractmethod
def open_resource(self, resource: Text) -> BinaryIO:
"""Return an opened, file-like object for binary reading.
The 'resource' argument is expected to represent only a file name.
If the resource cannot be found, FileNotFoundError is raised.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def resource_path(self, resource: Text) -> Text:
"""Return the file system path to the specified resource.
The 'resource' argument is expected to represent only a file name.
If the resource does not exist on the file system, raise
FileNotFoundError.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def is_resource(self, path: Text) -> bool:
"""Return True if the named 'path' is a resource.
Files are resources, directories are not.
"""
raise FileNotFoundError
@abc.abstractmethod
def contents(self) -> Iterable[str]:
"""Return an iterable of entries in `package`."""
raise FileNotFoundError
@runtime_checkable
class Traversable(Protocol):
"""
An object with a subset of pathlib.Path methods suitable for
traversing directories and opening files.
Any exceptions that occur when accessing the backing resource
may propagate unaltered.
"""
@abc.abstractmethod
def iterdir(self) -> Iterator["Traversable"]:
"""
Yield Traversable objects in self
"""
def read_bytes(self) -> bytes:
"""
Read contents of self as bytes
"""
with self.open('rb') as strm:
return strm.read()
def read_text(self, encoding: Optional[str] = None) -> str:
"""
Read contents of self as text
"""
with self.open(encoding=encoding) as strm:
return strm.read()
@abc.abstractmethod
def is_dir(self) -> bool:
"""
Return True if self is a directory
"""
@abc.abstractmethod
def is_file(self) -> bool:
"""
Return True if self is a file
"""
@abc.abstractmethod
def METHOD_NAME(self, *descendants: StrPath) -> "Traversable":
"""
Return Traversable resolved with any descendants applied.
Each descendant should be a path segment relative to self
and each may contain multiple levels separated by
``posixpath.sep`` (``/``).
"""
def __truediv__(self, child: StrPath) -> "Traversable":
"""
Return Traversable child in self
"""
return self.METHOD_NAME(child)
@abc.abstractmethod
def open(self, mode='r', *args, **kwargs):
"""
mode may be 'r' or 'rb' to open as text or binary. Return a handle
suitable for reading (same as pathlib.Path.open).
When opening as text, accepts encoding parameters such as those
accepted by io.TextIOWrapper.
"""
@abc.abstractproperty
def name(self) -> str:
"""
The base name of this object without any parent references.
"""
class TraversableResources(ResourceReader):
"""
The required interface for providing traversable
resources.
"""
@abc.abstractmethod
def files(self) -> "Traversable":
"""Return a Traversable object for the loaded package."""
def open_resource(self, resource: StrPath) -> io.BufferedReader:
return self.files().METHOD_NAME(resource).open('rb')
def resource_path(self, resource: Any) -> NoReturn:
raise FileNotFoundError(resource)
def is_resource(self, path: StrPath) -> bool:
return self.files().METHOD_NAME(path).is_file()
def contents(self) -> Iterator[str]:
return (item.name for item in self.files().iterdir())
| null |
4,888 |
import json
import logging
import urllib.request
import xml.etree.ElementTree as ET
from backend.gene_info.config import GeneInfoConfig
class NCBIException(Exception):
pass
class NCBIAPIException(NCBIException):
pass
class NCBIUnexpectedResultException(NCBIException):
pass
class NCBIProvider:
"""
    Provider class used to build NCBI E-utilities URLs and fetch gene information
"""
def __init__(self) -> None:
self.base_ncbi_uri = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
gene_info_config = GeneInfoConfig()
try:
self.api_key = f"&api_key={gene_info_config.ncbi_api_key}"
except RuntimeError:
logging.error("Could not find NCBI API key")
self.api_key = None
def fetch_gene_info_tree(self, uid):
"""
Given a gene UID from NCBI, returns an XML tree of gene information
"""
if self.api_key is None:
raise NCBIAPIException
fetch_url = f"{self.base_ncbi_uri}efetch.fcgi?db=gene&id={uid}{self.api_key}&retmode=xml"
try:
return urllib.request.urlopen(fetch_url).read()
except Exception:
raise NCBIUnexpectedResultException from None
def fetch_gene_uid(self, geneID, gene):
"""
Given a gene ensembl ID and gene name, returns a tuple with NCBI's corresponding gene UID and a
boolean noting if the result is from searching by the gene name, instead of the gene ENSEMBL id.
Initially, uses ensembl ID to find gene UID, but in the event that this returns an
unexpected result, call the NCBI API again with gene name. This is successful if
the response returns only 1 result for UID.
"""
if self.api_key is None:
raise NCBIAPIException
# first search with gene ENSEMBL id
try:
search_response = self._search_gene_uid(geneID)
except NCBIUnexpectedResultException:
raise NCBIUnexpectedResultException from None
# search with gene name if needed
if not self._is_valid_search_result(search_response) and gene and gene != "":
try:
# to refine the search parameters, gene is searched with the label "Gene Name" and human "Organism"
gene_search_term = "(" + str(gene) + "%5BGene%20Name%5D)%20AND%20human%5BOrganism%5D"
search_response = self._search_gene_uid(gene_search_term)
if self._is_valid_search_result(search_response):
show_warning_banner = False
return (int(search_response["esearchresult"]["idlist"][0]), show_warning_banner)
else:
logging.error(f"Unexpected NCBI search result, got {search_response}")
raise NCBIUnexpectedResultException
except NCBIUnexpectedResultException:
raise NCBIUnexpectedResultException from None
elif not self._is_valid_search_result(search_response):
logging.error(f"Unexpected NCBI search result, got {search_response}")
raise NCBIUnexpectedResultException
else:
show_warning_banner = False
return (int(search_response["esearchresult"]["idlist"][0]), show_warning_banner)
def _search_gene_uid(self, term):
"""
Conducts an Esearch using NCBI's E-Utilities API with provided term
"""
search_url = f"{self.base_ncbi_uri}esearch.fcgi?db=gene&term={term}{self.api_key}&retmode=json"
try:
search_response = urllib.request.urlopen(search_url).read()
except Exception:
raise NCBIUnexpectedResultException from None
return json.loads(search_response)
def _is_valid_search_result(self, search_result):
"""
Checks that a search result contains only one UID as a result
"""
try:
int(search_result["esearchresult"]["idlist"][0])
if len(search_result["esearchresult"]["idlist"]) != 1:
return False
except (ValueError, KeyError, IndexError):
return False
return True
def METHOD_NAME(self, tree_response):
"""
Parse NCBI XML response into relevant values to return by gene_info API
"""
result_tree = ET.ElementTree(ET.fromstring(tree_response))
root = result_tree.getroot()
synonyms = []
summary = ""
name = ""
summary_tag = "Entrezgene_summary"
gene_tag = "Entrezgene_gene"
desc_tag = "Gene-ref_desc"
syn_tag = "Gene-ref_syn"
if len(root) > 0:
for x in root[0]:
if x.tag == summary_tag:
summary = x.text
elif x.tag == gene_tag and len(x) > 0:
for y in x[0]:
if y.tag == desc_tag:
name = y.text
elif y.tag == syn_tag:
for syn in y:
synonyms.append(syn.text)
return dict(
name=name,
summary=summary,
synonyms=synonyms,
)
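# Illustrative call sequence (an assumed sketch, not part of the original
# provider; it needs a configured NCBI API key and network access, so it is
# shown as comments; the gene identifiers are examples only):
#
#   provider = NCBIProvider()
#   uid, warn = provider.fetch_gene_uid("ENSG00000141510", "TP53")
#   tree = provider.fetch_gene_info_tree(uid)
#   # the XML parsing helper above then reduces `tree` to
#   # {"name": ..., "summary": ..., "synonyms": [...]}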
| null |
4,889 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
'get_group_output',
]
@pulumi.output_type
class GetGroupResult:
"""
A collection of values returned by getGroup.
"""
def __init__(__self__, all_management_group_ids=None, all_subscription_ids=None, display_name=None, id=None, METHOD_NAME=None, name=None, parent_management_group_id=None, subscription_ids=None):
if all_management_group_ids and not isinstance(all_management_group_ids, list):
raise TypeError("Expected argument 'all_management_group_ids' to be a list")
pulumi.set(__self__, "all_management_group_ids", all_management_group_ids)
if all_subscription_ids and not isinstance(all_subscription_ids, list):
raise TypeError("Expected argument 'all_subscription_ids' to be a list")
pulumi.set(__self__, "all_subscription_ids", all_subscription_ids)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, list):
raise TypeError("Expected argument 'management_group_ids' to be a list")
pulumi.set(__self__, "management_group_ids", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parent_management_group_id and not isinstance(parent_management_group_id, str):
raise TypeError("Expected argument 'parent_management_group_id' to be a str")
pulumi.set(__self__, "parent_management_group_id", parent_management_group_id)
if subscription_ids and not isinstance(subscription_ids, list):
raise TypeError("Expected argument 'subscription_ids' to be a list")
pulumi.set(__self__, "subscription_ids", subscription_ids)
@property
@pulumi.getter(name="allManagementGroupIds")
def all_management_group_ids(self) -> Sequence[str]:
"""
A list of Management Group IDs which directly or indirectly belong to this Management Group.
"""
return pulumi.get(self, "all_management_group_ids")
@property
@pulumi.getter(name="allSubscriptionIds")
def all_subscription_ids(self) -> Sequence[str]:
"""
A list of Subscription IDs which are assigned to this Management Group or its children Management Groups.
"""
return pulumi.get(self, "all_subscription_ids")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="managementGroupIds")
def METHOD_NAME(self) -> Sequence[str]:
"""
A list of Management Group IDs which directly belong to this Management Group.
"""
return pulumi.get(self, "management_group_ids")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parentManagementGroupId")
def parent_management_group_id(self) -> str:
"""
The ID of any Parent Management Group.
"""
return pulumi.get(self, "parent_management_group_id")
@property
@pulumi.getter(name="subscriptionIds")
def subscription_ids(self) -> Sequence[str]:
"""
A list of Subscription IDs which are directly assigned to this Management Group.
"""
return pulumi.get(self, "subscription_ids")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
all_management_group_ids=self.all_management_group_ids,
all_subscription_ids=self.all_subscription_ids,
display_name=self.display_name,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
parent_management_group_id=self.parent_management_group_id,
subscription_ids=self.subscription_ids)
def get_group(display_name: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
"""
Use this data source to access information about an existing Management Group.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.management.get_group(name="00000000-0000-0000-0000-000000000000")
pulumi.export("displayName", example.display_name)
```
:param str display_name: Specifies the display name of this Management Group.
> **NOTE** Whilst multiple management groups may share the same display name, when filtering, the provider expects a single management group to be found with this name.
:param str name: Specifies the name or UUID of this Management Group.
"""
__args__ = dict()
__args__['displayName'] = display_name
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:management/getGroup:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
all_management_group_ids=pulumi.get(__ret__, 'all_management_group_ids'),
all_subscription_ids=pulumi.get(__ret__, 'all_subscription_ids'),
display_name=pulumi.get(__ret__, 'display_name'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'management_group_ids'),
name=pulumi.get(__ret__, 'name'),
parent_management_group_id=pulumi.get(__ret__, 'parent_management_group_id'),
subscription_ids=pulumi.get(__ret__, 'subscription_ids'))
@_utilities.lift_output_func(get_group)
def get_group_output(display_name: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]:
"""
Use this data source to access information about an existing Management Group.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.management.get_group(name="00000000-0000-0000-0000-000000000000")
pulumi.export("displayName", example.display_name)
```
:param str display_name: Specifies the display name of this Management Group.
> **NOTE** Whilst multiple management groups may share the same display name, when filtering, the provider expects a single management group to be found with this name.
:param str name: Specifies the name or UUID of this Management Group.
"""
...
| null |
4,890 |
import json
import numpy as np
# mimics relative error function in the src/rascal/math/utils.hh
def compute_relative_error(
reference_values, test_values, epsilon=100 * np.finfo(np.double).resolution
):
relative_error = test_values - reference_values
non_zero_idx = np.abs(reference_values) > epsilon
relative_error[non_zero_idx] /= reference_values[non_zero_idx]
return np.abs(relative_error)
def METHOD_NAME(X, Y=None):
if Y is None:
return list(X.keys())
else:
return list(set(X.keys()).METHOD_NAME(Y.keys()))
def dot(X, Y=None):
key_intersection = METHOD_NAME(X, Y)
if Y is None:
Y = X
N = X[key_intersection[0]].shape[0]
M = Y[key_intersection[0]].shape[0]
K = np.zeros((N, M))
for key in key_intersection:
K += np.dot(X[key], Y[key].T)
return K
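# Worked example (illustrative, not part of the original utilities): with
# X = {"a": A, "b": B} and Y = {"a": C},
#   dot(X, Y) == A @ C.T                # only the shared key "a" contributes
#   dot(X)    == A @ A.T + B @ B.T      # Y defaults to X, so every key contributes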
def adapt_structure(cell, positions, numbers, pbc):
cell = np.array(cell.T, order="F")
positions = np.array(positions.T, order="F")
numbers = numbers.reshape(-1, 1)
pbc = pbc.reshape(3, 1)
return dict(cell=cell, positions=positions, atom_types=numbers, pbc=pbc)
def dump_json_frame(fn, frames):
from collections import Iterable
if not isinstance(frames, Iterable):
frames = [frames]
data = dict()
for ii, frame in enumerate(frames):
data[ii] = dict(
positions=frame.get_positions().tolist(),
cell=frame.get_cell().tolist(),
numbers=frame.get_atomic_numbers().tolist(),
pbc=frame.get_pbc().tolist(),
)
data["ids"] = np.arange(len(frames)).tolist()
data["nextid"] = 2
with open(fn, "w") as f:
json.dump(data, f, indent=2, separators=(",", ": "))
def load_json_frame(fn):
with open(fn, "r") as f:
data = json.load(f)
ids = data["ids"]
keys = ["cell", "positions", "numbers", "pbc"]
structure = {key: np.array(data[str(idx)][key]) for idx in ids for key in keys}
return adapt_structure(**structure)
class BoxList(object):
def __init__(self, max_cutoff, cell, pbc, centers):
# Compute reciprocal lattice vectors.
b1_c, b2_c, b3_c = np.linalg.pinv(cell).T
# Compute distances of cell faces (height between 2
# consecutive faces [010]
l1 = np.linalg.norm(b1_c)
l2 = np.linalg.norm(b2_c)
l3 = np.linalg.norm(b3_c)
face_dist_c = np.array(
[
1 / l1 if l1 > 0 else 1,
1 / l2 if l2 > 0 else 1,
1 / l3 if l3 > 0 else 1,
]
)
        # Use the requested cutoff as the bin size
self.bin_size = max_cutoff
# Compute number of bins such that a sphere of radius
        # cutoff fits into eight neighboring bins.
self.nbins_c = np.maximum((face_dist_c / self.bin_size).astype(int), [1, 1, 1])
self.nbins = np.prod(self.nbins_c)
# Compute over how many bins we need to loop in the
# neighbor list search.
self.neigh_search = np.ceil(self.bin_size * self.nbins_c / face_dist_c).astype(
int
)
self.bin2icenters = [[] for bin_idx in range(self.nbins)]
scaled_positions_ic = np.linalg.solve(cell.T, centers.T).T
self.h_sizes = np.linalg.norm(cell, axis=1)
self.part2bin = {}
for icenter in range(len(centers)):
bin_index_ic = np.floor(scaled_positions_ic[icenter] * self.nbins_c).astype(
int
)
bin_id = self.cell2lin(bin_index_ic)
self.bin2icenters[bin_id].append(icenter)
self.part2bin[icenter] = bin_id
self.list = []
# print(self.nbins)
for bin_id in range(self.nbins):
self.list.append(
Box(
bin_id,
self.nbins_c,
self.neigh_search,
self.bin2icenters[bin_id],
pbc,
self,
)
)
def cell2lin(self, ids):
return int(ids[0] + self.nbins_c[0] * (ids[1] + self.nbins_c[1] * ids[2]))
def iter_box(self):
for bin_id in range(self.nbins):
yield self.list[bin_id]
def __getitem__(self, bin_id):
return self.list[bin_id]
class Box(object):
def __init__(self, lin_pos, nbins_c, neigh_search, icenters, pbc, boxlist):
self.nbins_c = nbins_c
self.neigh_search = neigh_search
self.icenters = icenters
self.pbc = pbc
self.lin_pos = lin_pos
self.mult_pos = self.lin2cell(lin_pos)
self.boxlist = boxlist
self.search_idx = []
for ii, p in enumerate(self.pbc):
if 0 == self.mult_pos[ii] and p is False:
self.search_idx.append(
[self.mult_pos[ii] + jj for jj in range(self.neigh_search[ii] + 1)]
)
elif self.nbins_c[ii] - 1 == self.mult_pos[ii] and p is False:
self.search_idx.append(
[
self.mult_pos[ii] + jj
for jj in range(-self.neigh_search[ii], 0 + 1)
]
)
else:
self.search_idx.append(
[
self.mult_pos[ii] + jj
for jj in range(
-self.neigh_search[ii], self.neigh_search[ii] + 1
)
]
)
self.neighbour_bin_index, self.neighbour_bin_shift = [], []
for ii in self.search_idx[0]:
for jj in self.search_idx[1]:
for kk in self.search_idx[2]:
box_shift, box_pos = np.divmod([ii, jj, kk], self.nbins_c)
neigh_box_idx = self.cell2lin(box_pos)
self.neighbour_bin_index.append(neigh_box_idx)
self.neighbour_bin_shift.append(box_shift)
def cell2lin(self, ids):
return int(ids[0] + self.nbins_c[0] * (ids[1] + self.nbins_c[1] * ids[2]))
def lin2cell(self, lin_ids):
fac = 1
cell_pos = np.array([0, 0, 0])
for ii in range(3):
cell_pos[ii] = lin_ids / fac % self.nbins_c[ii]
fac *= self.nbins_c[ii]
return cell_pos
def iter_neigh_box(self):
from copy import deepcopy
for ii in self.search_idx[0]:
for jj in self.search_idx[1]:
for kk in self.search_idx[2]:
box_shift, box_pos = np.divmod([ii, jj, kk], self.nbins_c)
neigh_box_idx = self.cell2lin(box_pos)
jcenters = deepcopy(self.boxlist[neigh_box_idx].icenters)
for jneigh in jcenters:
yield jneigh, deepcopy(box_shift)
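if __name__ == "__main__":
    # Minimal self-check sketch (an addition for illustration, not part of the
    # original test utilities): bin three centers of a 10 A cubic cell and make
    # sure every center lands in exactly one box.
    _cell = 10.0 * np.eye(3)
    _centers = np.array([[1.0, 1.0, 1.0], [5.0, 5.0, 5.0], [9.0, 2.0, 7.0]])
    _boxes = BoxList(3.0, _cell, np.array([True, True, True]), _centers)
    assert sum(len(b.icenters) for b in _boxes.iter_box()) == len(_centers)
    # Every neighbour pair reported by a box refers back to a known center index.
    for _box in _boxes.iter_box():
        for _jneigh, _shift in _box.iter_neigh_box():
            assert 0 <= _jneigh < len(_centers)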
| null |
4,891 |
import unittest
from mock import patch
from django.urls import reverse
from django.test import TestCase
from devilry.project.develop.testhelpers.corebuilder import PeriodBuilder
from devilry.project.develop.testhelpers.corebuilder import UserBuilder
from devilry.project.develop.testhelpers.soupselect import cssGet
from devilry.project.develop.testhelpers.soupselect import cssFind
from devilry.devilry_gradingsystem.pluginregistry import GradingSystemPluginRegistry
from .base import AdminViewTestMixin
from .base import MockApprovedPluginApi
from .base import MockPointsPluginApi
from .base import MockRequiresConfigurationPluginApi
@unittest.skip('devilry_gradingsystem will most likely be replaced in 3.0')
class TestSelectPluginView(TestCase, AdminViewTestMixin):
def setUp(self):
self.admin1 = UserBuilder('admin1').user
self.assignmentbuilder = PeriodBuilder.quickadd_ducku_duck1010_active()\
.add_assignment('assignment1')\
.add_admins(self.admin1)
self.url = reverse('devilry_gradingsystem_admin_selectplugin', kwargs={
'assignmentid': self.assignmentbuilder.assignment.id,
})
def test_get_not_admin_404_with_pluginselected(self):
nobody = UserBuilder('nobody').user
myregistry = GradingSystemPluginRegistry()
myregistry.add(MockPointsPluginApi)
with patch('devilry.devilry_gradingsystem.views.admin.selectplugin.gradingsystempluginregistry', myregistry):
self.assertIn(MockPointsPluginApi.id, myregistry)
response = self.get_as(nobody, {
'grading_system_plugin_id': MockPointsPluginApi.id
})
self.assertEqual(response.status_code, 404)
def test_render(self):
myregistry = GradingSystemPluginRegistry()
myregistry.add(MockPointsPluginApi)
myregistry.add(MockApprovedPluginApi)
with patch('devilry.devilry_gradingsystem.views.admin.selectplugin.gradingsystempluginregistry', myregistry):
response = self.get_as(self.admin1)
self.assertEqual(response.status_code, 200)
html = response.content
self.assertEqual(cssGet(html, '.page-header h1').text.strip(),
'How would you like to provide feedback to your students?')
self.assertEqual(len(cssFind(html, '.devilry_gradingsystem_verbose_selectbox')), 2)
def test_next_page_requires_configuration(self):
myregistry = GradingSystemPluginRegistry()
myregistry.add(MockRequiresConfigurationPluginApi)
with patch('devilry.devilry_gradingsystem.views.admin.selectplugin.gradingsystempluginregistry', myregistry):
response = self.get_as(self.admin1, {
'grading_system_plugin_id': MockRequiresConfigurationPluginApi.id
})
self.assertEqual(response.status_code, 302)
self.assertEqual(response["Location"],
'http://testserver/mock/requiresconfiguration/configure/{}'.format(
self.assignmentbuilder.assignment.id))
self.assignmentbuilder.reload_from_db()
self.assertEqual(self.assignmentbuilder.assignment.grading_system_plugin_id,
MockRequiresConfigurationPluginApi.id)
def test_next_page_no_configuration_required(self):
myregistry = GradingSystemPluginRegistry()
myregistry.add(MockPointsPluginApi)
with patch('devilry.devilry_gradingsystem.views.admin.selectplugin.gradingsystempluginregistry', myregistry):
response = self.get_as(self.admin1, {
'grading_system_plugin_id': MockPointsPluginApi.id
})
self.assertEqual(response.status_code, 302)
self.assertTrue(response["Location"].endswith(
reverse('devilry_gradingsystem_admin_setmaxpoints', kwargs={
'assignmentid': self.assignmentbuilder.assignment.id})))
self.assignmentbuilder.reload_from_db()
self.assertEqual(self.assignmentbuilder.assignment.grading_system_plugin_id,
MockPointsPluginApi.id)
def METHOD_NAME(self):
myregistry = GradingSystemPluginRegistry()
myregistry.add(MockPointsPluginApi)
with patch('devilry.devilry_gradingsystem.views.admin.selectplugin.gradingsystempluginregistry', myregistry):
response = self.get_as(self.admin1, {
'grading_system_plugin_id': 'doesnotexist'
})
self.assertEqual(response.status_code, 200)
self.assertIn('Invalid grading system plugin ID: doesnotexist', response.content)
| null |
4,892 |
"""From https://github.com/orweis/emport."""
import collections
import glob
import inspect
import os
import sys
__author__ = "orw"
class ObjectUtils(object):
@staticmethod
def is_derived_of(obj, possible_parent_class):
if hasattr(obj, "__bases__"):
return possible_parent_class in inspect.getmro(obj)
else:
return False
@staticmethod
def get_properties(obj):
def filter(x):
            return not callable(x)
return {
k: v for k, v in inspect.getmembers(obj, filter) if not k.startswith("__")
}
@staticmethod
def get_members_who_are_instance_of(obj, class_type):
def filter(x):
return isinstance(x, class_type)
return inspect.getmembers(obj, filter)
@classmethod
def get_class_members_who_derive_of(cls, obj, parent_class):
def filter(x):
return (
inspect.isclass(x)
and cls.is_derived_of(x, parent_class)
and list(inspect.getmro(x)).index(parent_class) != 0
)
return inspect.getmembers(obj, filter)
class PyFrame(object):
def __init__(self):
self._frame = inspect.currentframe()
def __enter__(self):
return self._frame.f_back
def __exit__(self, exc_type, exc_value, traceback):
del self._frame
class Emport(object):
def __init__(self, module, members):
self.__original__ = module
self._members = []
for member in members:
self._members.append(member[1])
setattr(self, member[0], member[1])
def get_original_module(self):
return self.__original__
def METHOD_NAME(self):
return self._members
def get_flat_list(self):
"""
:return: all the members of this Emport (And submodules) as one list
"""
res = []
for member in self._members:
# if a member is an Emport itself flatten it as well
if isinstance(member, Emport):
res += member.get_flat_list()
else:
res.append(member)
return res
def __repr__(self):
return "EMPORT - %s" % self.__original__
def get_caller_module(depth=0):
"""
:param depth: stack depth of the caller. 0 == yourself, 1 == your parent
:return: the module object of the caller function (in set stack depth)
"""
with PyFrame() as frame:
for i in range(0, depth):
frame = frame.f_back
return sys.modules[frame.f_globals["__name__"]]
def co_to_dict(co):
return {
"co_argcount": co.co_argcount,
"co_nlocals": co.co_nlocals,
"co_stacksize": co.co_stacksize,
"co_flags": co.co_flags,
"co_consts": co.co_consts,
"co_names": co.co_names,
"co_varnames": co.co_varnames,
"co_filename": co.co_filename,
"co_name": co.co_name,
"co_firstlineno": co.co_firstlineno,
"co_lnotab": co.co_lnotab,
}
def get_caller(depth=0):
"""
:param depth: stack depth of the caller. 0 == yourself, 1 == your parent
:return: the frame object of the caller function (in set stack depth)
"""
with PyFrame() as frame:
for i in range(0, depth):
frame = frame.f_back
return co_to_dict(frame.f_code)
def emport_by_class(from_path, cls, import_items=None):
"""Wrap __import__ to import modules and filter only classes deriving from
the given cls.
:param from_path: dot separated package path
:param cls: class to filter import contents by
    :param import_items: the items to import from the package path (can also be ['*'])
:return: an Emport object with contents filtered according to given cls
"""
import_items = import_items or ["*"]
module_obj = __import__(from_path, globals(), locals(), import_items, 0)
clean_items = ObjectUtils.get_class_members_who_derive_of(module_obj, cls)
for (sub_name, sub_module) in ObjectUtils.get_members_who_are_instance_of(
module_obj, module_obj.__class__
):
results = ObjectUtils.get_class_members_who_derive_of(sub_module, cls)
# Keep only modules with sub values
if len(results) > 0:
clean_sub_module = Emport(sub_module, results)
clean_items.append((sub_name, clean_sub_module))
clean_module = Emport(module_obj, clean_items)
return clean_module
def emport_objects_by_class(from_path, cls, import_items=None):
"""Wrap __import__ to import modules and filter only classes deriving from
the given cls Return a flat list of objects without the modules themselves.
:param from_path: dot separated package path
:param cls: class to filter import contents by
    :param import_items: the items to import from the package path (can also be ['*'])
:return: an Emport object with contents filtered according to given cls
"""
results = []
import_items = import_items or ["*"]
module_obj = __import__(from_path, globals(), locals(), import_items, 0)
# direct objects
clean_items = ObjectUtils.get_class_members_who_derive_of(module_obj, cls)
results.extend(clean_items)
# nested
for (sub_name, sub_module) in ObjectUtils.get_members_who_are_instance_of(
module_obj, module_obj.__class__
):
objects = ObjectUtils.get_class_members_who_derive_of(sub_module, cls)
results.extend(objects)
return results
def dynamic_all(init_file_path):
    """Return a list of all the .py module names in a directory.
    Usage (in an ``__init__.py`` file)::
        from emport import dynamic_all
        __all__ = dynamic_all(__file__)
    """
modules = glob.glob(os.path.join(os.path.dirname(init_file_path), "*.py*"))
target_modules = set([])
for module in modules:
name = os.path.splitext(os.path.basename(module))[0]
if os.path.isfile(module) and not name.startswith("_"):
target_modules.add(name)
return list(target_modules)
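if __name__ == "__main__":
    # Minimal demonstration sketch (an addition for illustration, not part of
    # the original module): ObjectUtils filters class members by ancestry and
    # skips the parent class itself.
    class _Base(object):
        pass
    class _Child(_Base):
        pass
    _this_module = sys.modules[__name__]
    assert ObjectUtils.is_derived_of(_Child, _Base)
    found = ObjectUtils.get_class_members_who_derive_of(_this_module, _Base)
    assert found == [("_Child", _Child)]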
| null |
4,893 |
# EnergyPlus, Copyright (c) 1996-2023, The Board of Trustees of the University
# of Illinois, The Regents of the University of California, through Lawrence
# Berkeley National Laboratory (subject to receipt of any required approvals
# from the U.S. Dept. of Energy), Oak Ridge National Laboratory, managed by UT-
# Battelle, Alliance for Sustainable Energy, LLC, and other contributors. All
# rights reserved.
#
# NOTICE: This Software was developed under funding from the U.S. Department of
# Energy and the U.S. Government consequently retains certain rights. As such,
# the U.S. Government has been granted for itself and others acting on its
# behalf a paid-up, nonexclusive, irrevocable, worldwide license in the
# Software to reproduce, distribute copies to the public, prepare derivative
# works, and perform publicly and display publicly, and to permit others to do
# so.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, the University of Illinois, U.S. Dept. of Energy nor
# the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# (4) Use of EnergyPlus(TM) Name. If Licensee (i) distributes the software in
# stand-alone form without changes from the version obtained under this
# License, or (ii) Licensee makes a reference solely to the software
# portion of its product, Licensee must refer to the software as
# "EnergyPlus version X" software, where "X" is the version number Licensee
# obtained under this License and may not use a different name for the
# software. Except as specifically required in this Section (4), Licensee
# shall not use in a company name, a product name, in advertising,
# publicity, or other promotional activities any name, trade name,
# trademark, logo, or other designation of "EnergyPlus", "E+", "e+" or
# confusingly similar designation, without the U.S. Department of Energy's
# prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import platform
import subprocess
from eplaunch.workflows.base import BaseEPLaunchWorkflow1, EPLaunchWorkflowResponse1
class ConvertInputFormatWorkflow(BaseEPLaunchWorkflow1):
def name(self):
return "ConvertInputFormat-${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH}"
def context(self):
return "EnergyPlus-${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH}-${CMAKE_VERSION_BUILD}"
def description(self):
return "Run ConvertInputFormat"
def METHOD_NAME(self):
return ["*.idf", "*.epJSON"]
def get_output_suffixes(self):
return [".idf", ".epJSON"]
def get_extra_data(self):
return {"Hey, it's extra": "data"}
def get_interface_columns(self):
return []
def main(self, run_directory, file_name, args):
if 'workflow location' in args:
energyplus_root_folder, _ = os.path.split(args['workflow location'])
if platform.system() == 'Windows':
convertinputformat_binary = os.path.join(energyplus_root_folder, 'ConvertInputFormat.exe')
else:
convertinputformat_binary = os.path.join(energyplus_root_folder, 'ConvertInputFormat')
if not os.path.exists(convertinputformat_binary):
return EPLaunchWorkflowResponse1(
success=False,
message="ConvertInputFormat binary not found: {}!".format(convertinputformat_binary),
column_data=[]
)
else:
return EPLaunchWorkflowResponse1(
success=False,
message="Workflow location missing: {}!".format(args['worflow location']),
column_data=[]
)
original_with_path = os.path.join(run_directory, file_name)
converted_file_no_ext, original_ext = os.path.splitext(original_with_path)
if original_ext.lower() == '.idf':
converted_file_with_path = converted_file_no_ext + '.epJSON'
elif original_ext.lower() == '.epjson':
converted_file_with_path = converted_file_no_ext + '.idf'
else:
return EPLaunchWorkflowResponse1(
success=False,
message="Invalid extension {} on file: {}!".format(original_ext, original_with_path),
column_data=[]
)
if os.path.exists(original_with_path) and os.path.exists(convertinputformat_binary) and os.path.exists(run_directory):
# execute utility
command_line_args = [convertinputformat_binary, original_with_path]
try:
for message in self.execute_for_callback(command_line_args, run_directory):
self.callback(message)
except subprocess.CalledProcessError:
self.callback("ConvertInputFormat FAILED")
return EPLaunchWorkflowResponse1(
success=False,
message="ConvertInputFormat failed for file: %s!" % original_with_path,
                    column_data=[]
)
return EPLaunchWorkflowResponse1(
success=True,
message="Ran ConvertInputFormat OK for file: {}!".format(original_with_path),
column_data=[]
)
else:
return EPLaunchWorkflowResponse1(
success=False,
message="ConvertInputFormat file not found: {}!".format(original_with_path),
column_data=[]
)
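# Hedged usage sketch (illustrative, not part of the original workflow file): EP-Launch
# normally drives this class, but main() above can also be exercised directly. The paths
# below are hypothetical, and 'workflow location' is the only args key main() reads.
#     workflow = ConvertInputFormatWorkflow()
#     response = workflow.main(
#         run_directory='/tmp/run',
#         file_name='in.idf',
#         args={'workflow location': '/opt/EnergyPlus-23.1.0/workflows'},
#     )
#     print(response)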
| null |
4,894 |
from fastapi import APIRouter, FastAPI
from fastapi.responses import HTMLResponse, JSONResponse, PlainTextResponse
from fastapi.testclient import TestClient
class OverrideResponse(JSONResponse):
media_type = "application/x-override"
app = FastAPI()
router_a = APIRouter()
router_a_a = APIRouter()
router_a_b_override = APIRouter() # Overrides default class
router_b_override = APIRouter() # Overrides default class
router_b_a = APIRouter()
router_b_a_c_override = APIRouter() # Overrides default class again
@app.get("/")
def get_root():
return {"msg": "Hello World"}
@app.get("/override", response_class=PlainTextResponse)
def get_path_override():
return "Hello World"
@router_a.get("/")
def get_a():
return {"msg": "Hello A"}
@router_a.get("/override", response_class=PlainTextResponse)
def get_a_path_override():
return "Hello A"
@router_a_a.get("/")
def get_a_a():
return {"msg": "Hello A A"}
@router_a_a.get("/override", response_class=PlainTextResponse)
def get_a_a_path_override():
return "Hello A A"
@router_a_b_override.get("/")
def get_a_b():
return "Hello A B"
@router_a_b_override.get("/override", response_class=HTMLResponse)
def get_a_b_path_override():
return "Hello A B"
@router_b_override.get("/")
def get_b():
return "Hello B"
@router_b_override.get("/override", response_class=HTMLResponse)
def get_b_path_override():
return "Hello B"
@router_b_a.get("/")
def get_b_a():
return "Hello B A"
@router_b_a.get("/override", response_class=HTMLResponse)
def METHOD_NAME():
return "Hello B A"
@router_b_a_c_override.get("/")
def get_b_a_c():
return "Hello B A C"
@router_b_a_c_override.get("/override", response_class=OverrideResponse)
def get_b_a_c_path_override():
return {"msg": "Hello B A C"}
router_b_a.include_router(
router_b_a_c_override, prefix="/c", default_response_class=HTMLResponse
)
router_b_override.include_router(router_b_a, prefix="/a")
router_a.include_router(router_a_a, prefix="/a")
router_a.include_router(
router_a_b_override, prefix="/b", default_response_class=PlainTextResponse
)
app.include_router(router_a, prefix="/a")
app.include_router(
router_b_override, prefix="/b", default_response_class=PlainTextResponse
)
client = TestClient(app)
json_type = "application/json"
text_type = "text/plain; charset=utf-8"
html_type = "text/html; charset=utf-8"
override_type = "application/x-override"
def test_app():
with client:
response = client.get("/")
assert response.json() == {"msg": "Hello World"}
assert response.headers["content-type"] == json_type
def test_app_override():
with client:
response = client.get("/override")
assert response.content == b"Hello World"
assert response.headers["content-type"] == text_type
def test_router_a():
with client:
response = client.get("/a")
assert response.json() == {"msg": "Hello A"}
assert response.headers["content-type"] == json_type
def test_router_a_override():
with client:
response = client.get("/a/override")
assert response.content == b"Hello A"
assert response.headers["content-type"] == text_type
def test_router_a_a():
with client:
response = client.get("/a/a")
assert response.json() == {"msg": "Hello A A"}
assert response.headers["content-type"] == json_type
def test_router_a_a_override():
with client:
response = client.get("/a/a/override")
assert response.content == b"Hello A A"
assert response.headers["content-type"] == text_type
def test_router_a_b():
with client:
response = client.get("/a/b")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == text_type
def test_router_a_b_override():
with client:
response = client.get("/a/b/override")
assert response.content == b"Hello A B"
assert response.headers["content-type"] == html_type
def test_router_b():
with client:
response = client.get("/b")
assert response.content == b"Hello B"
assert response.headers["content-type"] == text_type
def test_router_b_override():
with client:
response = client.get("/b/override")
assert response.content == b"Hello B"
assert response.headers["content-type"] == html_type
def test_router_b_a():
with client:
response = client.get("/b/a")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == text_type
def test_router_b_a_override():
with client:
response = client.get("/b/a/override")
assert response.content == b"Hello B A"
assert response.headers["content-type"] == html_type
def test_router_b_a_c():
with client:
response = client.get("/b/a/c")
assert response.content == b"Hello B A C"
assert response.headers["content-type"] == html_type
def test_router_b_a_c_override():
with client:
response = client.get("/b/a/c/override")
assert response.json() == {"msg": "Hello B A C"}
assert response.headers["content-type"] == override_type
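# Hedged summary (not part of the original test module): the behaviour exercised above is
# that an explicit response_class on a route always wins; otherwise the most deeply nested
# default_response_class passed to include_router() applies, falling back to the app-wide
# default (JSONResponse) when none is set anywhere along the chain.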
| null |
4,895 |
import base64
import logging
try:
from Crypto.Cipher import AES
from Crypto import Random
except ImportError:
from .nocrypto import AES, Random
from ably.types.typedbuffer import TypedBuffer
from ably.util.exceptions import AblyException
log = logging.getLogger(__name__)
class CipherParams:
def __init__(self, algorithm='AES', mode='CBC', secret_key=None, iv=None):
self.__algorithm = algorithm.upper()
self.__secret_key = secret_key
self.__key_length = len(secret_key) * 8 if secret_key is not None else 128
self.__mode = mode.upper()
self.__iv = iv
@property
def algorithm(self):
return self.__algorithm
@property
def secret_key(self):
return self.__secret_key
@property
def iv(self):
return self.__iv
@property
def key_length(self):
return self.__key_length
@property
def mode(self):
return self.__mode
class CbcChannelCipher:
def __init__(self, cipher_params):
self.__secret_key = (cipher_params.secret_key or
self.__random(cipher_params.key_length / 8))
if isinstance(self.__secret_key, str):
self.__secret_key = self.__secret_key.encode()
self.__iv = cipher_params.iv or self.__random(16)
self.__block_size = len(self.__iv)
if cipher_params.algorithm != 'AES':
raise NotImplementedError('Only AES algorithm is supported')
self.__algorithm = cipher_params.algorithm
if cipher_params.mode != 'CBC':
raise NotImplementedError('Only CBC mode is supported')
self.__mode = cipher_params.mode
self.__key_length = cipher_params.key_length
self.__encryptor = AES.new(self.__secret_key, AES.MODE_CBC, self.__iv)
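    # __pad/__unpad implement PKCS#7-style padding to a whole number of AES blocks.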
def __pad(self, data):
padding_size = self.__block_size - (len(data) % self.__block_size)
padding_char = bytes((padding_size,))
padded = data + padding_char * padding_size
return padded
def __unpad(self, data):
padding_size = data[-1]
if padding_size > len(data):
# Too short
raise AblyException('invalid-padding', 0, 0)
if padding_size == 0:
# Missing padding
raise AblyException('invalid-padding', 0, 0)
for i in range(padding_size):
# Invalid padding bytes
if padding_size != data[-i - 1]:
raise AblyException('invalid-padding', 0, 0)
return data[:-padding_size]
def __random(self, length):
rndfile = Random.new()
return rndfile.read(length)
def encrypt(self, plaintext):
if isinstance(plaintext, bytearray):
plaintext = bytes(plaintext)
padded_plaintext = self.__pad(plaintext)
encrypted = self.__iv + self.__encryptor.encrypt(padded_plaintext)
self.__iv = encrypted[-self.__block_size:]
return encrypted
def decrypt(self, ciphertext):
if isinstance(ciphertext, bytearray):
ciphertext = bytes(ciphertext)
iv = ciphertext[:self.__block_size]
ciphertext = ciphertext[self.__block_size:]
decryptor = AES.new(self.__secret_key, AES.MODE_CBC, iv)
decrypted = decryptor.decrypt(ciphertext)
return bytearray(self.__unpad(decrypted))
@property
def secret_key(self):
return self.__secret_key
@property
def iv(self):
return self.__iv
@property
def cipher_type(self):
return ("%s-%s-%s" % (self.__algorithm, self.__key_length,
self.__mode)).lower()
class CipherData(TypedBuffer):
ENCODING_ID = 'cipher'
def __init__(self, buffer, type, cipher_type=None, **kwargs):
self.__cipher_type = cipher_type
super().__init__(buffer, type, **kwargs)
@property
def METHOD_NAME(self):
return self.ENCODING_ID + '+' + self.__cipher_type
DEFAULT_KEYLENGTH = 256
DEFAULT_BLOCKLENGTH = 16
def generate_random_key(length=DEFAULT_KEYLENGTH):
rndfile = Random.new()
return rndfile.read(length // 8)
def get_default_params(params=None):
if type(params) in [str, bytes]:
raise ValueError("Calling get_default_params with a key directly is deprecated, it expects a params dict")
key = params.get('key')
algorithm = params.get('algorithm') or 'AES'
iv = params.get('iv') or generate_random_key(DEFAULT_BLOCKLENGTH * 8)
mode = params.get('mode') or 'CBC'
if not key:
raise ValueError("Crypto.get_default_params: a key is required")
if type(key) == str:
key = base64.b64decode(key)
cipher_params = CipherParams(algorithm=algorithm, secret_key=key, iv=iv, mode=mode)
validate_cipher_params(cipher_params)
return cipher_params
def get_cipher(params):
if isinstance(params, CipherParams):
cipher_params = params
else:
cipher_params = get_default_params(params)
return CbcChannelCipher(cipher_params)
def validate_cipher_params(cipher_params):
if cipher_params.algorithm == 'AES' and cipher_params.mode == 'CBC':
key_length = cipher_params.key_length
if key_length == 128 or key_length == 256:
return
raise ValueError(
'Unsupported key length %s for aes-cbc encryption. Encryption key must be 128 or 256 bits'
' (16 or 32 ASCII characters)' % key_length)
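# Hedged usage sketch (illustrative, not part of the original module): an encrypt/decrypt
# round trip using only the helpers defined above; the key is generated on the fly.
#     params = get_default_params({'key': generate_random_key()})
#     cipher = get_cipher(params)
#     ciphertext = cipher.encrypt(b'hello world')
#     assert bytes(cipher.decrypt(ciphertext)) == b'hello world'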
| null |
4,896 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantidqt.utils.observer_pattern import GenericObservable, GenericObserver, GenericObserverWithArgPassing
from mantidqtinterfaces.Muon.GUI.Common.utilities.run_string_utils import run_string_to_list
from mantidqtinterfaces.Muon.GUI.Common import thread_model
from mantidqtinterfaces.Muon.GUI.Common.thread_model_wrapper import ThreadModelWrapper
class EAGroupingTabPresenter(object):
"""
The grouping tab presenter is responsible for synchronizing the group table.
"""
@staticmethod
def string_to_list(text):
return run_string_to_list(text)
def __init__(self, view, model, grouping_table_widget=None):
self._view = view
self._model = model
self.grouping_table_widget = grouping_table_widget
self._view.set_description_text(self.text_for_description())
# monitors for loaded data changing
self.loadObserver = GenericObserver(self.handle_new_data_loaded)
self.instrumentObserver = GenericObserver(self.on_clear_requested)
# notifiers
self.groupingNotifier = GenericObservable()
self.enable_editing_notifier = GenericObservable()
self.disable_editing_notifier = GenericObservable()
self.calculation_finished_notifier = GenericObservable()
self.message_observer = GenericObserverWithArgPassing(self._view.display_warning_box)
self.gui_variables_observer = GenericObserver(self.handle_update_all_clicked)
self.enable_observer = GenericObserver(self.enable_editing)
self.disable_observer = GenericObserver(self.disable_editing)
self.disable_tab_observer = GenericObserver(self.disable_editing_without_notifying_subscribers)
self.enable_tab_observer = GenericObserver(self.enable_editing_without_notifying_subscribers)
self.update_view_from_model_observer = GenericObserver(self.update_view_from_model)
def update_view_from_model(self):
self.grouping_table_widget.update_view_from_model()
def show(self):
self._view.show()
def text_for_description(self):
"""
Generate the text for the description edit at the top of the widget.
"""
text = "\u03BCx: exp2k : file type .dat"
return text
def update_description_text(self, description_text=""):
if not description_text:
description_text = self.text_for_description()
self._view.set_description_text(description_text)
def disable_editing(self):
self.grouping_table_widget.disable_editing()
self.disable_editing_notifier.notify_subscribers()
def enable_editing(self):
self.grouping_table_widget.enable_editing()
self.enable_editing_notifier.notify_subscribers()
def disable_editing_without_notifying_subscribers(self):
self.grouping_table_widget.disable_editing()
def enable_editing_without_notifying_subscribers(self):
self.grouping_table_widget.enable_editing()
def error_callback(self, error_message):
self.enable_editing()
self._view.display_warning_box(error_message)
def handle_update_finished(self):
self.enable_editing()
self.groupingNotifier.notify_subscribers()
self.calculation_finished_notifier.notify_subscribers()
def on_clear_requested(self):
self._model.clear()
self.grouping_table_widget.update_view_from_model()
self.update_description_text()
def handle_new_data_loaded(self):
if self._model.is_data_loaded():
self.update_view_from_model()
self.update_description_text()
self.METHOD_NAME()
else:
self.on_clear_requested()
def METHOD_NAME(self):
# if we have no groups selected, generate a default plot
if len(self._model.selected_groups) == 0:
self.grouping_table_widget.plot_default_case()
def handle_update_all_clicked(self):
self.update_thread = self.create_update_thread()
self.update_thread.threadWrapperSetUp(self.disable_editing, self.handle_update_finished, self.error_callback)
self.update_thread.start()
def create_update_thread(self):
self._update_model = ThreadModelWrapper(self.calculate_all_data)
return thread_model.ThreadModel(self._update_model)
def calculate_all_data(self):
self._model.show_all_groups()
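# Hedged wiring sketch (illustrative, not part of the original file): the presenter is
# constructed with a view, a model and a grouping table widget; mocks stand in for the
# real Muon GUI objects here.
#     from unittest import mock
#     presenter = EAGroupingTabPresenter(mock.Mock(), mock.Mock(), grouping_table_widget=mock.Mock())
#     presenter.update_description_text()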
| null |
4,897 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from mantidqt.utils.qt.testing import start_qapplication
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table import PeriodicTable as silxPT, PeriodicTableItem
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table_model import PeriodicTableModel
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table_presenter import PeriodicTablePresenter
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table_view import PeriodicTableView
@start_qapplication
class PeriodicTablePresenterTest(unittest.TestCase):
def setUp(self):
self._model = mock.create_autospec(PeriodicTableModel)
self.view = PeriodicTableView()
self.presenter = PeriodicTablePresenter(self.view, self._model)
self.presenter.is_selected = mock.Mock()
self.mock_elem = mock.create_autospec(PeriodicTableItem)
self.mock_elem.symbol = mock.Mock()
self.view.ptable = mock.create_autospec(silxPT)
self.view.ptable.getSelection = mock.Mock(return_value=self.mock_elem)
self.view.ptable.isElementSelected = mock.Mock(return_value=True)
self.view.on_table_lclicked = mock.Mock()
self.view.on_table_rclicked = mock.Mock()
self.view.on_table_changed = mock.Mock()
self.view.unreg_on_table_lclicked = mock.Mock()
self.view.unreg_on_table_rclicked = mock.Mock()
self.view.unreg_on_table_changed = mock.Mock()
self.presenter.view = self.view
    # Helper: asserts that calling register_func with a test slot invokes signal_func exactly once.
def check_second_func_called(self, register_func, signal_func):
test_slot = mock.Mock()
register_func(test_slot)
assert signal_func.call_count == 1
def test_register_table_lclicked(self):
self.check_second_func_called(self.presenter.register_table_lclicked, self.view.on_table_lclicked)
def test_unregister_table_lclicked(self):
self.check_second_func_called(self.presenter.unregister_table_lclicked, self.view.unreg_on_table_lclicked)
def test_register_table_rclicked(self):
self.check_second_func_called(self.presenter.register_table_rclicked, self.view.on_table_rclicked)
def test_unregister_table_rclicked(self):
self.check_second_func_called(self.presenter.unregister_table_rclicked, self.view.unreg_on_table_rclicked)
def METHOD_NAME(self):
self.check_second_func_called(self.presenter.register_table_changed, self.view.on_table_changed)
def test_unregister_table_changed(self):
self.check_second_func_called(self.presenter.unregister_table_changed, self.view.unreg_on_table_changed)
def test_selection(self):
assert self.presenter.selection == self.mock_elem
def test_is_selected(self):
assert self.presenter.is_selected(mock.Mock())
def test_select_element(self):
self.check_second_func_called(self.presenter.select_element, self.view.ptable.setElementSelected)
def test_add_elements(self):
self.check_second_func_called(self.presenter.add_elements, self.view.ptable.setSelection)
def test_set_buttons(self):
self.presenter.model.peak_data = [self.mock_elem.symbol]
self.view.ptable.elements = [self.mock_elem]
self.presenter.set_buttons()
assert self.view.ptable.silentSetElementSelected.call_count == 1
assert self.view.ptable.enableElementButton.call_count == 1
def test_set_peak_datafile(self):
self.presenter.set_buttons = mock.Mock()
        test_filename = mock.Mock()
self.presenter.set_peak_datafile(test_filename)
assert self.presenter.model.peak_data_file == test_filename
if __name__ == "__main__":
unittest.main()
| null |
4,898 |
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2023. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Tests for the macOS Registry credential provider."""
import sys
import pytest
import platform
if platform.system() == 'Darwin':
from cbc_sdk.credential_providers.keychain_credential_provider import KeychainCredentialProvider
from cbc_sdk.errors import CredentialError
INVALID_JSON = """{\nurl: "http://example.test/",\n"token" : "TTTTT/TTTTT",\n"org_key": "TT123TT",
\n"ssl_verify": true,\n"ssl_verify_hostname": true,\n"ssl_cert_file": "test.cert",\n"ssl_force_tls_1_2": true,
\n"proxy": "proxy.example",\n"ignore_system_proxy": true,\n"integration": "test"\n} """
VALID_JSON = """{\n"url": "http://example.test/",\n"token" : "TTTTT/TTTTT",\n"org_key": "TT123TT",
\n"ssl_verify": true,\n"ssl_verify_hostname": true,\n"ssl_cert_file": "test.cert",\n"ssl_force_tls_1_2": true,
\n"proxy": "proxy.example",\n"ignore_system_proxy": true,\n"integration": "test"\n} """
@pytest.mark.skipif(platform.system() != 'Darwin', reason="only run on mac os")
def METHOD_NAME(monkeypatch):
"""Test that creating the KeychainCredentialProvider breaks if we're not on macOS."""
monkeypatch.setattr(sys, "platform", "linux")
with pytest.raises(CredentialError):
KeychainCredentialProvider("test", "test")
@pytest.mark.skipif(platform.system() != 'Darwin', reason="only run on mac os")
def test_password_parser(monkeypatch):
"""Test that checks if the password is parsed correctly."""
monkeypatch.setattr(sys, "platform", "darwin")
parsed = KeychainCredentialProvider("test", "test")._parse_credentials(VALID_JSON)
assert isinstance(parsed, dict)
assert parsed["url"] == "http://example.test/"
assert parsed["token"] == "TTTTT/TTTTT"
assert parsed["org_key"] == "TT123TT"
assert parsed["ssl_verify"]
assert parsed["ssl_verify_hostname"]
assert parsed["ssl_cert_file"] == "test.cert"
assert parsed["ssl_force_tls_1_2"]
assert parsed["proxy"] == "proxy.example"
assert parsed["ignore_system_proxy"]
assert parsed["integration"] == "test"
@pytest.mark.skipif(platform.system() != 'Darwin', reason="only run on mac os")
def test_password_parser_invalid_json(monkeypatch):
"""Test that checks if the password is parsed correctly."""
monkeypatch.setattr(sys, "platform", "darwin")
with pytest.raises(CredentialError):
KeychainCredentialProvider("test", "test")._parse_credentials(INVALID_JSON)
@pytest.mark.skipif(platform.system() != 'Darwin', reason="only run on mac os")
def test_get_credentials_valid(monkeypatch):
"""Tests if it parses the Credential data correctly."""
monkeypatch.setattr(sys, "platform", "darwin")
monkeypatch.setattr(KeychainCredentialProvider, "_get_keyring_credentials", lambda c: VALID_JSON)
keychain_provider = KeychainCredentialProvider("test", "test").get_credentials()
assert keychain_provider.url == "http://example.test/"
assert keychain_provider.token == "TTTTT/TTTTT"
assert keychain_provider.org_key == "TT123TT"
assert keychain_provider.ssl_verify
assert keychain_provider.ssl_verify_hostname
assert keychain_provider.ssl_cert_file == "test.cert"
assert keychain_provider.ssl_force_tls_1_2
assert keychain_provider.proxy == "proxy.example"
assert keychain_provider.ignore_system_proxy
assert keychain_provider.integration == "test"
@pytest.mark.skipif(platform.system() != 'Darwin', reason="only run on mac os")
def test_get_credentials_invalid(monkeypatch):
"""Tests if it raises the CredentialError with the given invalid json."""
monkeypatch.setattr(sys, "platform", "darwin")
monkeypatch.setattr(KeychainCredentialProvider, "_get_keyring_credentials", lambda c: INVALID_JSON)
with pytest.raises(CredentialError):
KeychainCredentialProvider("test", "test").get_credentials()
@pytest.mark.skipif(platform.system() != 'Darwin', reason="only run on mac os")
def test_get_credentials_none_found(monkeypatch):
"""Tests if it raises the CredentialError if credentials are not found."""
monkeypatch.setattr(sys, "platform", "darwin")
monkeypatch.setattr(KeychainCredentialProvider, "_get_keyring_credentials", lambda c: None)
with pytest.raises(CredentialError):
KeychainCredentialProvider("test", "test").get_credentials()
| null |
4,899 |
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
import unittest
import json
import math
import time
import config
import helper
# Test that binary PUTs and GETs work for requests larger than
# max_request_size (100 MB by default).
# max_request_size should only apply to JSON streaming or
# variable-length datatypes.
class StreamTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(StreamTest, self).__init__(*args, **kwargs)
self.base_domain = config.get("stream_test_domain")
self.username = config.get("user_name")
self.password = config.get("user_password")
helper.setupDomain(
self.base_domain, username=self.username, password=self.password
)
self.endpoint = helper.getEndpoint()
def METHOD_NAME(self):
self.session = helper.getSession()
def tearDown(self):
if self.session:
self.session.close()
def getUUIDByPath(self, domain, h5path):
return helper.getUUIDByPath(
domain,
h5path,
username=self.username,
password=self.password,
session=self.session,
)
def getRootUUID(self, domain):
return helper.getRootUUID(
domain, username=self.username, password=self.password, session=self.session
)
def testStream2D(self):
# write a large request for a 2d dataset
print("testStream2D", self.base_domain)
kwargs = {}
if self.username:
kwargs["username"] = self.username
if self.password:
kwargs["password"] = self.password
headers = helper.getRequestHeaders(domain=self.base_domain, **kwargs)
headers_bin_req = helper.getRequestHeaders(domain=self.base_domain, **kwargs)
headers_bin_req["Content-Type"] = "application/octet-stream"
headers_bin_rsp = helper.getRequestHeaders(domain=self.base_domain, **kwargs)
headers_bin_rsp["accept"] = "application/octet-stream"
req = self.endpoint + "/"
# Get root uuid
rsp = self.session.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
create_dataset = True
dset_id = None
dset_name = "dset2d"
num_col = int(config.get("stream_test_ncols"))
num_row = int(config.get("stream_test_nrows"))
item_size = 8 # 8 bytes for H5T_STD_U64LE
print(f"dataset shape: [{num_row}, {num_col}]")
try:
dset_id = self.getUUIDByPath(self.base_domain, "/dset2d")
print("got dset_id:", dset_id)
# get the dset json
req = self.endpoint + "/datasets/" + dset_id
rsp = self.session.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
shape = rspJson["shape"]
dims = shape["dims"]
# can re-use this if the shape is what we need
if len(dims) == 2 and dims[0] == num_row and dims[1] == num_col:
create_dataset = False
else:
print("dims don't match - delete and create new dataset")
except KeyError:
pass # will create a new dataset
if create_dataset and dset_id:
            # delete the old dataset
print(f"deleting dataset: {dset_id}")
req = self.endpoint + "/datasets/" + dset_id
rsp = self.session.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
# delete the link
req = self.endpoint + "/groups/" + root_uuid + "/links/" + dset_name
rsp = self.session.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
if create_dataset:
# create dataset
print(f"create datset with shape: [{num_row}, {num_col}]")
data = {"type": "H5T_STD_U64LE", "shape": [num_row, num_col]}
req = self.endpoint + "/datasets"
rsp = self.session.post(req, data=json.dumps(data), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
dset_id = rspJson["id"]
print(f"got dset_id: {dset_id}")
# link new dataset
req = self.endpoint + "/groups/" + root_uuid + "/links/" + dset_name
payload = {"id": dset_id}
rsp = self.session.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# initialize bytearray to test values
num_bytes = item_size * num_row * num_col
print(
f"initializing test data ({num_bytes} bytes, {num_bytes/(1024*1024):.2f} MiB)"
)
bin_data = bytearray(num_bytes)
exp = int(math.log10(num_col)) + 1
for i in range(num_row):
row_start_value = i * 10 ** exp
for j in range(num_col):
n = row_start_value + j + 1
int_bytes = n.to_bytes(8, "little")
offset_start = (i * num_col + j) * item_size
offset_end = offset_start + item_size
bin_data[offset_start:offset_end] = int_bytes
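        # bin_data is now a row-major little-endian uint64 buffer where element [i, j] == i * 10**exp + j + 1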
print("writing...")
ts = time.time()
req = self.endpoint + "/datasets/" + dset_id + "/value"
rsp = self.session.put(req, data=bin_data, headers=headers_bin_req)
self.assertEqual(rsp.status_code, 200)
elapsed = time.time() - ts
mb_per_sec = num_bytes / (1024 * 1024 * elapsed)
print(f" elapsed: {elapsed:.2f} s, {mb_per_sec:.2f} MB/s")
# read back the data as binary
print("reading...")
ts = time.time()
rsp = self.session.get(req, headers=headers_bin_rsp)
self.assertEqual(rsp.status_code, 200)
elapsed = time.time() - ts
mb_per_sec = num_bytes / (1024 * 1024 * elapsed)
print(f" elapsed: {elapsed:.2f} s, {mb_per_sec:.2f} MB/s")
print("comparing sent vs. received")
data = rsp.content
self.assertEqual(len(data), num_bytes)
self.assertEqual(data, bin_data)
print("passed!")
if __name__ == "__main__":
# setup test files
unittest.main()
| null |