""" Module providing unit-testing for `~halotools.utils.value_added_halo_table_functions`.
"""
from __future__ import (absolute_import, division, print_function)
from copy import deepcopy
from collections import Counter
import numpy as np
import pytest
from astropy.extern.six.moves import xrange as range
from ..value_added_halo_table_functions import broadcast_host_halo_property, add_halo_hostid
from ..crossmatch import crossmatch
from ...sim_manager import FakeSim
from ...custom_exceptions import HalotoolsError
__all__ = ('test_broadcast_host_halo_mass1', )
def test_broadcast_host_halo_mass1():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
broadcast_host_halo_property(t, 'halo_mvir', delete_possibly_existing_column=True)
assert 'halo_mvir_host_halo' in list(t.keys())
hostmask = t['halo_hostid'] == t['halo_id']
assert np.all(t['halo_mvir_host_halo'][hostmask] == t['halo_mvir'][hostmask])
assert np.any(t['halo_mvir_host_halo'][~hostmask] != t['halo_mvir'][~hostmask])
# Verify that both the group_member_generator method and the
# crossmatch method give identical results for calculation of host halo mass
idx_table1, idx_table2 = crossmatch(t['halo_hostid'], t['halo_id'])
t['tmp'] = np.zeros(len(t), dtype=t['halo_mvir'].dtype)
t['tmp'][idx_table1] = t['halo_mvir'][idx_table2]
assert np.all(t['tmp'] == t['halo_mvir_host_halo'])
data = Counter(t['halo_hostid'])
frequency_analysis = data.most_common()
for igroup in range(0, 10):
idx = np.where(t['halo_hostid'] == frequency_analysis[igroup][0])[0]
idx_host = np.where(t['halo_id'] == frequency_analysis[igroup][0])[0]
assert np.all(t['halo_mvir_host_halo'][idx] == t['halo_mvir'][idx_host])
for igroup in range(-10, -1):
idx = np.where(t['halo_hostid'] == frequency_analysis[igroup][0])[0]
idx_host = np.where(t['halo_id'] == frequency_analysis[igroup][0])[0]
assert np.all(t['halo_mvir_host_halo'][idx] == t['halo_mvir'][idx_host])
del t
def test_broadcast_host_halo_mass2():
"""
"""
fake_sim = FakeSim()
with pytest.raises(HalotoolsError) as err:
broadcast_host_halo_property(4, 'xxx')
substr = "The input ``table`` must be an Astropy `~astropy.table.Table` object"
assert substr in err.value.args[0]
def test_broadcast_host_halo_mass3():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
with pytest.raises(HalotoolsError) as err:
broadcast_host_halo_property(t, 'xxx')
substr = "The input table does not have the input ``halo_property_key``"
assert substr in err.value.args[0]
def test_broadcast_host_halo_mass4():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
with pytest.raises(HalotoolsError) as err:
broadcast_host_halo_property(t, 'halo_mvir')
substr = "Your input table already has an existing new_colname column name."
assert substr in err.value.args[0]
broadcast_host_halo_property(t, 'halo_mvir', delete_possibly_existing_column=True)
def test_add_halo_hostid1():
"""
"""
with pytest.raises(HalotoolsError) as err:
add_halo_hostid(5, delete_possibly_existing_column=False)
substr = "The input ``table`` must be an Astropy `~astropy.table.Table` object"
assert substr in err.value.args[0]
def test_add_halo_hostid2():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
del t['halo_id']
with pytest.raises(HalotoolsError) as err:
add_halo_hostid(t, delete_possibly_existing_column=False)
substr = "The input table must have ``halo_upid`` and ``halo_id`` keys"
assert substr in err.value.args[0]
def test_add_halo_hostid3():
"""
"""
fake_sim = FakeSim()
t = fake_sim.halo_table
with pytest.raises(HalotoolsError) as err:
add_halo_hostid(t, delete_possibly_existing_column=False)
substr = "Your input table already has an existing ``halo_hostid`` column name."
assert substr in err.value.args[0]
existing_halo_hostid = deepcopy(t['halo_hostid'].data)
del t['halo_hostid']
add_halo_hostid(t, delete_possibly_existing_column=False)
assert np.all(t['halo_hostid'] == existing_halo_hostid)
add_halo_hostid(t, delete_possibly_existing_column=True)
assert np.all(t['halo_hostid'] == existing_halo_hostid)
import logging
from typing import Callable, Union
from tags_model import (TagCategory, TagCategoryBase, TagCategoryBaseItem, TagItem)
tag_configuration: list[TagCategoryBase] = list()
def load_tag_configuration(config_file_name: str) -> None:
with open(config_file_name, mode='r', encoding='utf-8-sig') as f:
is_heading: bool = True
current_heading_index: int = -1
for line in f:
tag_line: str = line.strip()
if not tag_line:
is_heading = True
# An empty line marks the end of the current category.
# The next non-empty line starts a new category heading.
continue
if is_heading:
tag_configuration.append(TagCategoryBase((tag_line, None)))
current_heading_index += 1
is_heading = False
else:
tag_configuration[current_heading_index].add_item(tag_line)
log_tags('Loaded configuration:', tag_configuration)
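# A minimal sketch of the plain-text configuration format this parser expects
# (category and tag names below are hypothetical):
#
#   Genre        <- a heading line opens a category
#   rock         <- following non-empty lines are tags in that category
#   jazz
#                <- a blank line closes the category
#   Mood         <- the next non-empty line opens the next category
#   calm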
def load_tag_category(loaded_categories: list[TagCategory], tag_config: TagCategoryBase,
included_predicate: Callable[[TagItem], bool]) -> TagCategory:
def initialize_tag(tag_category: TagCategory, tag_config: TagCategoryBaseItem,
included_predicate: Callable[[TagItem], bool]) -> TagItem:
result: TagItem = TagItem((tag_config.name, tag_category))
# Use a predicate or an included property initializer?
result.included = included_predicate(result)
return result
result: TagCategory = TagCategory((tag_config.name, None))
loaded_categories.append(result)
result.items = [initialize_tag(result, tag, included_predicate) for tag in tag_config.items]
return result
def load_tags(tags_file_name: str) -> list[TagCategory]:
def load_current_tags() -> set[str]:
with open(tags_file_name, mode='r', encoding='utf-8-sig') as f:
# Skip <!DOCTYPE html> header line
next(f)
# strip '<div>' from left and '</div>\n' from right for the tag name
result: set[str] = {get_tag_key(line[5:-7]) for line in f}
return result
def get_tag_key(tag_name: str) -> str:
return tag_name.upper()
def unregister_tag(tag: str) -> bool:
result: bool = tag in current_tags
if result:
current_tags.remove(tag)
return result
current_tags: set[str] = load_current_tags()
result: list[TagCategory] = list()
for tag_category in tag_configuration:
load_tag_category(result, tag_category, lambda tag: unregister_tag(get_tag_key(tag.name)))
if current_tags:
additional: TagCategoryBase = TagCategoryBase(('Additional tags', None))
additional.items = [TagCategoryBaseItem((tag_name, additional)) for tag_name in current_tags]
load_tag_category(result, additional, lambda t: True)
log_tags('Loaded file tags:', result)
return result
def save_tags(tags_file_name: str, tag_categories: list[str]) -> None:
with open(tags_file_name, mode='w', encoding='utf-8-sig') as f:
f.write('<!DOCTYPE html>\n')
for tag in tag_categories:
_ = f.write(f'<div>{tag}</div>\n')
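# A hedged sketch of the round trip between save_tags and load_current_tags
# (tag names are hypothetical). A file written as
#   <!DOCTYPE html>
#   <div>rock</div>
#   <div>jazz</div>
# is read back by load_current_tags, which skips the first line and slices
# '<div>' (5 characters) off the left and '</div>\n' (7 characters) off the
# right of each remaining line, yielding the upper-cased keys {'ROCK', 'JAZZ'}.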
def log_tags(list_description: str, tag_list: Union[list[TagCategoryBase], list[TagCategory]]) -> None:
logging.debug(list_description)
for category in tag_list:
for tag in category.items:
    logging.debug(f'{category.name} : {tag.__dict__}')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
# pkgs/ops-pkg/src/genie/libs/ops/lldp/iosxr/tests/lldp_output.py
'''LLDP Genie Ops Object Outputs for IOSXR.'''
class LldpOutput(object):
ShowLldp = {
"hello_timer": 30,
"enabled": True,
"hold_timer": 120,
"status": "active",
"reinit_delay": 2
}
ShowLldpEntry = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'port_id': {
'GigabitEthernet2': {
'neighbors': {
'R1_csr1000v.openstacklocal': {
'chassis_id': '001e.49f7.2c00',
'port_description': 'GigabitEthernet2',
'system_name': 'R1_csr1000v.openstacklocal',
'neighbor_id': 'R1_csr1000v.openstacklocal',
'system_description': 'Cisco IOS Software [Everest], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 16.6.1, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2017 by Cisco Systems, Inc.\nCompiled Sat 22-Jul-17 05:51 by',
'time_remaining': 117,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
},
'router': {
'system': True,
'enabled': True,
},
},
'management_address': '10.1.2.1',
},
},
},
},
},
'GigabitEthernet0/0/0/1': {
'port_id': {
'Ethernet1/2': {
'neighbors': {
'R3_n9kv': {
'chassis_id': '5e00.8002.0009',
'port_description': 'Ethernet1/2',
'system_name': 'R3_n9kv',
'neighbor_id': 'R3_n9kv',
'system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.0(3)I7(1)\nTAC support: http://www.cisco.com/tac\nCopyright (c) 2002-2017, Cisco Systems, Inc. All rights reserved.\n',
'time_remaining': 103,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
'enabled': True,
},
'router': {
'system': True,
'enabled': True,
},
},
},
},
},
},
},
},
'total_entries': 2,
}
ShowLldpNeighborsDetail = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'port_id': {
'GigabitEthernet2': {
'neighbors': {
'R1_csr1000v.openstacklocal': {
'chassis_id': '001e.49f7.2c00',
'port_description': 'GigabitEthernet2',
'system_name': 'R1_csr1000v.openstacklocal',
'neighbor_id': 'R1_csr1000v.openstacklocal',
'system_description': 'Cisco IOS Software [Everest], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 16.6.1, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2017 by Cisco Systems, Inc.\nCompiled Sat 22-Jul-17 05:51 by',
'time_remaining': 90,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
},
'router': {
'system': True,
'enabled': True,
},
},
'management_address': '10.1.2.1',
},
},
},
},
},
'GigabitEthernet0/0/0/1': {
'port_id': {
'Ethernet1/2': {
'neighbors': {
'R3_n9kv': {
'chassis_id': '5e00.8002.0009',
'port_description': 'Ethernet1/2',
'system_name': 'R3_n9kv',
'neighbor_id': 'R3_n9kv',
'system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.0(3)I7(1)\nTAC support: http://www.cisco.com/tac\nCopyright (c) 2002-2017, Cisco Systems, Inc. All rights reserved.\n',
'time_remaining': 106,
'hold_time': 120,
'capabilities': {
'bridge': {
'system': True,
'enabled': True,
},
'router': {
'system': True,
'enabled': True,
},
},
},
},
},
},
},
},
'total_entries': 2,
}
ShowLldpTraffic = {
"counters": {
"frame_in": 399,
"frame_out": 588,
"frame_error_in": 0,
"frame_discard": 0,
"tlv_discard": 119,
'tlv_unknown': 119,
'entries_aged_out': 0
}
}
ShowLldpInterface = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'tx': 'enabled',
'rx': 'enabled',
'tx_state': 'idle',
'rx_state': 'wait for frame',
},
'GigabitEthernet0/0/0/1': {
'tx': 'enabled',
'rx': 'enabled',
'tx_state': 'idle',
'rx_state': 'wait for frame',
},
}
}
lldpOutput = {
'enabled': True,
'hello_timer': 30,
'hold_timer': 120,
'interfaces': {
'GigabitEthernet0/0/0/1': {
'port_id': {
'Ethernet1/2': {
'neighbors': {
'R3_n9kv': {
'neighbor_id': 'R3_n9kv',
'system_name': 'R3_n9kv',
'system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.0(3)I7(1)\nTAC support: http://www.cisco.com/tac\nCopyright (c) 2002-2017, Cisco Systems, Inc. All rights reserved.\n',
'chassis_id': '5e00.8002.0009',
'port_description': 'Ethernet1/2',
'capabilities': {
'router': {
'enabled': True,
},
'bridge': {
'enabled': True,
},
},
},
},
},
},
'enabled': True,
},
'GigabitEthernet0/0/0/0': {
'port_id': {
'GigabitEthernet2': {
'neighbors': {
'R1_csr1000v.openstacklocal': {
'neighbor_id': 'R1_csr1000v.openstacklocal',
'system_name': 'R1_csr1000v.openstacklocal',
'system_description': 'Cisco IOS Software [Everest], Virtual XE Software (X86_64_LINUX_IOSD-UNIVERSALK9-M), Version 16.6.1, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2017 by Cisco Systems, Inc.\nCompiled Sat 22-Jul-17 05:51 by',
'chassis_id': '001e.49f7.2c00',
'port_description': 'GigabitEthernet2',
'management_address': '10.1.2.1',
'capabilities': {
'router': {
'enabled': True,
},
},
},
},
},
},
'enabled': True,
},
},
'counters': {
'frame_in': 399,
'frame_out': 588,
'frame_error_in': 0,
'frame_discard': 0,
'tlv_discard': 119,
'tlv_unknown': 119,
'entries_aged_out': 0,
},
}
"""secp256k1 elliptic curve cryptography interface."""
# The process for using SECP256k1 is complex and more involved than ED25519.
#
# See https://xrpl.org/cryptographic-keys.html#secp256k1-key-derivation
# for an overview of the algorithm.
from __future__ import annotations
from hashlib import sha256
from typing import Callable, Tuple, Type, cast
from ecpy.curves import Curve # type: ignore
from ecpy.ecdsa import ECDSA # type: ignore
from ecpy.keys import ECPrivateKey, ECPublicKey # type: ignore
from typing_extensions import Final, Literal
from xrpl.core.keypairs.crypto_implementation import CryptoImplementation
from xrpl.core.keypairs.exceptions import XRPLKeypairsException
from xrpl.core.keypairs.helpers import sha512_first_half
_CURVE: Final[Curve] = Curve.get_curve("secp256k1")
_GROUP_ORDER: Final[int] = _CURVE.order
_SIGNER: Final[ECDSA] = ECDSA("DER")
# String keys must be _KEY_LENGTH long
_KEY_LENGTH: Final[int] = 66
# Pad string keys with _PADDING_PREFIX to reach _KEY_LENGTH
_PADDING_PREFIX: Final[str] = "0"
# Generated sequence values are _SEQUENCE_SIZE bytes unsigned big-endian
_SEQUENCE_SIZE: Final[int] = 4
_SEQUENCE_MAX: Final[int] = 256 ** _SEQUENCE_SIZE
# Intermediate private keys are always padded with 4 bytes of zeros
_INTERMEDIATE_KEYPAIR_PADDING: Final[bytes] = (0).to_bytes(
4,
byteorder="big",
signed=False,
)
class SECP256K1(CryptoImplementation):
"""
Methods for using the ECDSA cryptographic system with the secp256k1
elliptic curve.
"""
@classmethod
def derive_keypair(
cls: Type[SECP256K1], decoded_seed: bytes, is_validator: bool
) -> Tuple[str, str]:
"""
Derive the public and private secp256k1 keys from a given seed value.
Args:
decoded_seed: The secp256k1 seed to derive a key pair from, as bytes.
is_validator: Whether to derive a validator keypair.
Returns:
A (public key, private key) pair derived from the given seed.
"""
root_public, root_private = cls._do_derive_part(decoded_seed, "root")
# validator keys just stop at the first pass
if is_validator:
return cls._format_keys(root_public, root_private)
mid_public, mid_private = cls._do_derive_part(
cls._public_key_to_bytes(root_public),
"mid",
)
final_public, final_private = cls._derive_final_pair(
root_public,
root_private,
mid_public,
mid_private,
)
return cls._format_keys(final_public, final_private)
@classmethod
def sign(cls: Type[SECP256K1], message: bytes, private_key: str) -> bytes:
"""
Signs a message using a given secp256k1 private key.
Args:
message: The message to sign, as bytes.
private_key: The private key to use to sign the message.
Returns:
The signature of the message, as bytes.
"""
wrapped_private = ECPrivateKey(int(private_key, 16), _CURVE)
return cast(
bytes,
_SIGNER.sign_rfc6979(
sha512_first_half(message),
wrapped_private,
sha256,
canonical=True,
),
)
@classmethod
def is_valid_message(
cls: Type[SECP256K1], message: bytes, signature: bytes, public_key: str
) -> bool:
"""
Verifies the signature on a given message.
Args:
message: The message to validate.
signature: The signature of the message.
public_key: The public key to use to verify the message and
signature.
Returns:
Whether the message is valid for the given signature and public key.
"""
public_key_point = _CURVE.decode_point(bytes.fromhex(public_key))
wrapped_public = ECPublicKey(public_key_point)
return cast(
bool,
_SIGNER.verify(sha512_first_half(message), signature, wrapped_public),
)
@classmethod
def _format_keys(
cls: Type[SECP256K1], public: ECPublicKey, private: ECPrivateKey
) -> Tuple[str, str]:
return (
cls._format_key(cls._public_key_to_str(public)),
cls._format_key(cls._private_key_to_str(private)),
)
@classmethod
def _format_key(cls: Type[SECP256K1], keystr: str) -> str:
return keystr.rjust(_KEY_LENGTH, _PADDING_PREFIX).upper()
@classmethod
def _public_key_to_bytes(cls: Type[SECP256K1], key: ECPublicKey) -> bytes:
return bytes(_CURVE.encode_point(key.W, compressed=True))
@classmethod
def _public_key_to_str(cls: Type[SECP256K1], key: ECPublicKey) -> str:
return cls._public_key_to_bytes(key).hex()
@classmethod
def _do_derive_part(
cls: Type[SECP256K1], bytes_input: bytes, phase: Literal["root", "mid"]
) -> Tuple[ECPublicKey, ECPrivateKey]:
"""
Given bytes_input determine public/private keypair for a given phase of
this algorithm. The difference between generating the root and
intermediate keypairs is just what bytes are input by the caller and that
the intermediate keypair needs to inject _INTERMEDIATE_KEYPAIR_PADDING
into the value to hash to get the raw private key.
"""
def _candidate_merger(candidate: bytes) -> bytes:
if phase == "root":
return bytes_input + candidate
return bytes_input + _INTERMEDIATE_KEYPAIR_PADDING + candidate
raw_private = cls._get_secret(_candidate_merger)
wrapped_private = ECPrivateKey(int.from_bytes(raw_private, "big"), _CURVE)
return wrapped_private.get_public_key(), wrapped_private
@classmethod
def _derive_final_pair(
cls: Type[SECP256K1],
root_public: ECPublicKey,
root_private: ECPrivateKey,
mid_public: ECPublicKey,
mid_private: ECPrivateKey,
) -> Tuple[ECPublicKey, ECPrivateKey]:
raw_private = (root_private.d + mid_private.d) % _GROUP_ORDER
wrapped_private = ECPrivateKey(raw_private, _CURVE)
wrapped_public = ECPublicKey(_CURVE.add_point(root_public.W, mid_public.W))
return wrapped_public, wrapped_private
@classmethod
def _get_secret(
cls: Type[SECP256K1], candidate_merger: Callable[[bytes], bytes]
) -> bytes:
"""
Given a function `candidate_merger` that knows how
to prepare a sequence candidate bytestring into
a possible full candidate secret, returns the first sequence
value that is valid. If none are valid, an exception is raised; in practice
this is so exceedingly rare that it can safely be ignored.
"""
for raw_root in range(_SEQUENCE_MAX):
root = raw_root.to_bytes(
_SEQUENCE_SIZE,
byteorder="big",
signed=False,
)
candidate = sha512_first_half(candidate_merger(root))
if cls._is_secret_valid(candidate):
return candidate
raise XRPLKeypairsException(
"""Could not determine a key pair.
This is extremely improbable. Please try again.""",
)
@classmethod
def _is_secret_valid(cls: Type[SECP256K1], secret: bytes) -> bool:
numerical_secret = int.from_bytes(secret, "big")
return numerical_secret in range(1, _GROUP_ORDER)
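# A minimal usage sketch of the class above (illustrative only): the seed is a
# hypothetical 16-byte placeholder, not a real decoded XRPL seed.
if __name__ == "__main__":
    demo_seed = bytes(16)  # placeholder seed bytes, for illustration only
    demo_public, demo_private = SECP256K1.derive_keypair(demo_seed, is_validator=False)
    demo_signature = SECP256K1.sign(b"example message", demo_private)
    assert SECP256K1.is_valid_message(b"example message", demo_signature, demo_public)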
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for validation.py.
"""
import json
import os
import tempfile
import unittest
from app.executor import validation
class ValidationTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self.repo_dir = self.tmp_dir.name
def tearDown(self):
self.tmp_dir.cleanup()
def test_import_targets_valid_absolute_names(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as manifest:
manifest.write(
json.dumps(
{'import_specifications': [{
'import_name': 'treasury'
}]}))
manifest_path = os.path.join(self.repo_dir, 'us_bls/cpi/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as manifest:
manifest.write(
json.dumps(
{'import_specifications': [{
'import_name': 'cpi_u'
}]}))
validation.are_import_targets_valid(
['scripts/us_fed:treasury', 'us_bls/cpi:cpi_u'],
['utils/template.py'], self.repo_dir, 'manifest.json')
def test_import_targets_valid_name_not_exist(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as manifest:
manifest.write(
json.dumps(
{'import_specifications': [{
'import_name': 'treasury'
}]}))
with self.assertRaises(ValueError) as context:
validation.are_import_targets_valid(['scripts/us_fed:treasuryyy'],
['utils/template.py'],
self.repo_dir, 'manifest.json')
self.assertIn('treasuryyy not found', str(context.exception))
def test_import_targets_valid_manifest_not_exist(self):
with self.assertRaises(ValueError) as context:
validation.are_import_targets_valid(
['scripts/us_fed:treasury', 'us_bls/cpi:cpi_u'],
['utils/template.py'], self.repo_dir, 'manifest.json')
self.assertIn('manifest.json does not exist',
str(context.exception))
def test_import_targets_valid_relative_names(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as file:
manifest = {
'import_specifications': [{
'import_name': 'treasury1'
}, {
'import_name': 'treasury2'
}]
}
file.write(json.dumps(manifest))
validation.are_import_targets_valid(['treasury1', 'treasury2'],
['scripts/us_fed'], self.repo_dir,
'manifest.json')
def test_import_targets_valid_relative_names_multiple_dirs(self):
manifest_path = os.path.join(self.repo_dir,
'scripts/us_fed/manifest.json')
os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
with open(manifest_path, 'w+') as file:
manifest = {
'import_specifications': [{
'import_name': 'treasury1'
}, {
'import_name': 'treasury2'
}]
}
file.write(json.dumps(manifest))
with self.assertRaises(ValueError) as context:
validation.are_import_targets_valid(['treasury1', 'treasury2'],
['scripts/us_fed', 'foo/bar'],
self.repo_dir, 'manifest.json')
self.assertIn('relative import names', str(context.exception))
def test_import_spec_valid(self):
import_dir = 'scripts/us_fed'
os.makedirs(os.path.join(self.repo_dir, import_dir, 'dir'),
exist_ok=True)
script_path = os.path.join(self.repo_dir, import_dir, 'dir/foo.py')
print(script_path)
with open(script_path, 'w+') as script:
script.write('line\n')
script.flush()
script_path = os.path.join(self.repo_dir, import_dir, 'bar.py')
with open(script_path, 'w+') as script:
script.write('line\n')
script.flush()
spec = {
'import_name': 'treasury',
'provenance_url': 'url',
'provenance_description': 'description',
'curator_emails': 'curator',
'scripts': ['dir/foo.py', 'dir/../bar.py']
}
validation._is_import_spec_valid(spec, self.repo_dir, import_dir)
def test_import_spec_valid_fields_absent(self):
spec = {
'import_name': 'treasury',
'scripts': ['dir/foo.py', 'dir/../bar.py']
}
with self.assertRaises(ValueError) as context:
validation._is_import_spec_valid(spec, self.repo_dir,
'scripts/us_fed')
self.assertIn(
'provenance_url, provenance_description, curator_emails',
str(context.exception))
def test_import_spec_valid_script_not_exist(self):
spec = {
'import_name': 'treasury',
'provenance_url': 'url',
'provenance_description': 'description',
'curator_emails': 'curator',
'scripts': ['dir/foo.py', 'dir/../bar.py']
}
with self.assertRaises(ValueError) as context:
validation._is_import_spec_valid(spec, self.repo_dir,
'scripts/us_fed')
self.assertIn('dir/foo.py, dir/../bar.py', str(context.exception))
def test_manifest_valid_fields_absent(self):
with self.assertRaises(ValueError) as context:
validation.is_manifest_valid({}, self.repo_dir, 'scripts/us_fed')
self.assertIn('import_specifications not found',
str(context.exception))
# math, numpy and pandas are used below (math.ceil, np.*, pd.cut) but were not
# imported explicitly; the star imports may also provide them.
import math
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
import seaborn as sns
from utils import *
from analysis import *
from SpikeVidUtils import *
def tidy_axis(ax, top=False, right=False, left=False, bottom=False):
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
ax.xaxis.set_tick_params(top='off', direction='out', width=1)
ax.yaxis.set_tick_params(right='off', left='off', direction='out', width=1)
def plot_neurons(ax, df, neurons, color_map):
# print(df.head())
for id_ in neurons:
df_id = df[df['ID'] == id_]
if len(df_id) == 0:
continue  # skip neurons with no spikes rather than stopping the whole loop
color = color_map[id_]
ax.scatter(df_id['Time'], df_id['ID'], color=color, marker="|", s=150, label='Simulated')
# ax.set_ylim(0, len(neurons))
xlim = int(max(df['Interval']))
ax.set_xlim(0, xlim)
ax.set_xticks(np.linspace(0, xlim, num=3))
ax.tick_params(axis='y', labelsize=15)
ax.tick_params(axis='x', labelsize=15)
def plot_raster_trial(df1, df2, trials, neurons):
color_labels = neurons
rgb_values = sns.color_palette("bright", len(neurons))
color_map = dict(zip(color_labels, rgb_values))
fig, ax = plt.subplots(nrows=len(trials), ncols=2, figsize=(12,10), squeeze=False)
for n, trial in enumerate(trials):
df1_trial_n = df1[df1['Trial'] == trial]
df2_trial_n = df2[df2['Trial'] == trial]
ax[n][0].set_ylabel(f'Trial {trial}')
plot_neurons(ax[n][0], df1_trial_n, neurons, color_map)
plot_neurons(ax[n][1], df2_trial_n, neurons, color_map)
# ax[0][n].get_shared_x_axes().join(ax[0][0], ax[0][n])
# ax[1][n].get_shared_x_axes().join(ax[0][0], ax[1][n])
plt.setp(ax, yticks=neurons, yticklabels=neurons)
ax[0][0].set_title('True')
ax[0][1].set_title('Predicted')
fig.supxlabel('Time (S)')
fig.supylabel('Neuron ID')
plt.tight_layout()
def get_id_intervals(df, n_id, intervals):
id_intervals = np.zeros(len(intervals))
interval_counts = df[df['ID'] == n_id].groupby(df['Interval']).size()
id_intervals[interval_counts.index.astype(int).tolist()] = interval_counts.index.astype(int).tolist()
return id_intervals.tolist()
def plot_var(ax, df, variable, values, color_map, m_s=150, l_w=1):
for value in values:
color = color_map[value]
data = df[df[variable] == value]
data[variable] = data[variable].astype('str')
ax.scatter(data['Time'], data[variable], color=color, # c=data[variable].map(color_map),
marker="|", s=m_s, linewidth=l_w)
# ax.xaxis.set_tick_params(top='off', direction='out', width=1)
ax.yaxis.set_tick_params(right='off', left='off', direction='out', width=1)
ax.set_ylim(0, len(values))
xlim = int(max(df['Interval']))
ax.set_xlim(0, xlim)
ax.set_xticks(np.linspace(0, xlim, num=3))
ax.tick_params(axis='y', labelsize=10)
ax.tick_params(axis='x', labelsize=10)
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.spines['left'].set_visible(False)
ax.xaxis.set_tick_params(top='off', direction='out', width=1)
# ax.yaxis.set_tick_params(right='off', direction='out', width=1)
ms_firing = 25
line_width = 0.75
lw_scatter = 0.1
def plot_firing_comparison(df_1, df_2, id, trials, intervals, figure_name=None):
'''
get trial averaged spikes (PSTH)
'''
id_ = id
true = df_1[(df_1['Trial'].isin(trials)) & (df_1['ID'] == id_)].reset_index(drop=True)
pred = df_2[(df_2['Trial'].isin(trials)) & (df_2['ID'] == id_)].reset_index(drop=True)
rates_1_id = get_rates(true, [id_], intervals)[id_]
rates_2_id = get_rates(pred, [id_], intervals)[id_]
left, width = 0.15, 0.85
bottom, height = 0.1, 0.1
spacing = 0.005
height_hist = 0.10
rect_scatter_1 = [left, bottom*4, width, height]
rect_scatter_2 = [left, bottom*3, width, height]
rect_hist1 = [left, bottom*2, width, height_hist]
# rect_hist2 = [left, bottom*1, width, height_hist]
# rect_histy = [left + width + spacing, bottom, 0.2, height]
if figure_name is None:
fig = plt.figure(figsize=(10, 10))
else:
fig = figure_name
# ax_rast_1 = fig.add_subaxes(rect_scatter_1)
# ax_rast_2 = fig.add_axes(rect_scatter_2, sharex=ax_rast_1)
# ax_hist_1 = fig.add_axes(rect_hist1, sharex=ax_rast_1)
# ax_hist_2 = fig.add_axes(rect_hist2, sharex=ax_rast_1)
tidy_axis(fig)
no_top_right_ticks(fig)
fig.set_yticks([])
fig.set_yticklabels([])
fig.axis('off')
ax_rast_1 = fig.inset_axes(rect_scatter_1)
ax_rast_2 = fig.inset_axes(rect_scatter_2, sharex=ax_rast_1)
ax_hist_1 = fig.inset_axes(rect_hist1, sharex=ax_rast_1)
ax_rast_2.axis('off')
ax_rast_1.axis('off')
axes_list = [ax_rast_1, ax_rast_2, ax_hist_1]
# colors = sns.color_palette("gist_ncar_r", 2)
colors = ['black', 'red']
def plot_raster_scatter(ax, data, color, label):
ax.scatter(data['Interval'], data['ID'], c=color, s=ms_firing, linewidth=lw_scatter, marker='|', label=label)
ax.set_xlabel(label)
# ax.scatter(true['Interval'], true['ID'].astype('str'), color='#069AF3', marker='|')
plot_raster_scatter(ax_rast_2, pred, colors[0], 'Simulated')
plot_raster_scatter(ax_rast_1, true, colors[1], 'True')
# sns.distplot(true['Interval'], hist=False)
# sns.distplot(pred['Interval'], hist=False)
sns.kdeplot(pred['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[0], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_1_id, color=colors[0], lw=3)
sns.kdeplot(true['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[1], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_2_id, color=colors[1], lw=3)
ax_hist_1.set_ylabel('')
ax_hist_1.set_yticks([])
sns.despine(top=True, left=True)
# tidy_axis(ax_hist_1, bottom=True)
# tidy_axis(ax_hist_2, bottom=True)
ax_hist_1.set_xlabel('')
# ax_hist_1.spines['bottom'].set_visible(False)
# ax_rast_1.spines['bottom'].set_visible(False)
# ax_rast_2.spines['bottom'].set_visible(False)
# ax_hist_1.spines['top'].set_visible(False)
# ax_hist_2.spines['top'].set_visible(False)
# xlabels = np.arange(0, max(intervals) + 1, 60)
# xticks, xlabels = xlabels, xlabels
max_intervals = math.ceil(df_1['Interval'].max())
# max_intervals = max(intervals)
xticks, xlabels = [0,max_intervals // 2, max_intervals], [0,max_intervals // 2, max_intervals]
yticks, ylabels = np.arange(len(trials)), list(map(str, trials))
for ax in axes_list:
tidy_axis(ax, bottom=True)
no_top_right_ticks(ax)
ax.set_xlim(0, max(intervals))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
ax.set_yticks([])
ax.set_yticklabels([])
# ax_hist_1.set_xlabel('Time (s)', fontsize=20)
ax_hist_1.set_xlabel('', fontsize=20)
legend = fig.legend(bbox_to_anchor=(0.25, 0.01), ncol=3, frameon=True, fontsize=17.5) # bbox_to_anchor=(0.75, 0.55)
ax_rast_1.set_title("{}".format(id_), fontsize=20)
def plot_firing_comparison_sweeps(df_1, df_2, id, trials, intervals, figure_name=None):
'''
get trial averaged spikes (PSTH)
'''
left, width = 0.15, 0.85
bottom, height = 0.1, 0.1
spacing = 0.005
height_hist = 0.10
rect_hist1 = [left, bottom*2, width, height_hist]
# rect_hist2 = [left, bottom*1, width, height_hist]
# rect_histy = [left + width + spacing, bottom, 0.2, height]
if figure_name is None:
# fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
fig = plt.subplot()
else:
fig = figure_name
tidy_axis(fig)
no_top_right_ticks(fig)
fig.set_yticks([])
fig.set_yticklabels([])
fig.axis('off')
ax_dict_true = dict()
ax_dict_pred = dict()
for n, trial in enumerate(trials):
ax_dict_true[trial] = fig.inset_axes([left, bottom * (3+n), width, height_hist])
ax_dict_pred[trial] = fig.inset_axes([left, bottom * (3+n+len(trials)), width, height_hist], sharex=ax_dict_true[trial])
ax_dict_true[trial].axis('off')
ax_dict_pred[trial].axis('off')
ax_hist_1 = fig.inset_axes(rect_hist1, sharex=ax_dict_true[trials[0]])
axes_list = [list(ax_dict_true.values()), list(ax_dict_pred.values()), [ax_hist_1]]
# colors = sns.color_palette("gist_ncar_r", 2)
colors = ['black', 'red']
def plot_raster_scatter(ax, data, color, label):
ax.scatter(data['Interval'], data['ID'], c=color, s=ms_firing, marker='|', linewidth=lw_scatter, label=label)
ax.set_xlabel(label)
# ax.scatter(true['Interval'], true['ID'].astype('str'), color='#069AF3', marker='|')
for n, trial in enumerate(trials):
id_ = id
true = df_1[(df_1['Trial'] == trial) & (df_1['ID'] == id_)].reset_index(drop=True)
pred = df_2[(df_2['Trial'] == trial) & (df_2['ID'] == id_)].reset_index(drop=True)
if id_ == 345:
print(true, pred)
plot_raster_scatter(ax_dict_pred[trial], pred, colors[0], 'Simulated')
plot_raster_scatter(ax_dict_true[trial], true, colors[1], 'True')
sns.kdeplot(pred['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[0], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_1_id, color=colors[0], lw=3)
sns.kdeplot(true['Interval'], ax=ax_hist_1, bw_adjust=.25, color=colors[1], lw=line_width, alpha=0.7) #plot(np.array(intervals), rates_2_id, color=colors[1], lw=3)
max_intervals = df_1['Interval'].max()
yticks, ylabels = np.arange(len(trials)), list(map(str, trials))
xticks, xlabels = [0,max_intervals // 2, max_intervals], [0,max_intervals // 2, max_intervals]
for ax in axes_list:
ax = ax[0]
tidy_axis(ax, bottom=True)
no_top_right_ticks(ax)
ax.set_xlim(0, max(intervals))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
ax.set_yticks([])
ax.set_yticklabels([])
# ax_hist_1.set_xlim(0, max(intervals))
# sns.distplot(true['Interval'], hist=False)
# sns.distplot(pred['Interval'], hist=False)
ax_hist_1.set_ylabel('')
ax_hist_1.set_yticks([])
sns.despine(top=True, left=True)
# tidy_axis(ax_hist_1, bottom=True)
# tidy_axis(ax_hist_2, bottom=True)
ax_hist_1.set_xlabel('')
# ax_hist_1.set_xlabel('Time (s)', fontsize=20)
legend = fig.legend(bbox_to_anchor=(0.25, 0.01), ncol=3, frameon=True, fontsize=17.5) # bbox_to_anchor=(0.75, 0.55)
list(ax_dict_pred.values())[-1].set_title("{}".format(id_), fontsize=20)
def get_psth(df, n_id, trials):
df = df[df['ID'] == n_id]
df = df[df['Trial'].isin(trials)]  # 'trial' was undefined here; filter on the trials argument instead
df = df.groupby('Interval_dt').size().reset_index()
df.columns = ['Interval_dt', 'Count']
return df
def set_categorical_ticks(ax, yticks=None, ylabels=None, xticks=None, xlabels=None, fs=None):
fs = fs if fs is not None else 10
if yticks is not None:
ax.set_ylim(0, len(ylabels))
ax.set_yticks(yticks)
ax.set_yticklabels(ylabels)
if xticks is not None:
ax.set_xlim(0, max(xlabels))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=fs)
ax.get_xaxis().tick_bottom() # remove unneeded ticks
ax.get_yaxis().tick_left()
def no_top_right_ticks(ax):
ax.set_yticklabels([])
ax.set_yticks([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.yaxis.set_tick_params(top='off', direction='out', width=1)
ax.yaxis.set_tick_params(top='off', right='off', left='on', direction='out', width=1)
ax.tick_params(labelright='off', labeltop='off')
ax.tick_params(axis='both', direction='out')
ax.get_xaxis().tick_bottom() # remove unneeded ticks
ax.get_yaxis().tick_left()
def plot_neurons_trials_psth(df_1, df_2, neurons, trials, intervals, figuresize=None):
fs = 15
plt.rcParams['xtick.labelsize']= fs
plt.rcParams['ytick.labelsize']= fs
plt.rcParams['axes.labelsize']= fs
plt.rcParams['axes.titlesize']= fs
plt.rcParams['legend.fontsize']= fs
plt.rcParams['lines.linewidth']= 2
# plt.rcParams['fig.supylabel']= fs
df_1 = df_1.reset_index(drop=True)
df_2 = df_2.reset_index(drop=True)
dt = 4
intervals_dt = [dt * n for n in range(int((intervals[-1]) // dt) + 1)]
df_1['Interval_dt'] = pd.cut(df_1['Interval'], intervals_dt, include_lowest=True)
df_2['Interval_dt'] = pd.cut(df_2['Interval'], intervals_dt, include_lowest=True)
# neuron_list = list(map(str, sorted(top_corr[:6].index.tolist())))
neurons = list(map(str, [i for i in neurons]))
trials = df_1['Trial'].unique()
# neuron_list = sorted(top_corr[:10].index.tolist())
scale = 1
nrows, ncols = 4, len(neurons)
fig_size = figuresize if figuresize is not None else (2 * scale * len(neurons),10 * scale)
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=fig_size)
variable = 'Trial'
color_labels = trials
rgb_values = sns.color_palette("gist_ncar_r", len(trials))
color_map = dict(zip(color_labels, rgb_values))
max_freq = 0
for n, neuron in enumerate(neurons):
df_1['ID'] = df_1['ID'].astype('str')
df_2['ID'] = df_2['ID'].astype('str')
df_1_id = df_1[df_1['ID'] == neuron]
df_2_id = df_2[df_2['ID'] == neuron]
max_intervals = 32
# max_intervals = max(intervals)
yticks, ylabels = np.arange(len(trials)), list(map(str, trials))
xticks, xlabels = [0,max_intervals // 2, max_intervals], [0,max_intervals // 2, max_intervals]
m_s = 45
l_w = 0.5
plot_var(ax[0][n], df_1_id, variable, trials, color_map, m_s, l_w=l_w)
plot_var(ax[1][n], df_2_id, variable, trials, color_map, m_s, l_w=l_w)
set_categorical_ticks(ax[0][n], yticks, ylabels, xticks, xlabels)
set_categorical_ticks(ax[1][n], yticks, ylabels, xticks, xlabels)
ax[0][n].set_yticks([])
ax[1][n].set_yticks([])
if n > 0:
no_top_right_ticks(ax[0][n])
no_top_right_ticks(ax[1][n])
df_1['ID'] = df_1['ID'].astype('int')
df_2['ID'] = df_2['ID'].astype('int')
neuron_int = int(neuron)
df_1_id = df_1[df_1['ID'] == neuron_int]
df_2_id = df_2[df_2['ID'] == neuron_int]
# rates_1 = get_rates(df_1, [neuron_int], intervals_dt)[neuron_int]
# rates_2 = get_rates(df_2, [neuron_int], intervals_dt)[neuron_int]
freq_id_1 = df_1_id['Interval'].value_counts().reindex(intervals, fill_value=0)
freq_id_2 = df_2_id['Interval'].value_counts().reindex(intervals, fill_value=0)
bins = np.arange(len(intervals) // 2)
# bins = len(intervals)
# ax[2][n].bar(intervals_dt, freq_id_1)
# ax[2][n].hist([freq_id_1, freq_id_2], bins=bins, histtype='step', edgecolor=['blue', 'red'],
# lw=2, alpha=0.3, facecolor=['blue', 'red'], label=['True', 'Sim'])
c_2, c_1 = rgb_values[2], rgb_values[-1]
ax[2][n].hist(df_1_id['Interval'], bins=bins, edgecolor=None, lw=2, alpha=1, facecolor=c_1, label='True')
ax[3][n].hist(df_2_id['Interval'], bins=bins, edgecolor=None, lw=2, alpha=1, facecolor=c_2, label='Predicted') # histtype='step'
# xticks, xlabels = [0, max(intervals) // 2, max(intervals)], [0, max(intervals) // 2, max(intervals)]
y_fs_hist = 15
set_categorical_ticks(ax[2][n], None, None, xticks, xlabels, y_fs_hist)
ax[2][n].spines['right'].set_visible(False)
ax[2][n].spines['top'].set_visible(False)
set_categorical_ticks(ax[3][n], None, None, xticks, xlabels, y_fs_hist)
ax[3][n].spines['right'].set_visible(False)
ax[3][n].spines['top'].set_visible(False)
if n > 0:
no_top_right_ticks(ax[2][n])
ax[3][n].get_shared_y_axes().join(ax[2][n], ax[2][n-1])
no_top_right_ticks(ax[3][n])
max_lim = (max(ax[2][n].get_ylim()[1], ax[3][n].get_ylim()[1]))
ax[0][n].set_xticklabels([])
ax[1][n].set_xticklabels([])
ax[2][n].set_xticklabels([])
ax[2][n].set_ylim(0, max_lim)
ax[3][n].set_ylim(0, max_lim)
ax[2][n].get_shared_y_axes().join(ax[3][n], ax[3][n-1])
# max_freq = max(freq_id_1.max(), freq_id_2.max(), max_freq)
# yticks, ylabels = np.linspace(0, max(freq_id_1.max(), freq_id_2.max()), 3), [i for i in range(max(freq_id_1.max(), freq_id_2.max()))]
# set_categorical_ticks(ax[2][n], yticks, ylabels, xticks, xlabels)
plt.setp(ax[0])
# ax[0][0].set_ylim(0, 32)
ax[0][0].set_ylabel('Ground Truth')
ax[1][0].set_ylabel('Simulated')
# ax[2][0].set_ylabel('PSTH, True')
# ax[3][0].set_ylabel('PSTH, Simulation')
# ax[2][-1].legend()
ax[0][0].legend(bbox_to_anchor=(0,0,1,1))
# fig.supxlabel('Time (S)', fontsize=15, y=0.07)
# fig.supylabel('Trials')
fig.suptitle('Gabor 3D Sim', fontsize=20, y=0.925)
# fig.gca().set_aspect('equal', adjustable='box')
# plt.autoscale()
# plt.tight_layout()
def get_boxplot_data(df_1, df_2, intervals, trials):
data_boxplot_true = []
data_boxplot_pred = []
for n, trial in enumerate(trials):
trial_prev = trials[n - 1] if n > 0 else trials[n + 1]
true_prev = df_1[df_1['Trial'] == trial_prev].reset_index(drop=True)
true = df_1[df_1['Trial'] == trial].reset_index(drop=True)
pred = df_2[df_2['Trial'] == trial].reset_index(drop=True)
rates_true_prev, rates_true, rates_pred = get_rates_trial(true_prev, intervals), get_rates_trial(true, intervals), get_rates_trial(pred, intervals)
corr_trials_true = calc_corr_psth(rates_true, rates_true_prev)
corr_trials_pred = calc_corr_psth(rates_true, rates_pred)
data_boxplot_true.append(np.array(corr_trials_true).flatten())
data_boxplot_pred.append(np.array(corr_trials_pred).flatten())
return data_boxplot_true, data_boxplot_pred, corr_trials_true, corr_trials_pred
def plot_error_bar(x, n, color):
"""
databoxplot_true, databoxplot_pred, corr_trials_true, corr_trials_pred = get_boxplot_data(df_1, df_2, intervals, n_trial)
plot_error_bar(corr_trials_true, n, true_color)
plot_error_bar(corr_trials_pred, n, pred_color)
"""
mins = x.min()
maxes = x.max()
means = x.mean()
std = x.std()
# plt.errorbar(n, means, std, fmt='ok', lw=3)
# plt.errorbar(n, means, [means - mins, maxes - means],
# fmt='.k', ecolor='gray', lw=1)
# plt.xlim(-1, 8)
green_diamond = dict(markerfacecolor=color, marker='o')
# fig3, ax3 = plt.subplots()
# ax3.set_title('Changed Outlier Symbols')
ax = plt.gca()  # no axes handle is passed into this helper, so draw on the current axes
ax.boxplot(x, flierprops=green_diamond)
def fancy_boxplot(fig, ax1, data, color):
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# ax1.set(
# axisbelow=True, # Hide the grid behind plot objects
# title='Comparison of IID Bootstrap Resampling Across Five Distributions',
# xlabel='Distribution',
# ylabel='Value',
# )
# Now fill the boxes with desired colors
# box_colors = ['darkkhaki', 'royalblue']
# box_colors = sns.dark_palette("#69d", len(data), reverse=True)
box_colors = [color]
num_boxes = len(data)
medians = np.empty(num_boxes)
for i in range(num_boxes):
box = bp['boxes'][i]
box_x = []
box_y = []
for j in range(5):
box_x.append(box.get_xdata()[j])
box_y.append(box.get_ydata()[j])
box_coords = np.column_stack([box_x, box_y])
# Alternate between Dark Khaki and Royal Blue
ax1.add_patch(Polygon(box_coords, facecolor=box_colors[0]))
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
median_x = []
median_y = []
for j in range(2):
median_x.append(med.get_xdata()[j])
median_y.append(med.get_ydata()[j])
ax1.plot(median_x, median_y, 'k')
medians[i] = median_y[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
ax1.plot(np.average(med.get_xdata()), np.average(data[i]),
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
# ax1.set_xlim(0.5, num_boxes + 0.5)
# top = 40
# bottom = -5
# ax1.set_ylim(bottom, top)
# ax1.set_xticklabels(np.repeat(random_dists, 2),
# rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(num_boxes) + 1
upper_labels = [str(round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(num_boxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], .95, upper_labels[tick],
transform=ax1.get_xaxis_transform(),
horizontalalignment='center', size='x-small',
weight=weights[k], color=box_colors[0])
fig.supxlabel('Trials')
fig.supylabel('Pearson Correlation (P)')
fig.suptitle('Inter-Neuron Correlation Across Trials')
plt.tight_layout()
def plot_intertrial_corr(corr_true, corr_pred, trial):
def scatter_hist(x, y, ax, ax_histy):
# no labels
# ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
# the scatter plot:
# ax.scatter(x, y)
# bins = 250
# now determine nice limits by hand:
# binwidth = 0.25
# xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
# lim = (int(xymax/binwidth) + 1) * binwidth
# bins = np.arange(-lim, lim + binwidth, binwidth)
# ax_histx.hist(x, bins=bins)
ax_hist = sns.distplot(y, hist=False, ax=ax_histy, vertical=True) # (x, y, bins=10, orientation='horizontal')
ax_hist.set(xlabel=None)
# sns.distplot(top_corr, hist=False, ax=ax_histy, vertical=True)
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
# rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a square Figure
fig = plt.figure(figsize=(15, 15))
ax = fig.add_axes(rect_scatter)
# ax_histx = fig.add_axes(rect_histx, sharex=ax)
ax_histy = fig.add_axes(rect_histy, sharey=ax)
# use the previously defined function
scatter_hist(np.array(corr_true.index), corr_true, ax, ax_histy)
scatter_hist(np.array(corr_pred.index), corr_pred, ax, ax_histy)
ax.grid(lw=0.8, alpha=0.7, color='gray')
ax.scatter(corr_true.index, corr_true, label=f'Trial {trial} vs. 1', alpha=0.4)
ax.scatter(corr_pred.index, corr_pred, label=f'Trial {trial} vs. Pred', alpha=0.5)
ax.set_title('Pair-wise Correlation Between Trials', fontsize=25)
ax.set_xlabel('Neuron ID', fontsize=20)
ax.set_ylim(-0.1, 0.6)
plt.ylabel('Pearson Correlation (p)')
ax.legend(fontsize=20, title_fontsize=20)
plt.show()
#! /usr/bin/env python3
import sys
import pickle
import argparse
import numpy as np
import pandas as pd
import scipy.stats as stats
def like_calc(X, y_test, unc):
"""
Given a simulated entry with uncertainty and a test entry, calculates the
likelihood that they are the same.
Parameters
----------
X : numpy array (train DB) of nuclide measurements for simulated entry
y_test : numpy array (single row) of nuclide measurements for test
("measured") entry
unc : float representing a flat uncertainty percentage, or 0.0 to indicate
counting (square-root-of-counts) error
Returns
-------
like: likelihood that the test entry is the simulated entry
"""
# TODO UNTESTED CODE (but not recently in use)
idx = np.nonzero(y_test)[0]
y_test = y_test[idx]
X = X[:, idx]
# unc arg of 0 indicates for the script to use sqrt(counts) uncertainty
if unc == 0.0:
std = np.sqrt(X)
else:
std = unc * X
like = np.prod(stats.norm.pdf(X, loc=y_test, scale=std), axis=1)
return like
def ll_calc(X, y_test, unc):
"""
Given a simulated entry with uncertainty and a test entry, calculates the
log-likelihood that they are the same.
Parameters
----------
X : numpy array (train DB) of nuclide measurements for simulated entry
y_test : numpy array (single row) of nuclide measurements for test
("measured") entry
unc : float representing a flat uncertainty percentage, or 0.0 to indicate
counting (square-root-of-counts) error
Returns
-------
ll: numpy array of log-likelihoods that the test entry is the simulated
entry for each entry in the DB
"""
idx = np.nonzero(y_test)[0]
y_test = y_test[idx]
X = X[:, idx]
# unc arg of 0 indicates for the script to use sqrt(counts) uncertainty
if unc == 0.0:
std = np.sqrt(X)
else:
std = unc * X
ll = np.sum(stats.norm.logpdf(X, loc=y_test, scale=std), axis=1)
return ll
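# A toy numeric sketch of ll_calc (values are illustrative, not real nuclide
# measurements): two candidate DB rows scored against one test vector with a
# 5% flat uncertainty. The first row is closer to the test vector, so it
# receives the larger (less negative) log-likelihood.
#   X_demo = np.array([[100.0, 50.0], [90.0, 60.0]])
#   y_demo = np.array([96.0, 51.0])
#   ll_calc(X_demo, y_demo, 0.05)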
def unc_calc(X, y_test, unc):
"""
Given a simulated entry and a test entry with uniform uncertainty,
calculates the uncertainty in the log-likelihood calculation.
Parameters
----------
X : numpy array (train DB) of nuclide measurements for simulated entry
y_test : numpy array (single row) of nuclide measurements for test
("measured") entry
unc : float representing a flat uncertainty percentage, or 0.0 to indicate
counting (square-root-of-counts) error
Returns
-------
ll_unc: numpy array of log-likelihood uncertainties for each DB entry
"""
idx = np.nonzero(y_test)[0]
y_test = y_test[idx]
X = X[:, idx]
# unc arg of 0 indicates for the script to use sqrt(counts) uncertainty
if unc == 0.0:
sim_unc_sq = X
tst_unc_sq = y_test
else:
sim_unc_sq = (unc * X)**2
tst_unc_sq = (unc * y_test)**2
unc_array = ((X - y_test) / sim_unc_sq)**2 * (sim_unc_sq + tst_unc_sq)
np.nan_to_num(unc_array, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
unc_array = np.array(unc_array, dtype=np.float64)
ll_unc = np.sqrt(np.sum(unc_array, axis=1))
return ll_unc
def ratios(XY, ratio_list, labels):
"""
Given a dataframe with entries (rows) that contain nuclide measurements and
some labels, calculate the predetermined ratios of the measurements.
Parameters
----------
XY : dataframe of spent fuel entries containing nuclide measurements and
their labels
ratio_list : list of nuclide ratios
labels : list of label titles in the dataframe
Returns
-------
XY_ratios : dataframe of spent fuel entries containing nuclide measurement
ratios and their labels
"""
XY_ratios = XY.loc[:, labels].copy()
for ratio in ratio_list:
nucs = ratio.split('/')
XY_ratios[ratio] = XY[nucs[0]] / XY[nucs[1]]
XY_ratios.replace([np.inf, -np.inf], 0, inplace=True)
XY_ratios.fillna(0, inplace = True)
# reorganize columns
cols = ratio_list + labels
XY_ratios = XY_ratios[cols]
return XY_ratios
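# A hedged usage sketch of ratios() with made-up column names:
#   demo = pd.DataFrame({'cs134': [1.0, 2.0], 'cs137': [4.0, 0.0],
#                        'Burnup': [30.0, 45.0]})
#   ratios(demo, ['cs134/cs137'], ['Burnup'])
# The result has columns ['cs134/cs137', 'Burnup']; the divide-by-zero in the
# second row becomes inf and is then mapped to 0 by the replace()/fillna() above.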
def format_pred(pred_row, lbls, nonlbls, cdf_cols):
"""
This separates off the formatting of the pred_ll dataframe from the
get_pred function for cleanliness.
Parameters
----------
pred_row : single-row dataframe including nuclide measurements, the
prediction (i.e., the predicted labels), and all saved log-
likelihoods
lbls : list of labels that are predicted
nonlbls : list of reactor parameters that aren't being predicted
cdf_cols : list of new LogLL columns added to prediction for CDF plot
Returns
-------
pred_row : single-row dataframe including the prediction (i.e., predicted
labels), LLmax, LLUnc, and a list of LLs and their Uncs to
populate a CDF
"""
lbls = lbls + nonlbls
pred_lbls = ["pred_" + s for s in lbls]
pred_row.rename(columns=dict(zip(lbls, pred_lbls)), inplace=True)
pred_lbls.extend(cdf_cols)
pred_row = pred_row.loc[:, pred_lbls]
return pred_row
def ll_cdf(pred_ll, ll_df):
"""
Returns a single-row dataframe with the prediction/MaxLogLL with 8 new
columns of log-likelihoods that can populate a CDF, which includes the 2nd
largest LogLL, and 7 percentiles that should give a decent picture of the
CDF curve. (and all corresponding uncertainties)
Parameters
----------
pred_ll : single-row dataframe including nuclide measurements, the
prediction (i.e., the predicted labels), and maxLL/LLUnc
ll_df : two-column dataframe including log-likelihood calculations and
their uncertainties for a given test sample calculation against
entire training db
Returns
-------
pred_ll : single-row dataframe including nuclide measurements, the
prediction (i.e., the predicted labels), and all saved log-
likelihoods (Max and CDF-relevant)
cdf_cols : list of column names that are the new LogLL columns added for
CDF
"""
old_cols = pred_ll.columns.values.tolist()
# First, grab adjacent LL value to MaxLL
cols = ll_df.columns.values.tolist()
maxs = ll_df.nlargest(2, cols[0])
pred_ll['2ndMaxLogLL'] = maxs[cols[0]].iloc[1]
pred_ll['2ndMaxLLUnc'] = maxs[cols[1]].iloc[1]
# Second, add columns with percentiles in the col name
quants = [0.9998, 0.9988, 0.95, 0.9, 0.5, 0.1, 0.01]
for quant in quants:
quant_df = ll_df.quantile(quant)
pred_ll['CDF_LogLL_' + str(quant)] = quant_df.loc[cols[0]]
pred_ll['CDF_LLUnc_' + str(quant)] = quant_df.loc[cols[1]]
new_cols = pred_ll.columns.values.tolist()
cdf_cols = [col for col in new_cols if col not in old_cols]
return pred_ll, cdf_cols
def get_pred(XY, test_sample, unc, lbls, nonlbls):
"""
Given a database of spent fuel entries and a test sample (nuclide
measurements only), calculates the log-likelihood (and LL-uncertainty) of
that sample against every database entry. Determines the max LL, and
therefore the corresponding prediction in the database. Also determines a
list of LL measurements that populate a CDF. Returns that prediction and LL
information as a single row dataframe.
Parameters
----------
XY : dataframe with nuclide measurements and reactor parameters
test_sample : numpy array of a sample to be predicted (nuclide measurements
only)
unc : float that represents the simulation uncertainty in nuclide
measurements
lbls : list of reactor parameters to be predicted
nonlbls : list of reactor parameters that aren't being predicted
Returns
-------
pred_ll : single-row dataframe including the prediction (i.e., predicted
labels), its max log-likelihood/uncertainty, and a list of
log-likelihoods and their uncertainties to populate a CDF
"""
ll_name = 'MaxLogLL'
unc_name = 'MaxLLUnc'
X = XY.drop(lbls+nonlbls, axis=1).copy().to_numpy()
XY[ll_name] = ll_calc(X, test_sample, unc)
XY[unc_name] = unc_calc(X, test_sample, unc)
pred_row = XY.loc[XY.index == XY[ll_name].idxmax()].copy()
pred_ll, cdf_cols = ll_cdf(pred_row, XY[[ll_name, unc_name]])
cdf_cols = [ll_name, unc_name] + cdf_cols
pred_ll = format_pred(pred_ll, lbls, nonlbls, cdf_cols)
# need to delete calculated columns so next test sample can be calculated
XY.drop(columns=[ll_name, unc_name], inplace=True)
return pred_ll
def mll_testset(XY, test, ext_test, unc, lbls, nonlbls):
"""
Given a database of spent fuel entries containing a nuclide vector and the
reactor operation parameters, and an equally formatted database of test
cases to predict, this function loops through the test database to perform
a series of predictions. It first formats the test sample for prediction,
then gathers all the predictions from the test database entries
Parameters
----------
XY : dataframe with nuclide measurements and reactor parameters
test : dataframe with test cases to predict in same format as train
ext_test : boolean indicating which of external test set or LOOV is being
performed
unc : float that represents the simulation uncertainty in nuclide
measurements
lbls : list of reactor parameters to be predicted
nonlbls : list of reactor parameters that aren't being predicted
Returns
-------
pred_df : dataframe with ground truth and predictions
"""
pred_df = pd.DataFrame()
for sim_idx, row in test.iterrows():
if ext_test:
test_sample = row.drop(lbls)
test_answer = row[lbls]
pred_ll = get_pred(XY, test_sample.to_numpy(), unc, lbls, nonlbls)
all_lbls = lbls
else:
test_sample = row.drop(lbls+nonlbls)
test_answer = row[lbls+nonlbls]
pred_ll = get_pred(XY.drop(sim_idx), test_sample.to_numpy(), unc, lbls, nonlbls)
all_lbls = lbls + nonlbls
if pred_df.empty:
pred_df = pd.DataFrame(columns = pred_ll.columns.to_list())
pred_df = pred_df.append(pred_ll)
pred_df = pd.concat([test.loc[:, all_lbls].rename_axis('sim_idx').reset_index(),
pred_df.rename_axis('pred_idx').reset_index()
], axis=1)
return pred_df
def check_traindb_equal(final, db_path, arg_ratios, ratio_list, lbls):
"""
Checks at end of script that the database was not altered
Parameters
----------
final : training database dataframe at end of script
db_path : path to pkl file containing training database
arg_ratios : Boolean arg indicating whether or not nuclide ratios are being used
ratio_list : list of ratios being created
lbls : all non-features (prediction labels and non-prediction labels)
"""
initial = pd.read_pickle(db_path)
if arg_ratios == True:
initial = ratios(initial, ratio_list, lbls)
if not initial.equals(final):
sys.exit('Final training database does not equal initial database')
return
def convert_g_to_mgUi(XY, Y_list):
"""
Converts nuclides from ORIGEN simulations measured in grams to
concentrations measured in mg / gUi
Parameters
----------
XY : dataframe of origen sims with nuclides measured in grams
Y_list : list of columns in DB that are not features (nuclides)
Returns
-------
XY : dataframe of origen sims with nuclides measured in mg / gUi
"""
nucs = XY.columns[~XY.columns.isin(Y_list)].tolist()
# [x (g) / 1e6 (gUi)] * [1000 (mg) / 1 (g)] = x / 1000
XY[nucs] = XY[nucs].div(1000, axis=0)
return XY
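# Worked example of the conversion above: a nuclide simulated at 2500 g per
# 1e6 g initial uranium is 2500 g * (1000 mg / g) / 1e6 gUi = 2.5 mg/gUi,
# i.e. exactly 2500 / 1000, matching the div(1000) call.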
def parse_args(args):
"""
Command-line argument parsing
Parameters
----------
args : list of command-line argument strings (typically sys.argv[1:])
Returns
-------
args : argparse.Namespace of parsed command-line arguments
"""
parser = argparse.ArgumentParser(description='Performs maximum likelihood calculations for reactor parameter prediction.')
parser.add_argument('outdir', metavar='output-directory',
help='directory in which to organize output csv')
parser.add_argument('sim_unc', metavar='sim-uncertainty', type=float,
help='value of simulation uncertainty (in fraction) to apply to likelihood calculations')
parser.add_argument('train_db', metavar='reactor-db',
help='file path to a training set, e.g. /mnt/researchdrive/BOX_INTERNAL/opotowsky/*.pkl')
parser.add_argument('test_db', metavar='testing-set',
help='file path to an external testing set, e.g. ~/sfcompo/format_clean/sfcompo_nucXX.pkl')
parser.add_argument('outfile', metavar='csv-output',
help='name for csv output file')
parser.add_argument('db_rows', metavar='db-interval', nargs=2, type=int,
help='indices of the database interval for the job')
parser.add_argument('--ext-test', dest='ext_test', action='store_true',
help='execute script with external testing set by providing file path to a testing set')
parser.add_argument('--no-ext-test', dest='ext_test', action='store_false',
help='do not execute script with external testing set')
parser.add_argument('--ratios', dest='ratios', action='store_true',
help='compute isotopic ratios instead of using concentrations')
parser.add_argument('--no-ratios', dest='ratios', action='store_false',
help='compute using concentrations instead of isotopic ratios')
return parser.parse_args(args)
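# Example invocation of this script (paths and values below are illustrative
# only, and the module name mll_calc.py is an assumption):
#   python mll_calc.py ./outdir 0.05 train_db.pkl sfcompo_nucXX.pkl preds 0 100 \
#       --ext-test --no-ratios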
def main():
"""
Given a database of spent fuel entries (containing nuclide measurements and
labels of reactor operation parameters of interest for prediction) and a
testing database containing spent fuel entries formatted in the same way,
this script calculates the maximum log-likelihood of each test sample
against the database for a prediction. The errors of those predictions are
then calculated and saved as a CSV file.
"""
args = parse_args(sys.argv[1:])
# training set
XY = pd.read_pickle(args.train_db)
if 'total' in XY.columns:
XY.drop('total', axis=1, inplace=True)
lbls = ['ReactorType', 'CoolingTime', 'Enrichment', 'Burnup',
'OrigenReactor']
nonlbls = ['AvgPowerDensity', 'ModDensity', 'UiWeight']
# testing set
    if args.ext_test:
test = pd.read_pickle(args.test_db)
# In-script test: order of columns must match:
xy_cols = XY.columns.tolist()
        for col in nonlbls:
            xy_cols.remove(col)
if xy_cols != test.columns.tolist():
if sorted(xy_cols) == sorted(test.columns.tolist()):
test = test[xy_cols]
else:
sys.exit('Feature sets are different')
# slice test set
test = test.iloc[args.db_rows[0]:args.db_rows[1]]
# converting train DB to match units in sfcompo DB
XY = convert_g_to_mgUi(XY, lbls+nonlbls)
else:
test = XY.iloc[args.db_rows[0]:args.db_rows[1]]
# this is a fix for the now too-large db to test every entry
# 3 lines per job, with max_jobs currently set to 9900
# (~6% of db is tested)
#test = test.sample(3)
# TODO: need some better way to handle varying ratio lists
tamu_list = ['cs137/cs133', 'cs134/cs137', 'cs135/cs137', 'ba136/ba138',
'sm150/sm149', 'sm152/sm149', 'eu154/eu153', 'pu240/pu239',
'pu241/pu239', 'pu242/pu239'
]
ratio_list = tamu_list
    if args.ratios:
XY = ratios(XY, ratio_list, lbls+nonlbls)
test = ratios(test, ratio_list, lbls)
unc = float(args.sim_unc)
pred_df = mll_testset(XY, test, args.ext_test, unc, lbls, nonlbls)
fname = args.outfile + '.csv'
pred_df.to_csv(fname)
return
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
""" cmddocs Class """
import os
import cmd
import sys
import signal
import configparser
import git
import pkg_resources
from cmddocs.articles import *
from cmddocs.completions import *
from cmddocs.version import __version__
class Cmddocs(cmd.Cmd):
""" Basic commandline interface class """
def __init__(self, conf="~/.cmddocsrc"):
"""
Initialize the class
Inherit from Cmd
Read config, initialize Datadir, create Prompt
"""
cmd.Cmd.__init__(self)
self.reset = '\033[0m'
        self.read_config(conf)
        self.initialize_docs()
self.prompt = '\033[1m\033[' + self.promptcol + 'm' + self.prompt + " " + self.reset
self.do_cd(self.datadir)
    def read_config(self, conf):
"""
All Config Options being read and defaulting
"""
self.colors = {}
config = configparser.ConfigParser()
if not config.read(os.path.expanduser(conf)):
print("Error: your config %s could not be read" % conf)
exit(1)
try:
self.datadir = os.path.expanduser(config.get("General", "Datadir"))
except configparser.NoOptionError:
print("Error: Please set a Datadir in %s" % conf)
exit(1)
try:
self.exclude = os.path.expanduser(config.get("General", "Excludedir"))
except configparser.NoOptionError:
self.exclude = os.path.expanduser('.git/')
try:
self.default_commit_msg = config.get("General", "Default_Commit_Message")
except configparser.NoOptionError:
self.default_commit_msg = "small changes"
try:
self.editor = config.get("General", "Editor")
except configparser.NoOptionError:
if os.environ.get('EDITOR') is not None:
self.editor = os.environ.get('EDITOR')
else:
print("Error: Could not find usable editor.")
print("Please specify one in config or set EDITOR in your \
OS Environment")
exit(1)
try:
self.pager = config.get("General", "Pager")
except configparser.NoOptionError:
            if os.environ.get('PAGER') is not None:
                self.pager = os.environ.get('PAGER')
            else:
                print("Error: Could not find usable Pager.")
                print("Please specify one in config or set PAGER in your "
                      "OS Environment")
                exit(1)
try:
self.pagerflags = config.get("General", "PagerFlags")
except configparser.NoOptionError:
self.pagerflags = False
try:
self.editorflags = config.get("General", "EditorFlags")
except configparser.NoOptionError:
self.editorflags = False
try:
self.prompt = config.get("General", "Prompt")
except configparser.NoOptionError:
self.prompt = "cmddocs>"
try:
self.promptcol = config.get("General", "Promptcolor")
except configparser.NoOptionError:
self.promptcol = "37"
try:
self.intro = config.get("General", "Intro_Message")
except configparser.NoOptionError:
self.intro = "cmddocs - press ? for help"
try:
self.mailfrom = config.get("General", "Mail")
except configparser.NoOptionError:
self.mailfrom = "nobody"
try:
self.extension = config.get("General", "Default_Extension")
except configparser.NoOptionError:
self.extension = "md"
try:
self.colors['h1'] = config.get("Colors", "Header12")
except (configparser.NoOptionError, configparser.NoSectionError):
self.colors['h1'] = "37"
try:
self.colors['h2'] = config.get("Colors", "Header345")
except (configparser.NoOptionError, configparser.NoSectionError):
self.colors['h2'] = "92"
try:
self.colors['code'] = config.get("Colors", "Codeblock")
except (configparser.NoOptionError, configparser.NoSectionError):
self.colors['code'] = "92"
return
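    # A minimal ~/.cmddocsrc matching the options read above (values are
    # illustrative; only Datadir is strictly required, since Editor and Pager
    # fall back to $EDITOR and $PAGER):
    #
    #   [General]
    #   Datadir = ~/docs
    #   Editor = vim
    #   Pager = less
    #   Default_Commit_Message = small changes
    #   Default_Extension = md
    #
    #   [Colors]
    #   Header12 = 37
    #   Header345 = 92
    #   Codeblock = 92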
    def initialize_docs(self):
""" Read or initialize git repository """
try:
self.repo = git.Repo(self.datadir)
except git.exc.NoSuchPathError:
print("Error: Specified datadir %s does not exist" % self.datadir)
exit(1)
except git.exc.InvalidGitRepositoryError:
self.repo = git.Repo.init(self.datadir)
try:
self.repo.git.add(".")
self.repo.git.commit(m=" init")
except git.exc.GitCommandError:
pass
print("Successfully created and initialized empty repo at %s" % self.datadir)
# Change to datadir
try:
os.chdir(self.datadir)
self.cwd = os.getcwd()
except OSError:
print("Error: Switching to Datadir %s not possible" % self.datadir)
exit(1)
def do_list(self, dir):
"""
Show files in current working dir
Usage:
list
l
list Databases/
"""
if not dir:
dir = "."
return list_articles(dir, self.extension)
do_l = do_list
do_ls = do_list
def do_dirs(self, dir):
"""
Show directories in current working dir
Usage:
dirs
d
dirs Databases/
"""
if not dir:
dir = "."
return list_directories(dir)
do_d = do_dirs
def do_cd(self, dir):
"""
Change directory
Usage:
cd Programming/
cd
"""
change_directory(dir, self.datadir)
def do_pwd(self, line):
"""
Show current directory
Usage:
pwd
"""
print(os.path.relpath(os.getcwd(), self.datadir))
def do_edit(self, article, test=False):
"""
Edit or create new article.
Usage:
edit databases/mongodb
edit intro
"""
return edit_article(article, os.getcwd(), self.editor, self.repo,
self.default_commit_msg, self.extension, test, self.editorflags)
do_e = do_edit
def do_view(self, article):
"""
View an article. Creates temporary file with converted markdown to
ansi colored output. Opens your PAGER. (Only less supported atm)
Usage:
view databases/mongodb
view intro
"""
return view_article(article, os.getcwd(), self.pager, self.extension,
self.pagerflags, self.colors)
def do_mail(self, article):
"""
Mail an article to a friend
Usage:
mail databases/mongodb
Recipient: <EMAIL>
mail programming/r/loops
mail intro
"""
return mail_article(article, os.getcwd(), self.mailfrom, self.extension)
def do_delete(self, article):
"""
Delete an article
Usage:
delete databases/mongodb
rm databases/mssql
"""
delete_article(article, os.getcwd(), self.repo, self.extension)
do_rm = do_delete
def do_move(self, args):
"""
Move an article to a new location
Usage:
move databases/mongodb databases/MongoDB
move life/foo notes/foo
mv life/foo notes/foo
"""
move_article(os.getcwd(), args, self.repo, self.extension)
do_mv = do_move
def do_search(self, keyword):
"""
Search for keyword in current directory
Usage:
search mongodb
search foo
"""
print(search_article(keyword, os.getcwd(), self.datadir,
self.exclude))
def do_status(self, line):
"""
Show git repo status of your docs
Usage:
status
"""
print(self.repo.git.status())
def do_log(self, args):
"""
Show git logs of your docs.
Usage:
        log # default loglines: 10
log 20 # show 20 loglines
log 20 article # show log for specific article
log databases/mongodb 3 # same
"""
show_log(args, self.repo, self.extension)
def do_info(self, article):
"""
Show infos for an article
Usage:
info article
info Databases/mongodb
Created: 2014-01-18 11:18:03 +0100
Updated: 2015-10-23 14:14:44 +0200
Commits: 26
Lines: 116
Words: 356
Characters: 2438
"""
info_article(article, os.getcwd(), self.repo, self.extension)
def do_diff(self, args):
"""
Show git diffs between files and commits
Usage:
diff 7 # show diff for last 7 changes
diff 1 article # show diff for last change to article
diff # show last 5 diffs
"""
show_diff(args, self.repo, self.extension)
def do_undo(self, args):
"""
You can revert your changes (use revert from git)
Usage:
undo HEAD
undo 355f375
Will ask for confirmation.
"""
undo_change(args, self.repo)
def do_stats(self, args):
"""
Calculate some statistics on your docs
Usage:
stats
"""
show_stats(args, self.repo, self.datadir)
def do_version(self, args):
"""
Show version of cmddocs
Usage:
version
"""
print("cmddocs %s" % __version__)
do_revert = do_undo
### exit
def do_exit(self, args):
"""
Exit cmddocs
Usage:
exit
"""
return True
do_EOF = do_exit
### completions
complete_l = path_complete
complete_ls = path_complete
complete_list = path_complete
complete_d = path_complete
complete_dirs = path_complete
complete_view = path_complete
complete_cd = path_complete
complete_e = path_complete
complete_edit = path_complete
complete_rm = path_complete
complete_delete = path_complete
complete_mail = path_complete
complete_mv = path_complete
complete_move = path_complete
complete_log = path_complete
complete_info = path_complete
def ctrlc(sig, frame):
""" Handle Interrupts """
print("\n")
sys.exit(0)
signal.signal(signal.SIGINT, ctrlc)
def main():
""" Call loop method """
Cmddocs().cmdloop()
if __name__ == '__main__':
main()
|
<filename>sdk/loadtestservice/azure-mgmt-loadtestservice/azure/mgmt/loadtestservice/models/_models_py3.py<gh_stars>1000+
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._load_test_client_enums import *
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~load_test_client.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~load_test_client.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~load_test_client.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~load_test_client.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~load_test_client.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = tags
self.location = location
class LoadTestResource(TrackedResource):
"""LoadTest details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~load_test_client.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The type of identity used for the resource.
:type identity: ~load_test_client.models.SystemAssignedServiceIdentity
:param description: Description of the resource.
:type description: str
:ivar provisioning_state: Resource provisioning state. Possible values include: "Succeeded",
"Failed", "Canceled", "Deleted".
:vartype provisioning_state: str or ~load_test_client.models.ResourceState
:ivar data_plane_uri: Resource data plane URI.
:vartype data_plane_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'description': {'max_length': 512, 'min_length': 0},
'provisioning_state': {'readonly': True},
'data_plane_uri': {'readonly': True, 'max_length': 2083, 'min_length': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'SystemAssignedServiceIdentity'},
'description': {'key': 'properties.description', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'data_plane_uri': {'key': 'properties.dataPlaneURI', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["SystemAssignedServiceIdentity"] = None,
description: Optional[str] = None,
**kwargs
):
super(LoadTestResource, self).__init__(tags=tags, location=location, **kwargs)
self.identity = identity
self.description = description
self.provisioning_state = None
self.data_plane_uri = None
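# Example construction of a LoadTestResource (illustrative values only;
# 'location' is the only required argument):
#   resource = LoadTestResource(
#       location="westus2",
#       tags={"env": "dev"},
#       description="Load test resource for smoke tests",
#   )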
class LoadTestResourcePageList(msrest.serialization.Model):
"""List of resources page result.
:param value: List of resources in current page.
:type value: list[~load_test_client.models.LoadTestResource]
:param next_link: Link to next page of resources.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LoadTestResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["LoadTestResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(LoadTestResourcePageList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class LoadTestResourcePatchRequestBody(msrest.serialization.Model):
"""LoadTest resource patch request body.
:param tags: A set of tags. Resource tags.
:type tags: any
:param identity: The type of identity used for the resource.
:type identity: ~load_test_client.models.SystemAssignedServiceIdentity
:param properties: Load Test resource properties.
:type properties: ~load_test_client.models.LoadTestResourcePatchRequestBodyProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'identity': {'key': 'identity', 'type': 'SystemAssignedServiceIdentity'},
'properties': {'key': 'properties', 'type': 'LoadTestResourcePatchRequestBodyProperties'},
}
def __init__(
self,
*,
tags: Optional[Any] = None,
identity: Optional["SystemAssignedServiceIdentity"] = None,
properties: Optional["LoadTestResourcePatchRequestBodyProperties"] = None,
**kwargs
):
super(LoadTestResourcePatchRequestBody, self).__init__(**kwargs)
self.tags = tags
self.identity = identity
self.properties = properties
class LoadTestResourcePatchRequestBodyProperties(msrest.serialization.Model):
"""Load Test resource properties.
:param description: Description of the resource.
:type description: str
"""
_validation = {
'description': {'max_length': 512, 'min_length': 0},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
**kwargs
):
super(LoadTestResourcePatchRequestBodyProperties, self).__init__(**kwargs)
self.description = description
class Operation(msrest.serialization.Model):
"""Details of a REST API operation, returned from the Resource Provider Operations API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
"Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
:vartype name: str
:ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
data-plane operations and "false" for ARM/control-plane operations.
:vartype is_data_action: bool
:param display: Localized display information for this particular operation.
:type display: ~load_test_client.models.OperationDisplay
:ivar origin: The intended executor of the operation; as in Resource Based Access Control
(RBAC) and audit logs UX. Default value is "user,system". Possible values include: "user",
"system", "user,system".
:vartype origin: str or ~load_test_client.models.Origin
:ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
internal only APIs. Possible values include: "Internal".
:vartype action_type: str or ~load_test_client.models.ActionType
"""
_validation = {
'name': {'readonly': True},
'is_data_action': {'readonly': True},
'origin': {'readonly': True},
'action_type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.is_data_action = None
self.display = display
self.origin = None
self.action_type = None
class OperationDisplay(msrest.serialization.Model):
"""Localized display information for this particular operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: The localized friendly form of the resource provider name, e.g. "Microsoft
Monitoring Insights" or "Microsoft Compute".
:vartype provider: str
:ivar resource: The localized friendly name of the resource type related to this operation.
E.g. "Virtual Machines" or "Job Schedule Collections".
:vartype resource: str
:ivar operation: The concise, localized friendly name for the operation; suitable for
dropdowns. E.g. "Create or Update Virtual Machine", "Restart Virtual Machine".
:vartype operation: str
:ivar description: The short, localized friendly description of the operation; suitable for
tool tips and detailed views.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationListResult(msrest.serialization.Model):
"""A list of REST API operations supported by an Azure Resource Provider. It contains an URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of operations supported by the resource provider.
:vartype value: list[~load_test_client.models.Operation]
:ivar next_link: URL to get the next set of operation list results (if there are any).
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SystemAssignedServiceIdentity(msrest.serialization.Model):
"""Managed service identity (either system assigned, or none).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The service principal ID of the system assigned identity. This property
will only be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
provided for a system assigned identity.
:vartype tenant_id: str
:param type: Required. Type of managed service identity (either system assigned, or none).
Possible values include: "None", "SystemAssigned".
:type type: str or ~load_test_client.models.SystemAssignedServiceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
type: Union[str, "SystemAssignedServiceIdentityType"],
**kwargs
):
super(SystemAssignedServiceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
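# Example (illustrative): a system-assigned identity is requested with
#   identity = SystemAssignedServiceIdentity(type="SystemAssigned")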
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~load_test_client.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~load_test_client.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware vCloud platform.
"""
import os
import subprocess
import shutil
import time
import urllib2
from oslo.config import cfg
import sshclient
from nova.compute import power_state
from nova.compute import task_states
from nova import image
from nova.openstack.common import log as logging
from nova.openstack.common import fileutils
from nova.i18n import _
from nova.virt.hybrid.common import fake_driver
from nova.virt.hybrid.common import common_tools
from nova.virt.hybrid.vcloud import hyper_agent_api
from nova.virt.hybrid.vcloud import util
from nova.virt.hybrid.vcloud.vcloud import VCLOUD_STATUS
from nova.virt.hybrid.vcloud.vcloud_client import VCloudClient
from nova.volume.cinder import API as cinder_api
from nova.network import neutronv2
vcloudapi_opts = [
cfg.StrOpt('vcloud_node_name',
default='vcloud_node_01',
help='node name, which a node is a vcloud vcd'
'host.'),
cfg.StrOpt('vcloud_host_ip',
help='Hostname or IP address for connection to VMware VCD '
'host.'),
cfg.IntOpt('vcloud_host_port',
default=443,
               help='Host port for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_host_username',
help='Host username for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_host_password',
help='Host password for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_org',
help='User org for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_vdc',
help='Vdc for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_version',
default='5.5',
help='Version for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_service',
default='85-719',
help='Service for connection to VMware VCD '
'host.'),
cfg.BoolOpt('vcloud_verify',
default=False,
help='Verify for connection to VMware VCD '
'host.'),
cfg.BoolOpt('use_link_clone',
default=True,
help='Use link clone or not '),
cfg.StrOpt('vcloud_service_type',
default='vcd',
help='Service type for connection to VMware VCD '
'host.'),
cfg.IntOpt('vcloud_api_retry_count',
default=2,
help='Api retry count for connection to VMware VCD '
'host.'),
cfg.StrOpt('vcloud_conversion_dir',
default='/vcloud/convert_tmp',
help='the directory where images are converted in '),
cfg.StrOpt('vcloud_volumes_dir',
default='/vcloud/volumes',
help='the directory of volume files'),
cfg.StrOpt('vcloud_vm_naming_rule',
default='openstack_vm_id',
help='the rule to name vcloud VMs, valid options:'
'openstack_vm_id, openstack_vm_name, cascaded_openstack_rule'),
cfg.DictOpt('vcloud_flavor_map',
default={
'm1.tiny': '1',
'm1.small': '2',
'm1.medium': '3',
'm1.large': '4',
'm1.xlarge': '5'},
help='map nova flavor name to vcloud vm specification id'),
cfg.StrOpt('metadata_iso_catalog',
default='metadata-isos',
               help='The metadata iso catalog.'),
cfg.StrOpt('provider_base_network_name',
help='The provider network name which base provider network use.'),
cfg.StrOpt('provider_tunnel_network_name',
help='The provider network name which tunnel provider network use.'),
cfg.StrOpt('image_user',
default='',
help=''),
cfg.StrOpt('image_password',
default='',
help=''),
cfg.StrOpt('tunnel_cidr',
help='The tunnel cidr of provider network.'),
cfg.StrOpt('route_gw',
help='The route gw of the provider network.')
]
status_dict_vapp_to_instance = {
VCLOUD_STATUS.FAILED_CREATION: power_state.CRASHED,
VCLOUD_STATUS.UNRESOLVED: power_state.NOSTATE,
VCLOUD_STATUS.RESOLVED: power_state.NOSTATE,
VCLOUD_STATUS.DEPLOYED: power_state.NOSTATE,
VCLOUD_STATUS.SUSPENDED: power_state.SUSPENDED,
VCLOUD_STATUS.POWERED_ON: power_state.RUNNING,
VCLOUD_STATUS.WAITING_FOR_INPUT: power_state.NOSTATE,
VCLOUD_STATUS.UNKNOWN: power_state.NOSTATE,
VCLOUD_STATUS.UNRECOGNIZED: power_state.NOSTATE,
VCLOUD_STATUS.POWERED_OFF: power_state.SHUTDOWN,
VCLOUD_STATUS.INCONSISTENT_STATE: power_state.NOSTATE,
VCLOUD_STATUS.MIXED: power_state.NOSTATE,
VCLOUD_STATUS.DESCRIPTOR_PENDING: power_state.NOSTATE,
VCLOUD_STATUS.COPYING_CONTENTS: power_state.NOSTATE,
VCLOUD_STATUS.DISK_CONTENTS_PENDING: power_state.NOSTATE,
VCLOUD_STATUS.QUARANTINED: power_state.NOSTATE,
VCLOUD_STATUS.QUARANTINE_EXPIRED: power_state.NOSTATE,
VCLOUD_STATUS.REJECTED: power_state.NOSTATE,
VCLOUD_STATUS.TRANSFER_TIMEOUT: power_state.NOSTATE,
VCLOUD_STATUS.VAPP_UNDEPLOYED: power_state.NOSTATE,
VCLOUD_STATUS.VAPP_PARTIALLY_DEPLOYED: power_state.NOSTATE,
}
CONF = cfg.CONF
CONF.register_opts(vcloudapi_opts, 'vcloud')
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
class VCloudDriver(fake_driver.FakeNovaDriver):
"""The VCloud host connection object."""
def __init__(self, virtapi, scheme="https"):
self._node_name = CONF.vcloud.vcloud_node_name
self._vcloud_client = VCloudClient(scheme=scheme)
self.cinder_api = cinder_api()
if not os.path.exists(CONF.vcloud.vcloud_conversion_dir):
os.makedirs(CONF.vcloud.vcloud_conversion_dir)
if not os.path.exists(CONF.vcloud.vcloud_volumes_dir):
os.makedirs(CONF.vcloud.vcloud_volumes_dir)
self.hyper_agent_api = hyper_agent_api.HyperAgentAPI()
super(VCloudDriver, self).__init__(virtapi)
def _update_vm_task_state(self, instance, task_state):
instance.task_state = task_state
instance.save()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
LOG.info('begin time of vcloud create vm is %s' %
(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
image_cache_dir = CONF.vcloud.vcloud_conversion_dir
volume_cache_dir = CONF.vcloud.vcloud_volumes_dir
# update port bind host
self._binding_host(context, network_info, instance.uuid)
this_conversion_dir = '%s/%s' % (CONF.vcloud.vcloud_conversion_dir,
instance.uuid)
fileutils.ensure_tree(this_conversion_dir)
os.chdir(this_conversion_dir)
#0: create metadata iso and upload to vcloud
rabbit_host = CONF.rabbit_host
if 'localhost' in rabbit_host or '127.0.0.1' in rabbit_host:
rabbit_host = CONF.rabbit_hosts[0]
if ':' in rabbit_host:
rabbit_host = rabbit_host[0:rabbit_host.find(':')]
iso_file = common_tools.create_user_data_iso(
"userdata.iso",
{"rabbit_userid": CONF.rabbit_userid,
"rabbit_password": <PASSWORD>,
"rabbit_host": rabbit_host,
"host": instance.uuid,
"tunnel_cidr": CONF.vcloud.tunnel_cidr,
"route_gw": CONF.vcloud.route_gw},
this_conversion_dir)
vapp_name = self._get_vcloud_vapp_name(instance)
metadata_iso = self._vcloud_client.upload_metadata_iso(iso_file,
vapp_name)
# 0.get vorg, user name,password vdc from configuration file (only one
# org)
# 1.1 get image id, vm info ,flavor info
# image_uuid = instance.image_ref
if 'id' in image_meta:
# create from image
image_uuid = image_meta['id']
else:
# create from volume
image_uuid = image_meta['properties']['image_id']
#NOTE(nkapotoxin): create vapp with vapptemplate
network_names = [CONF.vcloud.provider_tunnel_network_name, CONF.vcloud.provider_base_network_name]
network_configs = self._vcloud_client.get_network_configs(network_names)
# create vapp
if CONF.vcloud.use_link_clone:
            vapp = self._vcloud_client.create_vapp(vapp_name, image_uuid, network_configs)
        else:
            vapp = self._vcloud_client.create_vapp(vapp_name, image_uuid, network_configs,
                                                   root_gb=instance.get_flavor().root_gb)
# generate the network_connection
network_connections = self._vcloud_client.get_network_connections(vapp, network_names)
# update network
self._vcloud_client.update_vms_connections(vapp, network_connections)
# update vm specification
self._vcloud_client.modify_vm_cpu(vapp, instance.get_flavor().vcpus)
self._vcloud_client.modify_vm_memory(vapp, instance.get_flavor().memory_mb)
# mount it
self._vcloud_client.insert_media(vapp_name, metadata_iso)
# power on it
self._vcloud_client.power_on_vapp(vapp_name)
# 7. clean up
shutil.rmtree(this_conversion_dir, ignore_errors=True)
LOG.info('end time of vcloud create vm is %s' %
(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
# update port bind host
self._binding_host(context, network_info, instance.uuid)
@staticmethod
def _binding_host(context, network_info, host_id):
neutron = neutronv2.get_client(context, admin=True)
port_req_body = {'port': {'binding:host_id': host_id}}
for vif in network_info:
neutron.update_port(vif.get('id'), port_req_body)
def _get_vcloud_vapp_name(self, instance):
if CONF.vcloud.vcloud_vm_naming_rule == 'openstack_vm_id':
return instance.uuid
elif CONF.vcloud.vcloud_vm_naming_rule == 'openstack_vm_name':
return instance.display_name
elif CONF.vcloud.vcloud_vm_naming_rule == 'cascaded_openstack_rule':
return instance.display_name
else:
return instance.uuid
def _get_vcloud_volume_name(self, volume_id, volume_name):
prefix = 'volume@'
if volume_name.startswith(prefix):
vcloud_volume_name = volume_name[len(prefix):]
else:
vcloud_volume_name = volume_id
return vcloud_volume_name
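    # Example of the mapping above: a Cinder volume named 'volume@data-disk-01'
    # maps to the vCloud disk name 'data-disk-01'; a volume whose display name
    # lacks the 'volume@' prefix falls back to its volume id instead.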
def _download_vmdk_from_vcloud(self, context, src_url, dst_file_name):
# local_file_handle = open(dst_file_name, "wb")
local_file_handle = fileutils.file_open(dst_file_name, "wb")
remote_file_handle = urllib2.urlopen(src_url)
file_size = remote_file_handle.headers['content-length']
util.start_transfer(context, remote_file_handle, file_size,
write_file_handle=local_file_handle)
def _upload_image_to_glance(
self, context, src_file_name, image_id, instance):
vm_task_state = instance.task_state
file_size = os.path.getsize(src_file_name)
read_file_handle = fileutils.file_open(src_file_name, "rb")
metadata = IMAGE_API.get(context, image_id)
# The properties and other fields that we need to set for the image.
image_metadata = {"disk_format": "qcow2",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"owner_id": instance['project_id']}}
util.start_transfer(context,
read_file_handle,
file_size,
image_id=metadata['id'],
image_meta=image_metadata,
task_state=task_states.IMAGE_UPLOADING,
instance=instance)
self._update_vm_task_state(instance, task_state=vm_task_state)
#TODO: test it
def snapshot(self, context, instance, image_id, update_task_state):
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
# 1. get vmdk url
vapp_name = self._get_vcloud_vapp_name(instance)
remote_vmdk_url = self._vcloud_client.query_vmdk_url(vapp_name)
# 2. download vmdk
temp_dir = '%s/%s' % (CONF.vcloud.vcloud_conversion_dir, instance.uuid)
fileutils.ensure_tree(temp_dir)
vmdk_name = remote_vmdk_url.split('/')[-1]
local_file_name = '%s/%s' % (temp_dir, vmdk_name)
self._download_vmdk_from_vcloud(
context,
remote_vmdk_url,
local_file_name)
# 3. convert vmdk to qcow2
converted_file_name = temp_dir + '/converted-file.qcow2'
        convert_command = "qemu-img convert -f %s -O %s %s %s" % \
            ('vmdk',
             'qcow2',
             local_file_name,
             converted_file_name)
        convert_result = subprocess.call([convert_command], shell=True)
if convert_result != 0:
            # TODO: mark the snapshot as failed (e.g. update image metadata)
            LOG.error('converting vmdk to qcow2 failed')
        # 4. upload qcow2 to image repository
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._upload_image_to_glance(
context,
converted_file_name,
image_id,
instance)
# 5. delete temporary files
shutil.rmtree(temp_dir, ignore_errors=True)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
LOG.debug('[vcloud nova driver] begin reboot instance: %s' %
instance.uuid)
vapp_name = self._get_vcloud_vapp_name(instance)
try:
self._vcloud_client.reboot_vapp(vapp_name)
except Exception as e:
LOG.error('reboot instance %s failed, %s' % (vapp_name, e))
def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0):
        LOG.debug('[vcloud nova driver] begin power off instance: %s' %
                  instance.uuid)
vapp_name = self._get_vcloud_vapp_name(instance)
try:
self._vcloud_client.power_off_vapp(vapp_name)
except Exception as e:
LOG.error('power off failed, %s' % e)
def power_on(self, context, instance, network_info, block_device_info):
vapp_name = self._get_vcloud_vapp_name(instance)
self._vcloud_client.power_on_vapp(vapp_name)
def _do_destroy_vm(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
vapp_name = self._get_vcloud_vapp_name(instance)
try:
self._vcloud_client.power_off_vapp(vapp_name)
except Exception as e:
LOG.error('power off failed, %s' % e)
vm_task_state = instance.task_state
self._update_vm_task_state(instance, vm_task_state)
try:
self._vcloud_client.delete_vapp(vapp_name)
except Exception as e:
LOG.error('delete vapp failed %s' % e)
try:
self._vcloud_client.delete_metadata_iso(vapp_name)
except Exception as e:
LOG.error('delete metadata iso failed %s' % e)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
LOG.debug('[vcloud nova driver] destroy: %s' % instance.uuid)
self._do_destroy_vm(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
# delete agent
instance_id = instance.uuid
neutron_client = neutronv2.get_client(context=None, admin=True)
agent = neutron_client.list_agents(host=instance_id)
if len(agent['agents']) == 1:
neutron_client.delete_agent(agent['agents'][0]['id'])
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
if destroy_vifs:
self.unplug_vifs(instance, network_info)
LOG.debug("Cleanup network finished", instance=instance)
def attach_interface(self, instance, image_meta, vif):
LOG.debug("attach_interface: %s, %s" % (instance, vif))
def detach_interface(self, instance, vif):
LOG.debug("detach_interface: %s, %s" % (instance, vif))
def _get_vapp_ip(self, instance):
instance_id = instance.uuid
neutron_client = neutronv2.get_client(context=None, admin=True)
agent = neutron_client.list_agents(host=instance_id)
        times = 10
        while len(agent['agents']) == 0:
            if times == 0:
                break
            time.sleep(10)
            agent = neutron_client.list_agents(host=instance_id)
            times = times - 1
        if len(agent['agents']) == 0:
            return None
        else:
            return agent['agents'][0]['configurations']['tunneling_ip']
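    # _get_vapp_ip polls the Neutron agent list for up to ~100 s (10 retries,
    # 10 s apart) waiting for the in-VM hyper agent to register, then returns
    # its tunneling IP, or None if the agent never shows up.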
def _attach_volume_iscsi(self, instance, connection_info):
user = CONF.vcloud.image_user
pwd = <PASSWORD>
vapp_ip = self._get_vapp_ip(instance)
if vapp_ip:
host = vapp_ip
else:
LOG.error("vapp_ip is None ,attach volume failed")
raise Exception(_("vapp_ip is None ,attach volume failed"))
ssh_client = sshclient.SSH(user, host, password=<PASSWORD>)
target_iqn = connection_info['data']['target_iqn']
target_portal = connection_info['data']['target_portal']
cmd1 = "sudo iscsiadm -m node -T %s -p %s" % (target_iqn, target_portal)
while True:
try:
cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
LOG.debug("sudo cmd1 info status=%s ,out=%s, err=%s " % (cmd1_status, cmd1_out, cmd1_err))
if cmd1_status in [21, 255]:
cmd2 = "sudo iscsiadm -m node -T %s -p %s --op new" % (target_iqn, target_portal)
cmd2_status, cmd2_out, cmd2_err = ssh_client.execute(cmd2)
LOG.debug("sudo cmd2 info status=%s ,out=%s, err=%s " % (cmd2_status, cmd2_out, cmd2_err))
break
except sshclient.SSHError:
LOG.debug("wait for vm to initialize network")
time.sleep(5)
cmd3 = "sudo iscsiadm -m session"
cmd3_status, cmd3_out, cmd3_err = ssh_client.execute(cmd3)
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in cmd3_out.splitlines() if p.startswith("tcp:")]
stripped_portal = connection_info['data']['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
connection_info['data']['target_iqn']]
) == 0:
cmd4 = "sudo iscsiadm -m node -T %s -p %s --login" % (target_iqn, target_portal)
cmd4_status, cmd4_out, cmd4_err = ssh_client.execute(cmd4)
LOG.debug("sudo cmd4 info status=%s ,out=%s, err=%s " % (cmd4_status, cmd4_out, cmd4_err))
cmd5 = "sudo iscsiadm -m node -T %s -p %s --op update -n node.startup -v automatic" % \
(target_iqn, target_portal)
cmd5_status, cmd5_out, cmd5_err = ssh_client.execute(cmd5)
LOG.debug("sudo cmd5 info status=%s ,out=%s, err=%s " % (cmd5_status, cmd5_out, cmd5_err))
ssh_client.close()
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
instance_name = instance['display_name']
LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
volume_id = connection_info['data']['volume_id']
driver_type = connection_info['driver_volume_type']
volume = self.cinder_api.get(context, volume_id)
volume_name = volume['display_name']
# use volume_name as vcloud disk name, remove prefix str `volume@`
# if volume_name does not start with volume@, then use volume id instead
vcloud_volume_name = self._get_vcloud_volume_name(volume_id,
volume_name)
        # find volume reference by its name
vapp_name = self._get_vcloud_vapp_name(instance)
if driver_type == 'iscsi':
self._attach_volume_iscsi(instance, connection_info)
return
result, resp = self._vcloud_client.get_disk_ref(vcloud_volume_name)
if result:
LOG.debug("Find volume successful, disk name is: %(disk_name)s"
"disk ref's href is: %(disk_href)s.",
{'disk_name': vcloud_volume_name,
'disk_href': resp.href})
else:
            LOG.error(_('Unable to find volume %s to attach to instance'),
                      vcloud_volume_name)
if self._vcloud_client.attach_disk_to_vm(vapp_name, resp):
LOG.info("Volume %(volume_name)s attached to: %(instance_name)s",
{'volume_name': vcloud_volume_name,
'instance_name': instance_name})
def _detach_volume_iscsi(self, instance, connection_info):
user = CONF.vcloud.image_user
pwd = CONF.vcloud.image_password
vapp_ip = self._get_vapp_ip(instance)
if vapp_ip:
host = vapp_ip
else:
LOG.debug("vapp_ip is None ,attach volume failed")
raise
ssh_client = sshclient.SSH(user, host, password=<PASSWORD>)
target_iqn = connection_info['data']['target_iqn']
target_portal = connection_info['data']['target_portal']
cmd1 = "ls -l /dev/disk/by-path/ | grep %s | awk -F '/' '{print $NF}'" % target_iqn
cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
LOG.debug(" cmd1 info status=%s ,out=%s, err=%s " % (cmd1_status, cmd1_out, cmd1_err))
device = "/dev/" + cmd1_out.split('\n')[0]
path = "/sys/block/" + cmd1_out.split('\n')[0] + "/device/delete"
cmd2 = "sudo blockdev --flushbufs %s" % device
cmd2_status, cmd2_out, cmd2_err = ssh_client.execute(cmd2)
LOG.debug(" cmd2 info status=%s ,out=%s, err=%s " % (cmd2_status, cmd2_out, cmd2_err))
cmd3 = "echo 1 | sudo tee -a %s" % path
cmd3_status, cmd3_out, cmd3_err = ssh_client.execute(cmd3)
LOG.debug("sudo cmd3 info status=%s ,out=%s, err=%s " % (cmd3_status, cmd3_out, cmd3_err))
cmd4 = "sudo iscsiadm -m node -T %s -p %s --op update -n node.startup -v manual" % (target_iqn, target_portal)
cmd4_status, cmd4_out, cmd4_err = ssh_client.execute(cmd4)
LOG.debug("sudo cmd4 info status=%s ,out=%s, err=%s " % (cmd4_status, cmd4_out, cmd4_err))
cmd5 = "sudo iscsiadm -m node -T %s -p %s --logout" % (target_iqn, target_portal)
cmd5_status, cmd5_out, cmd5_err = ssh_client.execute(cmd5)
LOG.debug("sudo cmd5 info status=%s ,out=%s, err=%s " % (cmd5_status, cmd5_out, cmd5_err))
cmd6 = "sudo iscsiadm -m node -T %s -p %s --op delete" % (target_iqn, target_portal)
cmd6_status, cmd6_out, cmd6_err = ssh_client.execute(cmd6)
LOG.debug("sudo cmd6 info status=%s ,out=%s, err=%s " % (cmd6_status, cmd6_out, cmd6_err))
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
instance_name = instance['display_name']
LOG.debug("Detach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
volume_id = connection_info['data']['volume_id']
driver_type = connection_info['driver_volume_type']
if driver_type == 'iscsi':
self._detach_volume_iscsi(instance, connection_info)
return
volume_name = connection_info['data']['display_name']
# use volume_name as vcloud disk name, remove prefix str `volume@`
# if volume_name does not start with volume@, then use volume id instead
vcloud_volume_name = self._get_vcloud_volume_name(volume_id,
volume_name)
        # find volume reference by its name
vapp_name = self._get_vcloud_vapp_name(instance)
#if driver_type == 'iscsi':
# self._detach_volume_iscsi(instance, connection_info)
# return
result, resp = self._vcloud_client.get_disk_ref(vcloud_volume_name)
if result:
LOG.debug("Find volume successful, disk name is: %(disk_name)s"
"disk ref's href is: %(disk_href)s.",
{'disk_name': vcloud_volume_name,
'disk_href': resp.href})
else:
            LOG.error(_('Unable to find volume %s to detach from instance'),
                      vcloud_volume_name)
if self._vcloud_client.detach_disk_from_vm(vapp_name, resp):
LOG.info("Volume %(volume_name)s detached from: %(instance_name)s",
{'volume_name': vcloud_volume_name,
'instance_name': instance_name})
def get_info(self, instance):
vapp_name = self._get_vcloud_vapp_name(instance)
state = self._vcloud_client.get_vcloud_vapp_status(vapp_name)
return {'state': state,
'max_mem': 0,
'mem': 0,
'num_cpu': 1,
'cpu_time': 0}
def get_available_nodes(self, refresh=False):
return [self._node_name]
def plug_vifs(self, instance, network_info):
LOG.debug("plug_vifs")
# TODO: retrieve provider info ips/macs for vcloud
for vif in network_info:
self.hyper_agent_api.plug(instance.uuid, vif, None)
def unplug_vifs(self, instance, network_info):
LOG.debug("unplug_vifs")
for vif in network_info:
self.hyper_agent_api.unplug(instance.uuid, vif)
|
#!/usr/bin/python
'''
Example of inverse kinematics using the simple gradient descent method
'''
from riglib.bmi import robot_arms
import imp
imp.reload(robot_arms)
import numpy as np
import matplotlib.pyplot as plt
import time
from riglib.stereo_opengl import ik
import cProfile
pi = np.pi
q = np.array([0, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) * pi/180
q_sub = q[1::3]
chain = robot_arms.KinematicChain([15, 15, 5, 5])
[t, allt] = chain.forward_kinematics(q)
planar_chain = robot_arms.PlanarXZKinematicChain([15, 15, 5, 5])
[t, allt] = planar_chain.forward_kinematics(q_sub)
# TODO check the sign for the finger joint limits
inf = np.inf
planar_chain.joint_limits = [(-pi, pi), (-pi, 0), (-pi/2, pi/2), (-pi/2, 10*pi/180)]
# target_pos = np.array([10, 0, 10])
shoulder_anchor = np.array([2, 0, -15])
x_target_pos = (np.random.randn() - 0.5)*25
z_target_pos = (np.random.randn() - 0.5)*14
target_pos = np.array([x_target_pos, 0, z_target_pos]) - shoulder_anchor
target_pos = np.array([-14.37130744, 0. , 22.97472612])
q = q_sub[:]
def cost(q, q_start, weight=10):
return np.linalg.norm(q - q_start) + weight*np.linalg.norm(planar_chain.endpoint_pos(q) - target_pos)
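# Note on the cost above: with weight=10 the endpoint-error term dominates, so
# the search favors configurations that reach target_pos even at the expense of
# larger excursions from q_start; a smaller weight trades endpoint accuracy for
# smaller joint motion.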
def stuff():
# Initialize the particles;
n_particles = 10
n_joints = planar_chain.n_joints
q_start = np.array([np.random.uniform(-pi, pi), np.random.uniform(0, pi), np.random.uniform(-pi/2, pi/2), np.random.uniform(-pi/2, 10*pi/180)])
noise = 5*np.random.randn(3)
noise[1] = 0
angles = ik.inv_kin_2D(target_pos + noise, 15., 25.)
q_start_constr = np.array([-angles[0][1], -angles[0][3], 0, 0])
n_iter = 10
particles_q = np.tile(q_start_constr, [n_particles, 1])
particles_v = np.random.randn(n_particles, n_joints)
cost_fn = lambda x: cost(x, q_start)
gbest = particles_q.copy()
gbestcost = np.array(list(map(cost_fn, gbest)))
pbest = gbest[np.argmin(gbestcost)]
pbestcost = cost_fn(pbest)
min_limits = np.array([x[0] for x in planar_chain.joint_limits])
max_limits = np.array([x[1] for x in planar_chain.joint_limits])
min_limits = np.tile(min_limits, [n_particles, 1])
max_limits = np.tile(max_limits, [n_particles, 1])
start_time = time.time()
for k in range(n_iter):
# update positions of particles
particles_q += particles_v
# apply joint limits
# particles_q = np.array(map(lambda x: planar_chain.apply_joint_limits(x)[0], particles_q))
min_viol = particles_q < min_limits
max_viol = particles_q > max_limits
particles_q[min_viol] = min_limits[min_viol]
particles_q[max_viol] = max_limits[max_viol]
# update the costs
costs = np.array(list(map(cost_fn, particles_q)))
# update the 'bests'
gbest[gbestcost > costs] = particles_q[gbestcost > costs]
gbestcost = list(map(cost_fn, gbest))
pbest = gbest[np.argmin(gbestcost)]
pbestcost = cost_fn(pbest)
# update the velocity
        phi1 = 1  # np.random.rand()
        phi2 = 1  # np.random.rand()
        w = 0.25
        c1 = 0.5
        c2 = 0.25
particles_v = w*particles_v + c1*phi1*(np.tile(pbest, [n_particles, 1]) - particles_q) + c2*phi2*(gbest - particles_q)
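        # Note: relative to textbook PSO naming, `gbest` here tracks each
        # particle's personal best and `pbest` the single swarm-wide best; the
        # update still blends inertia (w) with pulls toward both attractors.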
if np.linalg.norm(planar_chain.endpoint_pos(pbest) - target_pos) < 0.5:
break
end_time = time.time()
print("Runtime = %g" % (end_time-start_time))
return pbest
starting_pos = np.array([-5., 0, 5])
target_pos = starting_pos - shoulder_anchor
q_start = planar_chain.random_sample()
noise = 5*np.random.randn(3)
noise[1] = 0
angles = ik.inv_kin_2D(target_pos + noise, 15., 25.)
q_start_constr = np.array([-angles[0][1], -angles[0][3], 0, 0])
pbest = planar_chain.inverse_kinematics_pso(q_start_constr, target_pos, verbose=True, time_limit=1.)
# cProfile.run('planar_chain.inverse_kinematics_pso(q_start_constr, target_pos)', timeunit=0.001)
import cProfile, pstats, io
pr = cProfile.Profile(timeunit=0.001)
pr.enable()
planar_chain.inverse_kinematics_pso(q_start_constr, target_pos)
pr.disable()
s = io.StringIO()
sortby = 'time'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
# print planar_chain.endpoint_pos(pbest)
print("target position")
print(target_pos)
print("error = %g" % np.linalg.norm(planar_chain.endpoint_pos(pbest) - target_pos))
# print "q_start_constr"
# print q_start_constr * 180/np.pi
# print "q_start"
# print q_start * 180/np.pi
|
<filename>mailchimp_marketing_asyncio/models/rss_options1.py
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RSSOptions1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'feed_url': 'str',
'frequency': 'str',
'schedule': 'SendingSchedule',
'constrain_rss_img': 'bool'
}
attribute_map = {
'feed_url': 'feed_url',
'frequency': 'frequency',
'schedule': 'schedule',
'constrain_rss_img': 'constrain_rss_img'
}
def __init__(self, feed_url=None, frequency=None, schedule=None, constrain_rss_img=None): # noqa: E501
"""RSSOptions1 - a model defined in Swagger""" # noqa: E501
self._feed_url = None
self._frequency = None
self._schedule = None
self._constrain_rss_img = None
self.discriminator = None
self.feed_url = feed_url
self.frequency = frequency
if schedule is not None:
self.schedule = schedule
if constrain_rss_img is not None:
self.constrain_rss_img = constrain_rss_img
@property
def feed_url(self):
"""Gets the feed_url of this RSSOptions1. # noqa: E501
The URL for the RSS feed. # noqa: E501
:return: The feed_url of this RSSOptions1. # noqa: E501
:rtype: str
"""
return self._feed_url
@feed_url.setter
def feed_url(self, feed_url):
"""Sets the feed_url of this RSSOptions1.
The URL for the RSS feed. # noqa: E501
:param feed_url: The feed_url of this RSSOptions1. # noqa: E501
:type: str
"""
if feed_url is None:
raise ValueError("Invalid value for `feed_url`, must not be `None`") # noqa: E501
self._feed_url = feed_url
@property
def frequency(self):
"""Gets the frequency of this RSSOptions1. # noqa: E501
The frequency of the RSS Campaign. # noqa: E501
:return: The frequency of this RSSOptions1. # noqa: E501
:rtype: str
"""
return self._frequency
@frequency.setter
def frequency(self, frequency):
"""Sets the frequency of this RSSOptions1.
The frequency of the RSS Campaign. # noqa: E501
:param frequency: The frequency of this RSSOptions1. # noqa: E501
:type: str
"""
if frequency is None:
raise ValueError("Invalid value for `frequency`, must not be `None`") # noqa: E501
allowed_values = ["daily", "weekly", "monthly"] # noqa: E501
if frequency not in allowed_values:
raise ValueError(
"Invalid value for `frequency` ({0}), must be one of {1}" # noqa: E501
.format(frequency, allowed_values)
)
self._frequency = frequency
@property
def schedule(self):
"""Gets the schedule of this RSSOptions1. # noqa: E501
:return: The schedule of this RSSOptions1. # noqa: E501
:rtype: SendingSchedule
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this RSSOptions1.
:param schedule: The schedule of this RSSOptions1. # noqa: E501
:type: SendingSchedule
"""
self._schedule = schedule
@property
def constrain_rss_img(self):
"""Gets the constrain_rss_img of this RSSOptions1. # noqa: E501
Whether to add CSS to images in the RSS feed to constrain their width in campaigns. # noqa: E501
:return: The constrain_rss_img of this RSSOptions1. # noqa: E501
:rtype: bool
"""
return self._constrain_rss_img
@constrain_rss_img.setter
def constrain_rss_img(self, constrain_rss_img):
"""Sets the constrain_rss_img of this RSSOptions1.
Whether to add CSS to images in the RSS feed to constrain their width in campaigns. # noqa: E501
:param constrain_rss_img: The constrain_rss_img of this RSSOptions1. # noqa: E501
:type: bool
"""
self._constrain_rss_img = constrain_rss_img
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RSSOptions1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RSSOptions1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
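# Illustrative usage sketch (not part of the generated client): construct the
# model with its two required fields and serialize it. The feed URL below is a
# placeholder, and ``schedule``/``constrain_rss_img`` are left unset because
# they are optional.
if __name__ == '__main__':
    rss_options = RSSOptions1(feed_url='https://example.com/feed.xml', frequency='daily')
    print(rss_options.to_dict())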
|
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import cgi
import httplib
import logging
import threading
import urlparse
import batchhttp.client
import httplib2
from oauth import oauth
import typepad
__all__ = ('OAuthAuthentication', 'OAuthClient', 'OAuthHttp', 'log')
log = logging.getLogger(__name__)
class OAuthAuthentication(httplib2.Authentication):
"""An `httplib2.Authentication` module that provides OAuth authentication.
The OAuth authentication will be tried automatically, but to use OAuth
authentication with a particular user agent (`Http` instance), it must
have the OAuth consumer and access token set as one of its sets of
credentials. For instance:
>>> csr = oauth.OAuthConsumer(key='blah', secret='moo')
>>> token = get_access_token_for(user)
>>> http.add_credentials(csr, token)
"""
def request(self, method, request_uri, headers, content):
"""Add the HTTP Authorization header to the headers for this request.
In this implementation, the Authorization header contains the OAuth
signing information and signature.
"""
# httplib2 only gives us the URI in parts, so rebuild it from the
# partial uri and host.
partial_uri = urlparse.urlsplit(request_uri)
# Check the query to see if the URI is already signed.
query = partial_uri[3]
querydict = cgi.parse_qs(query)
if 'oauth_signature' in querydict:
# The URI is already signed. Don't do anything.
return
uri = urlparse.urlunsplit((self.http.default_scheme, self.host) + partial_uri[2:])
req = self.signed_request(uri, method)
headers.update(req.to_header())
def signed_request(self, uri, method):
"""Returns an `OAuthRequest` for the given URL and HTTP method, signed
with this `OAuthAuthentication` instance's credentials."""
csr, token = self.credentials
assert token.secret is not None
req = oauth.OAuthRequest.from_consumer_and_token(csr, token,
http_method=method, http_url=uri)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r for web request %s'
% (sign_method.build_signature_base_string(req, csr, token),
uri))
req.sign_request(sign_method, csr, token)
return req
httplib2.AUTH_SCHEME_CLASSES['oauth'] = OAuthAuthentication
httplib2.AUTH_SCHEME_ORDER[0:0] = ('oauth',) # unshift onto front
class OAuthHttp(httplib2.Http):
"""An HTTP user agent for an OAuth web service."""
default_scheme = 'https'
def add_credentials(self, name, password, domain=""):
"""Adds a name (or `OAuthConsumer` instance) and password (or
`OAuthToken` instance) to this user agent's available credentials.
If ``name`` is an `OAuthConsumer` instance and the ``domain`` parameter
is provided, the `OAuthHttp` instance will be configured to provide the
given OAuth credentials, even upon the first request to that domain.
(Normally the user agent will make the request unauthenticated first,
receive a challenge from the server, then make the request again with
the credentials.)
"""
super(OAuthHttp, self).add_credentials(name, password, domain)
log.debug("Setting credentials for name %s password %s"
% (name, password))
if isinstance(name, oauth.OAuthConsumer) and domain:
if self.default_scheme is None:
self.default_scheme = urlparse.urlsplit(typepad.client.endpoint)[0]
# Preauthorize these credentials for any request at that domain.
cred = (name, password)
domain = domain.lower()
auth = OAuthAuthentication(cred, domain, "%s://%s/" % ( self.default_scheme, domain ), {}, None, None, self)
self.authorizations.append(auth)
def url_for_signed_request(self, uri, method=None, headers=None, body=None):
"""Prepares to perform a request on the given URL with the given
parameters by signing the URL with any OAuth credentials available for
that URL.
If no such credentials are available, a `ValueError` is raised.
"""
if method is None:
method = 'GET'
uriparts = list(urlparse.urlparse(uri))
host = uriparts[1]
request_uri = urlparse.urlunparse([None, None] + uriparts[2:])
# find OAuthAuthentication for this uri
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
if not auths:
raise ValueError('No authorizations with which to sign a request to %r are available' % uri)
auth = sorted(auths)[0][1]
# use it to make a signed uri instead
req = auth.signed_request(uri, method)
return req.to_url()
def signed_request(self, uri, method=None, headers=None, body=None):
"""Performs a request on the given URL with the given parameters, after
signing the URL with any OAuth credentials available for that URL.
If no such credentials are available, a `ValueError` is raised.
"""
uri = self.url_for_signed_request(uri, method=method, headers=headers, body=body)
return self.request(uri=uri, method=method, headers=headers, body=body)
def interactive_authorize(self, consumer, app, **kwargs):
from textwrap import fill
# Suppress batchhttp.client's no-log-handler warning.
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
if not isinstance(consumer, oauth.OAuthConsumer):
consumer = oauth.OAuthConsumer(*consumer)
if not isinstance(app, typepad.Application):
app = typepad.Application.get_by_id(app)
# Set up an oauth client for our signed requestses.
oauth_client = OAuthClient(consumer, None)
oauth_client.request_token_url = app.oauth_request_token_url
oauth_client.access_token_url = app.oauth_access_token_url
oauth_client.authorization_url = app.oauth_authorization_url
# Get a request token for the viewer to interactively authorize.
request_token = oauth_client.fetch_request_token(None)
log.debug("Got request token %r", request_token)
# Ask the viewer to authorize it.
approve_url = oauth_client.authorize_token(params=kwargs)
log.debug("Asking viewer to authorize token with URL %r", approve_url)
print fill("""To join your application %r, follow this link and click "Allow":"""
% app.name, width=78)
print
print "<%s>" % approve_url
print
try:
verifier = raw_input('Enter the verifier code TypePad gave you: ')
except KeyboardInterrupt:
print
return
# Exchange the authorized request token for an access token.
access_token = oauth_client.fetch_access_token(verifier=verifier)
# Re-authorize ourselves using that access token, so we can make authenticated requests with it.
domain = urlparse.urlsplit(self.endpoint)[1]
self.add_credentials(consumer, access_token, domain=domain)
# Make sure the key works.
typepad.client.batch_request()
user = typepad.User.get_self()
typepad.client.complete_batch()
# Yay! Give the access token to the viewer for their reference.
print
print fill("""Yay! This new access token authorizes this typepad.client to act as %s (%s). Here's the token:"""
% (user.display_name, user.url_id), width=78)
print """
Key: %s
Secret: %s
""" % (access_token.key, access_token.secret)
print fill("""Pass this access token to typepad.client.add_credentials() to re-authorize as %s later."""
% user.display_name, width=78)
print
return access_token
class OAuthClient(oauth.OAuthClient):
"""An `OAuthClient` for interacting with the TypePad API."""
consumer = None
request_token_url = None
access_token_url = None
authorization_url = None
callback_url = None
def set_consumer(self, key, secret):
self.consumer = oauth.OAuthConsumer(
key = key,
secret = secret,
)
def set_token_from_string(self, token_str):
self.token = oauth.OAuthToken.from_string(token_str)
def fetch_request_token(self, callback):
if not callback:
callback = 'oob'
h = typepad.client
h.clear_credentials()
req = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
http_method='GET',
http_url=self.request_token_url,
callback=callback,
)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r in fetch_request_token()'
% (sign_method.build_signature_base_string(req, self.consumer,
self.token),))
req.sign_request(sign_method, self.consumer, self.token)
log.debug('Asking for request token from %r', req.to_url())
resp, content = h.request(req.to_url(), method=req.get_normalized_http_method())
if resp.status != 200:
log.debug(content)
raise httplib.HTTPException('WHAT %d %s?!' % (resp.status, resp.reason))
self.token = oauth.OAuthToken.from_string(content)
return self.token
def fetch_access_token(self, request_token_str=None, verifier=None):
# -> OAuthToken
h = typepad.client
req = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
token = self.token,
http_url = self.access_token_url,
verifier = verifier,
)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r in fetch_access_token()'
% (sign_method.build_signature_base_string(req, self.consumer,
self.token),))
req.sign_request(sign_method, self.consumer, self.token)
resp, content = h.request(req.to_url(), method=req.get_normalized_http_method())
self.token = oauth.OAuthToken.from_string(content)
return self.token
def authorize_token(self, params=None):
"""Returns the URL at which an interactive user can authorize this
instance's request token."""
if params is None:
params = {}
req = oauth.OAuthRequest.from_token_and_callback(
self.token,
http_url=self.authorization_url,
parameters=params,
)
return req.to_url()
def get_file_upload_url(self, upload_url):
"""Returns the given upload URL, signed for performing an HTTP ``POST``
against it, with this instance's OAuth credentials.
Such a signed URL can be used for uploading asset files to TypePad.
"""
# oauth GET params for file upload url
# since the form is multipart/form-data
req = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
token = self.token,
http_method = 'POST',
http_url = upload_url,
)
sign_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
req.set_parameter('oauth_signature_method', sign_method.get_name())
log.debug('Signing base string %r in get_file_upload_url()'
% (sign_method.build_signature_base_string(req, self.consumer,
self.token),))
req.sign_request(sign_method, self.consumer, self.token)
return req.to_url()
class TypePadClient(batchhttp.client.BatchClient, OAuthHttp):
"""An HTTP user agent for performing TypePad API requests.
A `TypePadClient` instance supports the same interface as `httplib2.Http`
instances, plus some special methods for performing OAuth authenticated
requests, and using TypePad's batch HTTP endpoint.
Each `TypePadClient` instance also has a `cookies` member, a dictionary
containing any additional HTTP cookies to send when making API requests.
"""
endpoint = 'http://api.typepad.com'
"""The URL against which to perform TypePad API requests."""
subrequest_limit = 20
"""The number of subrequests permitted for a given batch."""
def __init__(self, *args, **kwargs):
self.cookies = dict()
self._consumer = None
self._token = None
kwargs['endpoint'] = self.endpoint
super(TypePadClient, self).__init__(*args, **kwargs)
self.follow_redirects = False
def request(self, uri, method="GET", body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
"""Makes the given HTTP request, as specified.
If the instance's ``cookies`` dictionary contains any cookies, they
will be sent along with the request.
See `httplib2.Http.request()` for more information.
"""
if self.cookies:
if headers is None:
headers = {}
else:
headers = dict(headers)
cookies = ['='.join((key, value)) for key, value in self.cookies.items()]
headers['cookie'] = '; '.join(cookies)
return super(TypePadClient, self).request(uri, method, body, headers, redirections, connection_type)
def add_credentials(self, name, password, domain=""):
endparts = urlparse.urlsplit(self.endpoint)
if domain == '':
domain = endparts[1]
if isinstance(name, oauth.OAuthConsumer) and domain == endparts[1]:
# We're adding TypePad credentials, so upgrade to HTTPS.
self.endpoint = urlparse.urlunsplit(('https',) + endparts[1:])
super(TypePadClient, self).add_credentials(name, password, domain)
def clear_credentials(self):
super(TypePadClient, self).clear_credentials()
# We cleared our TypePad credentials too, so downgrade to HTTP.
endparts = urlparse.urlsplit(self.endpoint)
self.endpoint = urlparse.urlunsplit(('http',) + endparts[1:])
def signed_request(self, uri, method=None, body=None, headers=None):
"""Performs the given request, after signing the URL with the user
agent's configured OAuth credentials.
If the given URL is not an absolute URL, it is taken as relative to
this instance's endpoint first.
"""
host = urlparse.urlparse(uri)[1]
if not host:
uri = urlparse.urljoin(self.endpoint, uri)
return super(TypePadClient, self).signed_request(uri=uri,
method=method, body=body, headers=headers)
def _get_consumer(self):
return self._consumer
def _set_consumer(self, consumer):
if isinstance(consumer, tuple):
consumer = oauth.OAuthConsumer(consumer[0], consumer[1])
assert(consumer is None or isinstance(consumer, oauth.OAuthConsumer))
if self._consumer != consumer:
self._consumer = consumer
if consumer is None:
self.clear_credentials()
else:
self._reauthorize()
consumer = property(_get_consumer, _set_consumer)
def _get_token(self):
return self._token
def _set_token(self, token):
if isinstance(token, tuple):
token = oauth.OAuthToken(token[0], token[1])
assert(token is None or isinstance(token, oauth.OAuthToken))
if self._token != token:
self._token = token
# if token is None, forcibly clear credentials
if token is None:
self.clear_credentials()
else:
self._reauthorize()
token = property(_get_token, _set_token)
def _reauthorize(self):
if self._consumer is not None and self._token is not None:
self.clear_credentials()
self.add_credentials(self._consumer, self._token)
class ThreadAwareTypePadClientProxy(object):
def __init__(self):
self._local = threading.local()
def _get_client(self):
if not hasattr(self._local, 'client'):
self.client = typepad.client_factory()
return self._local.client
def _set_client(self, new_client):
self._local.client = new_client
client = property(_get_client, _set_client)
"""Property for accessing the real client instance.
Constructs a TypePadClient if the active thread doesn't have one."""
def __getattr__(self, name):
if name in ('_local', 'client'):
return super(ThreadAwareTypePadClientProxy,
self).__getattr__(name)
else:
return getattr(self.client, name)
def __setattr__(self, name, value):
if name in ('_local', 'client'):
super(ThreadAwareTypePadClientProxy, self).__setattr__(name,
value)
else:
setattr(self.client, name, value)
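# Illustrative sketch (placeholder keys; this module targets Python 2): wiring a
# TypePadClient with an OAuth consumer/token pair so that signed_request() can
# make authenticated calls against the TypePad API.
#
#     client = TypePadClient()
#     client.consumer = ('consumer-key', 'consumer-secret')
#     client.token = ('access-token-key', 'access-token-secret')
#     resp, content = client.signed_request('/users/@self.json', method='GET')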
|
import tensorflow as tf
from utils import FLAT_COLOR_DIMS, COLOR_DIMS
IMAGE_SIZE = 416
# TODO(indutny): there is no reason to not calculate grid_size automatically
GRID_SIZE = 13
GRID_CHANNELS = 7
PRIOR_SIZES = [
[ 0.14377480392797287, 0.059023397839700086 ],
[ 0.20904473801128326, 0.08287369797830041 ],
[ 0.2795802996888472, 0.11140121237843759 ],
[ 0.3760081365223815, 0.1493933380505552 ],
[ 0.5984967942142249, 0.2427157057261726 ],
]
class Model:
def __init__(self, config, prior_sizes=PRIOR_SIZES):
self.config = config
self.prior_sizes = tf.constant(prior_sizes, dtype=tf.float32,
name='prior_sizes')
self.iou_threshold = config.iou_threshold
self.weight_decay = config.weight_decay
self.grid_depth = config.grid_depth
self.lambda_angle = config.lambda_angle
self.lambda_obj = config.lambda_obj
self.lambda_no_obj = config.lambda_no_obj
self.lambda_coord = config.lambda_coord
self.trainable_variables = None
def forward(self, image, training=False, coreml=False):
with tf.variable_scope('resistenz', reuse=tf.AUTO_REUSE, \
values=[ image ]) as scope:
x = image
x = self.conv_bn(x, filters=16, size=3, name='1', training=training)
x = self.max_pool(x, size=2, stride=2, name='1')
x = self.conv_bn(x, filters=32, size=3, name='2', training=training)
x = self.max_pool(x, size=2, stride=2, name='2')
x = self.conv_bn(x, filters=64, size=3, name='3', training=training)
x = self.max_pool(x, size=2, stride=2, name='3')
x = self.conv_bn(x, filters=128, size=3, name='4', training=training)
x = self.max_pool(x, size=2, stride=2, name='4')
x = self.conv_bn(x, filters=256, size=3, name='5', training=training)
x = self.max_pool(x, size=2, stride=2, name='5')
x = self.conv_bn(x, filters=512, size=3, name='6', training=training)
x = self.max_pool(x, size=2, stride=1, name='6')
# TODO(indutny): residual routes
if not self.config.minimal:
x = self.conv_bn(x, filters=1024, size=3, name='pre_final',
training=training)
####
if not self.config.minimal:
x = self.conv_bn(x, filters=256, size=1, name='final_1',
training=training)
x = self.conv_bn(x, filters=512, size=3, name='final_2',
training=training)
else:
x = self.conv_bn(x, filters=128, size=3, name='final_2',
training=training)
x = self.conv_bn(x, filters=self.grid_depth * GRID_CHANNELS + \
FLAT_COLOR_DIMS, size=1,
name='last', activation=None, training=training)
x, colors, raw_colors = self.output(x, coreml=coreml)
self.trainable_variables = scope.trainable_variables()
return x, colors, raw_colors
def loss_and_metrics(self, prediction, prediction_raw_colors, labels, \
tag='train'):
# Just a helpers
def sum_over_cells(x, name=None, max=False):
if max:
return tf.reduce_max(x, axis=3, name=name)
else:
return tf.reduce_sum(x, axis=3, name=name)
def sum_over_grid(x, name=None, max=False):
if max:
return tf.reduce_max(tf.reduce_max(x, axis=2), axis=1, name=name)
else:
return tf.reduce_sum(tf.reduce_sum(x, axis=2), axis=1, name=name)
with tf.variable_scope('resistenz_loss_{}'.format(tag), reuse=False, \
values=[ prediction, prediction_raw_colors, labels ]):
labels, label_colors = tf.split(labels, \
[ GRID_CHANNELS, FLAT_COLOR_DIMS ], axis=-1)
prediction = self.parse_box(prediction, 'prediction')
labels = self.parse_box(labels, 'labels')
iou = self.iou(prediction, labels)
# (cos x - cos y)^2 + (sin x - sin y)^2 = 2 ( 1 - cos [ x - y ] )
angle_diff = tf.reduce_mean(
(prediction['angle'] - labels['angle']) ** 2, axis=-1,
name='angle_diff')
abs_cos_diff = tf.abs(1.0 - angle_diff, name='abs_cos_diff')
iou *= abs_cos_diff
# Compute masks
active_anchors = tf.one_hot(tf.argmax(iou, axis=-1), depth=self.grid_depth,
axis=-1, on_value=1.0, off_value=0.0, dtype=tf.float32,
name='active_anchors')
active_anchors *= labels['confidence']
# Disable training for anchors with high IoU
passive_anchors = labels['confidence']
passive_anchors *= tf.cast(iou >= self.iou_threshold, dtype=tf.float32)
inactive_anchors = 1.0 - tf.maximum(active_anchors, passive_anchors)
inactive_anchors = tf.identity(inactive_anchors, name='inactive_anchors')
expected_confidence = active_anchors
# Confidence loss
confidence_loss = \
(prediction['confidence'] - expected_confidence) ** 2 / 2.0
obj_loss = sum_over_cells( \
self.lambda_obj * active_anchors * confidence_loss, name='obj_loss')
no_obj_loss = sum_over_cells( \
self.lambda_no_obj * inactive_anchors * confidence_loss,
name='no_obj_loss')
# Coordinate loss
center_loss = tf.reduce_mean(
(GRID_SIZE * (prediction['center'] - labels['center'])) ** 2,
axis=-1, name='center_loss')
size_loss = tf.reduce_mean(
(tf.sqrt(prediction['size']) - tf.sqrt(labels['size'])) ** 2,
axis=-1, name='size_loss')
angle_loss = self.lambda_angle * (1.0 - abs_cos_diff)
coord_loss = self.lambda_coord * active_anchors * \
(center_loss + size_loss + angle_loss)
coord_loss = sum_over_cells(coord_loss, name='coord_loss')
# Color loss
label_colors = tf.split(label_colors, COLOR_DIMS, axis=-1,
name='split_label_colors')
color_loss = 0.0
for l_colors, p_colors in zip(label_colors, prediction_raw_colors):
color_loss += tf.nn.softmax_cross_entropy_with_logits_v2( \
labels=l_colors,
logits=p_colors)
# Mean for each group
color_loss /= len(label_colors) + 1e-23
color_loss *= tf.squeeze(labels['confidence'], axis=-1)
color_loss = tf.identity(color_loss, name='color_loss')
# To batch losses
obj_loss = sum_over_grid(obj_loss)
no_obj_loss = sum_over_grid(no_obj_loss)
coord_loss = sum_over_grid(coord_loss)
color_loss = sum_over_grid(color_loss)
# To scalars
obj_loss = tf.reduce_mean(obj_loss)
no_obj_loss = tf.reduce_mean(no_obj_loss)
coord_loss = tf.reduce_mean(coord_loss)
color_loss = tf.reduce_mean(color_loss)
# Weight decay
weight_loss = 0.0
for var in self.trainable_variables:
if not 'bn_' in var.name:
weight_loss += tf.nn.l2_loss(var)
weight_loss *= self.weight_decay
# Total
total_loss = obj_loss + no_obj_loss + coord_loss + color_loss
regularization_loss = weight_loss
# Count objects for metrics below
active_count = sum_over_grid(sum_over_cells(active_anchors),
name='active_count')
active_count = tf.expand_dims(active_count, axis=-1)
active_count = tf.expand_dims(active_count, axis=-1)
active_count = tf.expand_dims(active_count, axis=-1)
# Some metrics
mean_anchors = active_anchors / (active_count + 1e-23)
mean_iou = sum_over_grid(sum_over_cells(iou * mean_anchors))
mean_iou = tf.reduce_mean(mean_iou)
center_loss = self.lambda_coord * center_loss * active_anchors
size_loss = self.lambda_coord * size_loss * active_anchors
angle_loss = self.lambda_coord * angle_loss * active_anchors
center_loss = sum_over_grid(sum_over_cells(center_loss))
size_loss = sum_over_grid(sum_over_cells(size_loss))
angle_loss = sum_over_grid(sum_over_cells(angle_loss))
center_loss = tf.reduce_mean(center_loss)
size_loss = tf.reduce_mean(size_loss)
angle_loss = tf.reduce_mean(angle_loss)
# NOTE: create metrics outside of variable scope for clearer name
metrics = [
tf.summary.scalar('{}/iou'.format(tag), mean_iou),
tf.summary.scalar('{}/obj_loss'.format(tag), obj_loss),
tf.summary.scalar('{}/no_obj_loss'.format(tag), no_obj_loss),
tf.summary.scalar('{}/coord_loss'.format(tag), coord_loss),
tf.summary.scalar('{}/center_loss'.format(tag), center_loss),
tf.summary.scalar('{}/size_loss'.format(tag), size_loss),
tf.summary.scalar('{}/angle_loss'.format(tag), angle_loss),
tf.summary.scalar('{}/loss'.format(tag), total_loss),
tf.summary.scalar('{}/weight_loss'.format(tag), weight_loss),
tf.summary.scalar('{}/color_loss'.format(tag), color_loss),
]
return total_loss + regularization_loss, tf.summary.merge(metrics)
# Helpers
def conv_bn(self, input, filters, size, name, training, \
activation=lambda x: tf.nn.leaky_relu(x, alpha=0.1)) :
x = tf.layers.conv2d(input, filters=filters, kernel_size=size, \
padding='SAME',
name='conv_{}'.format(name))
if not activation is None:
x = tf.layers.batch_normalization(x, momentum=0.9, epsilon=1e-5,
training=training,
name='bn_{}'.format(name))
x = activation(x)
return x
def max_pool(self, input, size, stride, name):
return tf.layers.max_pooling2d(input, pool_size=size, strides=stride,
padding='SAME')
def output(self, x, coreml=False):
with tf.name_scope('output', values=[ x ]):
batch_size = tf.shape(x)[0]
if coreml:
# CoreML does not support rank-5 tensors, strided slices, and so on
x = tf.reshape(x, [
batch_size, GRID_SIZE, GRID_SIZE,
FLAT_COLOR_DIMS + self.grid_depth * GRID_CHANNELS,
], name='output')
return x
x, colors = tf.split(x, \
[ self.grid_depth * GRID_CHANNELS, FLAT_COLOR_DIMS ], axis=-1)
x = tf.reshape(x, [
batch_size, GRID_SIZE, GRID_SIZE, self.grid_depth, GRID_CHANNELS,
])
center, size, angle, confidence = \
tf.split(x, [ 2, 2, 2, 1 ], axis=-1)
center = tf.sigmoid(center)
size = tf.exp(size)
angle = tf.nn.l2_normalize(angle, axis=-1)
confidence = tf.sigmoid(confidence)
# Apply softmax over each color group
raw_colors = tf.split(colors, COLOR_DIMS, axis=-1)
split_colors = [ tf.nn.softmax(l, axis=-1) for l in raw_colors ]
colors = tf.concat(split_colors, axis=-1)
# Apply priors
with tf.name_scope('apply_prior_sizes',
values=[ size, self.prior_sizes ]):
size *= self.prior_sizes
x = tf.concat([ center, size, angle, confidence ], axis=-1,
name='output')
# Return raw_colors for use in the loss
return x, colors, raw_colors
def parse_box(self, input, name):
center, size, angle, confidence = tf.split(input, \
[ 2, 2, 2, 1 ], \
axis=-1, name='{}_box_split'.format(name))
confidence = tf.squeeze(confidence, axis=-1,
name='{}_confidence'.format(name))
center /= GRID_SIZE
half_size = size / 2.0
return {
'center': center,
'size': size,
'angle': angle,
'confidence': confidence,
'top_left': center - half_size,
'bottom_right': center + half_size,
'area': self.area(size, name),
}
def area(self, size, name):
width, height = tf.split(size, [ 1, 1 ], axis=-1)
return tf.squeeze(width * height, axis=-1, name='{}_area'.format(name))
def iou(self, a, b):
top_left = tf.maximum(a['top_left'], b['top_left'], name='iou_top_left')
bottom_right = tf.minimum(a['bottom_right'], b['bottom_right'],
name='iou_bottom_right')
size = tf.nn.relu(bottom_right - top_left, name='iou_size')
intersection = self.area(size, 'iou_area')
union = a['area'] + b['area'] - intersection
return intersection / (union + 1e-23)
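# Illustrative sketch (not from the original training pipeline; the config
# values below are assumptions): build the forward graph once with a minimal
# config to sanity-check tensor shapes.
if __name__ == '__main__':
    from types import SimpleNamespace
    _config = SimpleNamespace(iou_threshold=0.5, weight_decay=5e-4,
                              grid_depth=len(PRIOR_SIZES), lambda_angle=1.0,
                              lambda_obj=1.0, lambda_no_obj=0.5,
                              lambda_coord=5.0, minimal=True)
    _model = Model(_config)
    _images = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3],
                             name='images')
    _out, _colors, _raw_colors = _model.forward(_images, training=False)
    print(_out.shape)  # expected: (?, GRID_SIZE, GRID_SIZE, grid_depth, 7)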
|
<reponame>brianhie/trajectorama<filename>bin/dataset_zeisel_adolescent_brain.py
from anndata import AnnData
import loompy
import numpy as np
import os
from scanorama import *
import scanpy as sc
from scipy.sparse import vstack
from sklearn.preprocessing import normalize
from process import process, load_names, merge_datasets
from utils import *
NAMESPACE = 'zeisel_adolescent_brain'
DIMRED = 100
DR_METHOD = 'svd'
data_names = [
'data/mouse_brain/zeisel/amygdala',
'data/mouse_brain/zeisel/cerebellum',
'data/mouse_brain/zeisel/cortex1',
'data/mouse_brain/zeisel/cortex2',
'data/mouse_brain/zeisel/cortex3',
'data/mouse_brain/zeisel/hippocampus',
'data/mouse_brain/zeisel/hypothalamus',
'data/mouse_brain/zeisel/medulla',
'data/mouse_brain/zeisel/midbraindorsal',
'data/mouse_brain/zeisel/midbrainventral',
'data/mouse_brain/zeisel/olfactory',
'data/mouse_brain/zeisel/pons',
'data/mouse_brain/zeisel/striatumdorsal',
'data/mouse_brain/zeisel/striatumventral',
'data/mouse_brain/zeisel/thalamus',
]
def keep_valid(datasets):
barcode_sub_type = {}
with loompy.connect('data/mouse_brain/zeisel/l6_r1.loom') as ds:
for barcode, sub_type in zip(ds.ca['CellID'], ds.ca['ClusterName']):
#for barcode, sub_type in zip(ds.ca['CellID'], ds.ca['Taxonomy_group']):
barcode_sub_type[barcode] = sub_type
valid_idx = []
cell_types = []
sub_types = []
ages = []
for data_name in data_names:
with open('{}/meta.tsv'.format(data_name)) as f:
excluded = set([
'Blood', 'Excluded', 'Immune', 'Vascular',
])
for j, line in enumerate(f):
fields = line.rstrip().split('\t')
if fields[1] == 'Neurons' and fields[2] != '?':
valid_idx.append(j)
cell_types.append(fields[1])
if fields[0] in barcode_sub_type:
sub_types.append(barcode_sub_type[fields[0]])
else:
sub_types.append('NA')
try:
age = float(fields[2][1:])
except ValueError:
age = fields[2]
if age == 'p12, p35':
age = (12 + 35) / 2.
elif age == 'p16, p24':
age = (16 + 24) / 2.
elif age == 'p19, p21':
age = (19 + 21) / 2.
elif age == 'p21-23' or age == 'p21, p23':
age = (21 + 23) / 2.
elif age == 'p22-24':
age = (22 + 24) / 2.
elif age == 'p25-27':
age = (25 + 27) / 2.
elif age == '6w':
age = 7 * 6.
else:
continue
min_age = 19.
max_age = 60.
offset = (age - min_age) / (max_age - min_age) * 3
ages.append(19 + offset)
return valid_idx, np.array(cell_types), np.array(ages), np.array(sub_types)
datasets, genes_list, n_cells = load_names(data_names, norm=False)
qc_idx, cell_types, ages, sub_types = keep_valid(datasets)
datasets, genes = merge_datasets(datasets, genes_list)
X = vstack(datasets)
X = X[qc_idx]
qc_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
if s >= 500 ]
tprint('Found {} valid cells among all datasets'.format(len(qc_idx)))
X = X[qc_idx]
cell_types = cell_types[qc_idx]
sub_types = sub_types[qc_idx]
ages = ages[qc_idx]
if not os.path.isfile('data/dimred/{}_{}.txt'
.format(DR_METHOD, NAMESPACE)):
mkdir_p('data/dimred')
tprint('Dimension reduction with {}...'.format(DR_METHOD))
X_dimred = reduce_dimensionality(normalize(X), dim_red_k=DIMRED)
tprint('Dimensionality = {}'.format(X_dimred.shape[1]))
np.savetxt('data/dimred/{}_{}.txt'
.format(DR_METHOD, NAMESPACE), X_dimred)
else:
X_dimred = np.loadtxt('data/dimred/{}_{}.txt'
.format(DR_METHOD, NAMESPACE))
dataset = AnnData(X)
dataset.var['gene_symbols'] = genes
dataset.obs['cell_types'] = [ NAMESPACE + '_' + l for l in cell_types ]
dataset.obs['sub_types'] = [ NAMESPACE + '_' + l for l in sub_types ]
dataset.obs['ages'] = ages
datasets = [ dataset ]
namespaces = [ NAMESPACE ]
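# Illustrative follow-up (not in the original script; the output path is an
# assumption): persist the assembled AnnData object for downstream analysis.
#
#     mkdir_p('data/processed')
#     dataset.write('data/processed/{}.h5ad'.format(NAMESPACE))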
|
#!/usr/bin/env python
from __future__ import print_function
import matplotlib as mpl
#mpl.use("Agg")
import numpy as np
import matplotlib.pyplot as plt
from costar_models import *
from costar_models.planner import GetOrderedList, PrintTopQ
from costar_models.sampler2 import PredictionSampler2
from costar_models.datasets.npz import NpzDataset
from costar_models.datasets.npy_generator import NpzGeneratorDataset
from costar_models.datasets.h5f_generator import H5fGeneratorDataset
from costar_models.planner import *
from costar_models.multi import *
def main(args):
'''
Tool for running model training without the rest of the simulation/planning/ROS
code. This should be more or less independent and only rely on a couple
external features.
'''
ConfigureGPU(args)
np.random.seed(0)
data_file_info = args['data_file'].split('.')
data_type = data_file_info[-1]
root = ""
for i, tok in enumerate(data_file_info[:-1]):
if i < len(data_file_info)-1 and i > 0:
root += '.'
root += tok
if data_type == "npz":
dataset = NpzGeneratorDataset(root)
data = dataset.load(success_only = args['success_only'])
elif data_type == "h5f":
dataset = H5fGeneratorDataset(root)
data = dataset.load(success_only = args['success_only'])
else:
raise NotImplementedError('data type not implemented: %s'%data_type)
if 'model' in args and args['model'] is not None:
model = MakeModel(taskdef=None, **args)
model.validate = True
model.load(world=None,**data)
train_generator = model.trainGenerator(dataset)
test_generator = model.testGenerator(dataset)
print(">>> GOAL_CLASSIFIER")
image_discriminator = LoadGoalClassifierWeights(model,
make_classifier_fn=MakeImageClassifier,
img_shape=(64, 64, 3))
image_discriminator.compile(loss="categorical_crossentropy",
metrics=["accuracy"],
optimizer=model.getOptimizer())
show = False
correct_g1 = 0
correct_g2 = 0
total = 0
err1_sum = 0.
err2_sum = 0.
v_sum = 0.
osum = 0.
ii = 0
for filename in dataset.test:
print(filename)
data = dataset.loadFile(filename)
length = data['example'].shape[0]
features, targets = model._getData(**data)
[I0, I, o1, o2, oin] = features
[I_target, I_target2, o1_1h, value, qa, ga, o2_1h] = targets
for i in range(length):
ii += 1
xi = np.expand_dims(I[i],axis=0)
x0 = np.expand_dims(I0[i],axis=0)
prev_option = np.array([oin[i]])
h = model.encode(xi)
h0 = model.encode(x0)
h_goal = model.transform(h0, h, np.array([o1[i]]))
h_goal2 = model.transform(h0, h_goal, np.array([o2[i]]))
p = model.pnext(h0, h_goal, np.array([o1[i]]))[0]
xg = model.decode(h_goal)
xg2 = model.decode(h_goal2)
if show:
plt.subplot(1,4,1); plt.imshow(x0[0])
plt.subplot(1,4,2); plt.imshow(xi[0])
plt.subplot(1,4,3); plt.imshow(xg[0])
plt.subplot(1,4,4); plt.imshow(xg2[0])
plt.show()
res1 = np.argmax(image_discriminator.predict([x0, xg]), axis=1)
res2 = np.argmax(image_discriminator.predict([x0, xg2]), axis=1)
if res1[0] == o1[i]:
correct_g1 += 1
if res2[0] == o2[i]:
correct_g2 += 1
err1 = np.mean(np.abs((xg[0] - I_target[i])))
err2 = np.mean(np.abs((xg2[0] - I_target2[i])))
v = model.value(h_goal2)
if v[0] > 0.5 and value[i] > 0.5:
vacc = 1.
elif v[0] < 0.5 and value[i] < 0.5:
vacc = 1.
else:
vacc = 0.
if p[0,o2[i]] > 0.1:
osum += 1.
else:
#print(GetOrderedList(p[0]))
#print(p[0,o2[i]], o2[i])
pass
err1_sum += err1
err2_sum += err2
total += 1
v_sum += vacc
mean1 = err1_sum / total
mean2 = err2_sum / total
print(correct_g1, "/", total, correct_g2, "/", total, "...",
o1[i], o2[i],
res1[0], res2[0],
#"errs =", err1, err2,
"means =", mean1, mean2,
"next =", osum, (osum/total),
"value =", v, value[i], "avg =", (v_sum/total))
else:
raise RuntimeError('Must provide a model to load')
if __name__ == '__main__':
args = ParseModelArgs()
if args['profile']:
import cProfile
cProfile.run('main(args)')
else:
main(args)
|
# Downloads and uses the XML version of the US Code to extract a table of contents.
#
# Outputs JSON to STDOUT. Run and save with:
# ./run structure_xml > structure.json
#
# options:
# title: Do only a specific title (e.g. "5", "5a", "25")
# sections: Return a flat hierarchy of only titles and sections (no intervening layers)
# debug: Output debug messages only, and no JSON output (dry run)
# force: Force a re-download of the US Code
import glob, re, lxml.etree, lxml.html, json, sys, os, os.path, urllib, zipfile
import utils
import HTMLParser
pars = HTMLParser.HTMLParser()
section_symbol = u'\xa7'
ns = {
"uslm": "http://xml.house.gov/schemas/uslm/1.0"
}
def run(options):
# optional: don't print json out, just --debug information
debug = options.get('debug', False)
# optional: limit to a specific --title
title = options.get("title", None)
if not title:
title = "*"
else:
title = "xml_usc" + title + "@*"
# sync XML to disk as needed (cache by default)
download_usc(options)
filenames = glob.glob("data/uscode.house.gov/xml/%s.zip" % title)
filenames.sort()
# optional: --limit to a number of titles
limit = options.get("limit", None)
if limit:
filenames = filenames[0:int(limit)]
# optional: only retrieve titles and --sections, nothing in between
sections_only = options.get("sections", False)
# process titles
TOC = [ ]
for fn in filenames:
zf = zipfile.ZipFile(fn, "r")
xmlbody = zf.read(os.path.basename(fn).replace("xml_", "").replace(".zip", ".xml"))
#print xmlbody
dom = lxml.etree.fromstring(xmlbody)
titlenode = dom.xpath("uslm:main/uslm:title", namespaces=ns)[0]
proc_node(titlenode, TOC, [], sections_only)
# Sort the titles (take into account appendix notation).
TOC.sort(key = lambda title : (int(title["number"].replace("a", "")), title["number"]))
# Write output in JSON to stdout.
if debug:
print "\n(dry run only, not outputting)"
else:
json.dump(TOC, sys.stdout, indent=2, sort_keys=True, check_circular=False)
def proc_node(node, parent, path, sections_only):
# Form the node for this title/chapter/.../section.
remove_footnotes(node.xpath("uslm:heading", namespaces=ns)[0])
entry = {
"level": lxml.etree.QName(node.tag).localname,
"number": unicode(node.xpath("string(uslm:num/@value)", namespaces=ns)),
"name": unicode(node.xpath("string(uslm:heading)", namespaces=ns)),
}
if entry["level"] == "level": entry["level"] = "heading"
# To compare with our older HTML scraper, put these lines back in to normalize the content a bit.
#entry["name"] = entry["name"].replace(u"\u2019", "'") # curly right apostrophe => straight apostrophe (it's inconsistent, so for diffs: sed -i "s/\\\\u2019/'/g" structure_html.json)
#entry["name"] = entry["name"].replace(u"\u202f", u"\u00a0") # narrow no-break space => no-break space (probably convert this to a space later on)
#entry["name"] = entry["name"].replace(u"\u2013", "-") # replace en-dashes with simple hyphens
#if u"\u00a7\u202f" in entry["number"]: return # the HTML converter misses these
# Misformatting
entry["number"] = entry["number"].replace(u"\u00a7\u202f", "") # section symbol plus narrow no-break space
# Text reformatting.
entry["name"] = entry["name"].strip() # TODO: Flag upstream, remove this line when fixed.
entry["number"] = entry["number"].strip() # TODO: Flag upstream, remove this line when fixed.
entry["name"] = entry["name"].replace(u"\u00ad", "") # remove soft hyphens
entry["number"] = entry["number"].replace(u"\u2013", "-") # replace en-dashes with simple hyphens
if entry["number"] == "": entry["number"] = None
# Don't record structure of phantom parts.
if entry["name"] in ("]", "Repealed", "Reserved", "Reserved]", "Omitted", "Omitted]", "Transferred", "Transferred]", "Omitted or Transferred", "Vacant]"):
return
if re.match(r"(Repealed.*|Transferred|Omitted|Renumbered .*\])(\.|$)", entry["name"]):
return
# Make an array of level numbering in the path to this section.
# (To compare with the old HTML output, disable everything but section-level citations.)
if entry['level'] != None and entry['number'] != None:
path = path + [(entry['level'], entry['number'])]
else:
path = path + [None] # flag that this level and descendants do not have a path
if entry["level"] == "section":
entry["citation"] = "usc/%s/%s" % (path[0][1], entry["number"]) # title number & section number only
elif entry["level"] == "chapter":
# chapter numbering is unique within a title, like sections, but may be split across
# divisions and other levels beneath the title level. since finding chapter citations
# is important, pop their scope up so their citation values are predictable without
# having to know its intermediate levels of embedding.
entry["citation"] = "usc/title/%s/chapter/%s" % (path[0][1], entry["number"])
elif None in path:
# can't create a citation if there is an unnumbered level on the path
pass
else:
# for other levels, encode them beneath the title as a path through the numbers
entry["citation"] = "usc/%s" % "/".join("%s/%s" % p for p in path)
# Debugging helper.
#if entry.get("citation") == "usc/4/107":
# print lxml.etree.tostring(node)
# print entry
# Recurse into children.
children = []
if entry["level"] != "section":
# 25 USC 450l has a section within a section, so skip this processing because we never want bottom-half levels of structure
for child in node.xpath("uslm:title|uslm:subtitle|uslm:chapter|uslm:subchapter|uslm:part|uslm:subpart|uslm:division|uslm:level|uslm:section", namespaces=ns):
proc_node(child, children, path, sections_only)
if len(children):
entry["subparts"] = children
# Our older HTML scraper didn't include levels without subparts, except sections.
#if not len(children) and entry["level"] != "section": return
# Pop back up.
if sections_only and entry["level"] not in ['title', 'section']:
# We're only interested in these two levels. Flatten the hierarchy.
parent.extend(children)
else:
# Add this entry to the parent's list of children.
parent.append(entry)
def remove_footnotes(node):
# Remove footnote text and footnote markers from the heading text.
# lxml makes removing nodes tricky because the tail text gets removed too, but it needs to be moved.
def filter_nbsp(t):
if not t: return ""
if t[-1] in (u"\u00a0", u"\u202f"): t = t[:-1] # footnotes are often preceded by a non-breaking space which we can remove
return t
for n in node.xpath("uslm:note|uslm:ref[@class='footnoteRef']", namespaces=ns):
if n.tail:
if n.getprevious() != None:
n.getprevious().tail = filter_nbsp(n.getprevious().tail) + n.tail
else:
n.getparent().text = filter_nbsp(n.getparent().text) + n.tail
n.getparent().remove(n)
def download_usc(options):
debug = options.get("debug", False)
dest_dir = "data/uscode.house.gov/xml"
utils.mkdir_p(dest_dir)
base_url = "http://uscode.house.gov/download/"
index_page = lxml.html.parse(urllib.urlopen(base_url + "download.shtml")).getroot()
for anode in index_page.xpath('//a[.="[XML]"]'):
if "uscAll@" in anode.get("href"): continue # skip the all-titles archive
if "xml_usc34@" in anode.get("href"): continue # title 34 doesn't exist (was repealed)
source_url = base_url + anode.get("href")
dest_path = dest_dir + "/" + os.path.basename(anode.get("href"))
if os.path.exists(dest_path) and not options.get("force", False):
continue
os.system("wget -O %s %s" % (dest_path, source_url))
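# Example invocation (sketch, mirroring the options documented at the top of
# this file): run({"title": "5", "sections": True}) would limit processing to
# Title 5 and flatten the output to titles and sections only.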
|
import os
from datetime import datetime, timedelta, timezone
from json import loads
# Names of the key directories.
CONFIG_DIR = "config"
RSC_DIR = "resource"
LOG_DIR = "log"
# Get the current working directory and its parent.
cwd = os.getcwd()
cwd_parent = os.path.dirname(cwd)
# If the config directory lives under the parent directory, the parent is the root.
if os.path.exists(os.path.join(cwd_parent, CONFIG_DIR)):
rootdir = cwd_parent
# If the config directory lives under the current directory, the current directory is the root.
elif os.path.exists(os.path.join(cwd, CONFIG_DIR)):
rootdir = cwd
# Locate the config directory and the configuration files.
config_dir = os.path.join(rootdir, CONFIG_DIR)
batch_dir = os.path.join(config_dir, "batch-backup")
devicemap_dir = os.path.join(config_dir, "devicemap-backup")
ne_dir = os.path.join(config_dir, "ne-backup")
batch_file = os.path.join(config_dir, "batch.bat")
devicemap_file = os.path.join(config_dir, "devicemap.json")
ne_file = os.path.join(config_dir, "ne.txt")
# Locate the resource directory.
rsc_dir = os.path.join(rootdir, RSC_DIR)
if not os.path.exists(rsc_dir):
os.mkdir(rsc_dir)
# Locate the log directory.
log_dir = os.path.join(rootdir, LOG_DIR)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
# Time zone setting (UTC+8). Note: the timezone object is created here but never
# stored or applied, so this call currently has no effect.
timezone(timedelta(hours=8))
def write_log(device_id: str, message: str) -> None:
    """Write a log entry.

    Args:
        device_id: ID of the device requesting the log entry.
        message: The log message to record.
    """
log_path = os.path.join(log_dir, f"{device_id}.log")
with open(log_path, "a", encoding="utf-8") as fa:
fa.write(f"[{datetime.now().strftime('%H:%M:%S.%f')[:-3]}] {message}\n")
def cover_batch(stage: str) -> None:
    """Switch the one-click startup file to the configuration for the given stage.

    Args:
        stage: The target stage.
    """
src = os.path.join(batch_dir, f"{stage}.bat")
    # Read the stage-specific configuration.
with open(src, "r", encoding="utf-8") as fr:
config = fr.read()
    # Write it as the active configuration.
with open(batch_file, "w", encoding="utf-8") as fw:
fw.write(config)
def cover_ne(stage: str) -> None:
    """Switch the physical-layer configuration file to the configuration for the given stage.

    Args:
        stage: The target stage.
    """
src = os.path.join(ne_dir, f"{stage}.txt")
    # Read the stage-specific configuration.
with open(src, "r", encoding="utf-8") as fr:
config = fr.read()
    # Write it as the active configuration.
with open(ne_file, "w", encoding="utf-8") as fw:
fw.write(config)
def cover_devicemap(stage: str) -> None:
    """Switch the device-topology file to the configuration for the given stage.

    Args:
        stage: The target stage.
    """
src = os.path.join(devicemap_dir, f"{stage}.json")
    # Read the stage-specific configuration.
with open(src, "r", encoding="utf-8") as fr:
config = fr.read()
    # Write it as the active configuration.
with open(devicemap_file, "w", encoding="utf-8") as fw:
fw.write(config)
def run_batch() -> None:
    """Run the one-click startup file."""
os.system(batch_file)
def get_host_config() -> list[str]:
    """Get the host configuration.

    Returns:
        The list of host device IDs in the topology.
    """
    # Open the configuration file.
try:
with open(devicemap_file, "r", encoding="utf-8") as fr:
        # Read the configuration for this device.
try:
hosts = loads(fr.read())["host"]
except KeyError:
print(f"[Error] Hosts absence")
exit(-1)
else:
return hosts
except FileNotFoundError:
print(f"[Error] {devicemap_file} not found")
exit(-1)
def get_switch_config(device_id: str) -> int:
    """Get the switch configuration.

    Args:
        device_id: The device ID.

    Returns:
        The number of physical-layer instances attached to the switch.
    """
    # Open the configuration file.
try:
with open(devicemap_file, "r", encoding="utf-8") as fr:
        # Read the configuration for this device.
try:
num = loads(fr.read())["switch"][device_id]["phynum"]
except KeyError:
print(f"[Error] Device {device_id} absence")
exit(-1)
else:
return num
except FileNotFoundError:
print(f"[Error] {devicemap_file} not found")
exit(-1)
def get_router_WAN(device_id: str) -> dict[str, dict]:
    """Get the WAN environment for the routing table.

    Args:
        device_id: The router's device ID.

    Returns:
        The router's WAN environment:
        - key: the network-layer port number of a neighbouring router.
        - value: path information for reaching that router, with two keys:
            - "exit": the local physical-layer port a message should leave from to reach it.
            - "cost": the cost of reaching that router.
    """
    # Open the configuration file.
try:
with open(devicemap_file, "r", encoding="utf-8") as fr:
        # Read the initial routing table.
try:
WAN_env: dict = loads(fr.read())["router"][device_id]["WAN"]
except KeyError:
print(f"[Error] Device {device_id} absence")
exit(-1)
else:
return WAN_env
except FileNotFoundError:
print(f"[Error] {devicemap_file} not found")
exit(-1)
def get_router_LAN(device_id: str) -> dict[str, str]:
    """Get the LAN environment for the routing table.

    Args:
        device_id: The router's device ID.

    Returns:
        The router's LAN environment:
        - key: the device ID of a host behind this router.
        - value: the local physical-layer port that reaches that host.
    """
    # Open the configuration file.
try:
with open(devicemap_file, "r", encoding="utf-8") as fr:
        # Read the initial routing table.
try:
LAN_env: dict = loads(fr.read())["router"][device_id]["LAN"]
except KeyError:
print(f"[Error] Device {device_id} absence")
exit(-1)
else:
return LAN_env
except FileNotFoundError:
print(f"[Error] {devicemap_file} not found")
exit(-1)
def save_rsc(data: bytes) -> tuple[str, bool]:
    """Save a file into the resource directory.

    Args:
        data: The file content as bytes.

    Returns:
        A (filepath, success) tuple: the saved file path and True on success,
        or an empty string and False on failure.
    """
filepath = os.path.join(
rsc_dir, f"received-{datetime.now().strftime('%H%M%S')}.png")
try:
with open(filepath, "wb") as fw:
fw.write(data)
except Exception:
return "", False
else:
return filepath, True
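# Illustrative sketch (assumptions: a stage named "stage1" exists under the
# *-backup directories, and "console" is used as a placeholder device ID):
# switch every configuration file to that stage and log the change.
if __name__ == "__main__":
    for _cover in (cover_batch, cover_ne, cover_devicemap):
        _cover("stage1")
    write_log("console", "Switched all configuration files to stage1.")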
|
<filename>t/test_maybe.py
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import annotations
from collections.abc import Callable, Hashable
from typing import TypeVar, Final
import pytest
from allusions.maybe import Some, Empty, Maybe
from t.util import VALUES, is_hashable
_T = TypeVar("_T")
_U = TypeVar("_U")
@pytest.mark.parametrize('v', VALUES)
def test_some_unwrap_returns_original_object(v: object) -> None:
assert Some(v).unwrap() is v
def test_empty_unwrap_raises_error() -> None:
with pytest.raises(ValueError):
Empty().unwrap()
_MAP_TEST_CASES: Final = [
(1, lambda x: x + 1),
('a', lambda s: s * 3)
# todo more test cases
]
@pytest.mark.parametrize('v, fn', _MAP_TEST_CASES)
def test_some_map(v: _T, fn: Callable[[_T], object]) -> None:
assert Some(v).map(fn) == Some(fn(v))
@pytest.mark.parametrize('_, fn', _MAP_TEST_CASES)
def test_empty_map(_: object, fn: Callable[[object], object]) -> None:
assert Empty().map(fn) == Empty()
_FLATMAP_TEST_CASES: Final = [
(1, lambda x: Some(x + 1), Some(2)),
(1, lambda x: Empty(), Empty())
# todo more test cases
]
@pytest.mark.parametrize('v, fn, exp', _FLATMAP_TEST_CASES)
def test_some_flat_map(v: _T, fn: Callable[[_T], Maybe[_U]], exp: Maybe[_U]) -> None:
assert Some(v).flat_map(fn) == exp
@pytest.mark.parametrize('_v, fn, _exp', _FLATMAP_TEST_CASES)
def test_empty_flat_map(_v: object, fn: Callable[[object], Maybe[_U]], _exp: object) -> None:
assert Empty().flat_map(fn) == Empty()
@pytest.mark.parametrize('value, if_some, if_empty, exp', [
(1, lambda x: x + 1, lambda: 0, 2),
(1, lambda x: 'cat', lambda: 'dog', 'cat')
# todo more test cases
])
def test_some_match(
value: _T, if_some: Callable[[_T], _U], if_empty: Callable[[], _U], exp: _U
) -> None:
assert Some(value).match(if_some=if_some, if_empty=if_empty) == exp
@pytest.mark.parametrize('if_some, if_empty, exp', [
(lambda x: x + 1, lambda: 0, 0),
(lambda x: 'cat', lambda: 'dog', 'dog')
# todo more test cases
])
def test_empty_match(if_some: Callable[[object], _U], if_empty: Callable[[], _U], exp: _U) -> None:
assert Empty().match(if_some=if_some, if_empty=if_empty) == exp
@pytest.mark.parametrize('v', VALUES)
def test_some_eq_is_reflexive(v: object) -> None:
some = Some(v)
assert some == some
@pytest.mark.parametrize('first', VALUES)
@pytest.mark.parametrize('second', VALUES)
def test_some_eq_is_symmetric(first: object, second: object) -> None:
assert (Some(first) == Some(second)) == (Some(second) == Some(first))
def test_empty_eq_is_reflexive_and_symmetric() -> None:
assert Empty() == Empty()
@pytest.mark.parametrize('first', VALUES)
@pytest.mark.parametrize('second', VALUES)
def test_some_neq_is_symmetric(first: object, second: object) -> None:
assert (Some(first) != Some(second)) == (Some(second) != Some(first))
@pytest.mark.parametrize('v', VALUES)
def test_some_and_empty_are_not_equal(v: object) -> None:
assert Some(v) != Empty() and Empty() != Some(v)
# todo test eq and neq are transitive
@pytest.mark.parametrize('hashable', filter(is_hashable, VALUES))
def test_some_is_hashable_if_contents_are_hashable(hashable: Hashable) -> None:
{Some(hashable)}
def test_empty_is_hashable() -> None:
{Empty()}
@pytest.mark.parametrize('unhashable', [list(), dict(), set()])
def test_some_is_not_hashable_if_contents_are_not_hashable(unhashable: object) -> None:
some = Some(unhashable)
with pytest.raises(TypeError, match='unhashable'):
{some}
@pytest.mark.parametrize('maybe, exp', [
(Some(1), 'Some(1)'),
(Some('a'), "Some('a')"),
(Some(1.), 'Some(1.0)'),
(Empty(), 'Empty()'),
(Some(Some(1)), 'Some(Some(1))'),
(Some(Some('a')), "Some(Some('a'))"),
(Some(Empty()), 'Some(Empty())'),
])
def test_maybe_repr(maybe: Maybe[object], exp: str) -> None:
assert repr(maybe) == exp
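# Quick illustration (sketch; mirrors the API exercised by the tests above):
# chaining map/flat_map/match on a Maybe value.
if __name__ == '__main__':
    maybe_port = Some("8080").map(int).flat_map(
        lambda p: Some(p) if 0 < p < 65536 else Empty())
    print(maybe_port.match(if_some=lambda p: f"port {p}", if_empty=lambda: "no port"))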
|
import worker
from pyspark.mllib.feature import Word2VecModel
from pyspark.rdd import PipelinedRDD
from mock import patch
from test_helpers import get_job, get_fake_mongo_client
from multiprocessing import Queue, Process
from bson.binary import Binary
import pymongo
import json
def test_cleanstr():
assert worker.cleanstr("") == ""
assert worker.cleanstr("!@T#$%^&e*()-.;'S{}[]t/`~-=+_") == "t e s t"
assert worker.cleanstr(" :tesTing' cleAnsTr. ") == "testing cleanstr"
def test_train_return_type(spark_context, testserver):
urls = [testserver.url]
result = worker.train(spark_context, urls)
assert isinstance(result, Word2VecModel)
def test_url2rdd_return_type(spark_context, testserver):
result = worker.url2rdd(spark_context, testserver.url)
assert isinstance(result, PipelinedRDD)
def test_update_model_db(spark_context, testserver):
""" Test update_model. Ensure model collection is updated and the
appropriate data is stored.
:param spark_context: a pre-configured spark context fixture.
:param testserver: a WSGIServer fixture.
"""
inq = Queue()
outq = Queue()
job = get_job()
job['urls'] = [testserver.url]
expected_id = job['_id']
db = get_fake_mongo_client().ophicleide
db.models.insert_one(job)
inq.put(job)
worker.update_model(spark_context, inq, outq, db, 'http://testurl')
outq.get()
data_in_db = db.models.find_one({'_id': expected_id})
expected_keys = ['_id', 'model', 'status', 'last_updated']
assert all([key in data_in_db for key in expected_keys])
assert data_in_db['_id'] == expected_id
assert data_in_db['status'] == 'ready'
model = data_in_db['model']
assert 'words' in model and 'zndvecs' in model
words, zn = model['words'], model['zndvecs']
assert isinstance(words, list)
assert isinstance(zn, Binary)
with open('tests/resources/test_training_model_words_list.json') \
as json_data:
expected_data = json.load(json_data)
assert words == expected_data['words']
@patch('worker.SparkContext')
@patch('pymongo.MongoClient')
def test_workloop_output_in_queue(mc, sc, spark_context, testserver):
""" Test workloop. Start a workloop process and ensure the output queue
receives the appropriate response.
:param mc: a patched pymongo.MongoClient
:param sc: a mocked worker.SparkContext
:param spark_context: a pre-configured spark context fixture.
:param testserver: a WSGIServer fixture.
"""
sc.return_value = spark_context
mc.return_value = get_fake_mongo_client()
inq = Queue()
outq = Queue()
job = get_job()
job['urls'] = [testserver.url]
expected_id = job['_id']
expected_name = job['name']
db = pymongo.MongoClient("http://testurl").ophicleide
db.models.insert_one(job)
inq.put(job)
p = Process(target=worker.workloop, args=("local[2]", inq, outq,
"http://testurl"))
p.start()
# wait for worker to spin up
outq.get()
# wait for worker to train model, raise timeout if on a slower system.
result = outq.get(timeout=15)
p.terminate()
mid = result[0]
model_name = result[1]
assert model_name == expected_name
assert mid == expected_id
|
#!/usr/bin/env python2.7
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id$
#
# Author: mattmann
# Description: This program reads a Common Crawl Architecture dump
# directory as generated by Apache Nutch, e.g,. see:
# https://wiki.apache.org/nutch/CommonCrawlDataDumper
# and then uses that CBOR-encoded JSON data as a basis for posting
# the data to Elasticsearch using this simple schema:
#
#
# {
# url : <url of raw page>,
# timestamp: <timestamp for data when scraped, in epoch milliseconds>,
# team: <name of crawling team>,
# crawler: <name of crawler; each type of crawler should have a distinct name or reference>,
# raw_content: <full text of raw crawled page>,
# content_type: <IANA mimetype representing the crawl_data content>,
# crawl_data {
# content: <optional; used to store cleaned/processed text, etc>,
# images:[an array of URIs to the images present within the document],
# videos:[an array of URIs to the videos present within the document]
#    }
# }
# To call this program, do something like the following
#
# ./memex_cca_esindex.py -t "JPL" -c "Nutch 1.11-SNAPSHOT" -d crawl_20150410_cca/ \
# -u https://user:pass@localhost:9200/ -i memex-domains -o stuff \
# -p dump.json -s http://imagecat.dyndns.org/weapons/alldata/
#
# If you want verbose logging, turn it on with -v
import codecs
import traceback
from tika import parser
from elasticsearch import Elasticsearch
import json
import os
import cbor
import sys
import getopt
import hashlib
import datetime
from multiprocessing import Pool
from functools import partial
_verbose = False
_helpMessage = '''
Usage: memex_cca_esindex [-t <crawl team>] [-c <crawler id>] [-d <cca dir>] [-u <url>]
[-i <index>] [-o <docType>] [-p <path>] [-s <raw store prefix path>]
Operation:
-t --team
The name of the crawler team, e.g. "JPL"
-c --crawlerId
The identifier of the crawler, e.g., "Nutch 1.11-SNAPSHOT"
-d --dataDir
The directory where CCA CBOR JSON files are located.
-u --url
The URL to Elasticsearch. If you need auth, you can use RFC-1738 to specify the url, e.g., https://user:secret@localhost:443
-p --path
The path to output file where the data shall be stored instead of indexing to elasticsearch
-s --storeprefix
    The path to the raw file store where the raw files are kept. Note that this is different from the CBOR file dump directory.
-i --index
The Elasticsearch index, e.g., memex-domains, to index to.
-o --docType
The document type e.g., weapons, to index to.
'''
def list_files(dir):
r = []
subdirs = [x[0] for x in os.walk(dir)]
for subdir in subdirs:
files = os.walk(subdir).next()[2]
if (len(files) > 0):
for file in files:
r.append(subdir + "/" + file)
return r
def getContentType(ccaDoc):
for header in ccaDoc["response"]["headers"]:
if "Content-Type" in header:
return ccaDoc["response"]["headers"]["Content-Type"]
return "application/octet-stream"
def indexDoc(url, doc, index, docType):
print "Indexing "+doc["url"]+" to ES at: ["+url+"]"
es = Elasticsearch([url])
res = es.index(index=index, doc_type=docType, id=doc["id"], body=doc)
print(res['created'])
def esIndexDoc(f, team, crawler, index, docType, failedList, failedReasons, procCount,
url=None, outPath=None, storeprefix=None):
CDRVersion = 2.0
outFile = codecs.open(outPath +"/" + str(os.path.basename(f)), 'w', 'utf-8') if outPath else None
with open(f, 'r') as fd:
try:
newDoc = {}
c = fd.read()
# fix for no request body out of Nutch CCA
            c = c.replace("\"body\" : null", "\"body\" : \"null\"")
ccaDoc = json.loads(cbor.loads(c), encoding='utf8')
newDoc["url"] = ccaDoc["url"]
newDoc["timestamp"] = datetime.datetime.fromtimestamp(ccaDoc["imported"])
newDoc["team"] = team
newDoc["crawler"] = crawler
contentType = getContentType(ccaDoc)
newDoc["content_type"] = contentType
parsed = parser.from_buffer(ccaDoc["response"]["body"].encode("utf-8"))
newDoc["crawl_data"] = {}
if "content" in parsed:
newDoc["extracted_text"] = parsed["content"]
if 'inlinks' in ccaDoc and ccaDoc['inlinks']:
newDoc["crawl_data"]["obj_parents"] = ccaDoc['inlinks']
newDoc["obj_parent"] = ccaDoc['inlinks'][0]
# CDR version 2.0 additions
newDoc["id"] = ccaDoc["key"]
newDoc["obj_original_url"] = ccaDoc["url"]
if 'text' in contentType or 'ml' in contentType:
# web page
newDoc["raw_content"] = ccaDoc["response"]["body"]
else:
# binary content, we link to store
                # ideally we should store the content in both cases, but the CDR schema was decided this way
newDoc["obj_stored_url"] = url_to_nutch_dump_path(ccaDoc["url"], prefix=storeprefix)
newDoc["extracted_metadata"] = parsed["metadata"] if 'metadata' in parsed else {}
newDoc["version"] = CDRVersion
verboseLog("Indexing ["+f+"] to Elasticsearch.")
if url:
indexDoc(url, newDoc, index, docType)
if outFile:
outFile.write(json.dumps(newDoc))
outFile.write("\n")
print "Processed " + f + " successfully"
procCount += 1
except Exception as err:
failedList.append(f)
failedReasons.append(str(err))
traceback.print_exc()
def esIndex(ccaDir, team, crawler, index, docType, url=None, outPath=None, storeprefix=None):
if not url and not outPath:
raise Exception("Either Elastic Url or output path must be specified.")
ccaJsonList = list_files(ccaDir)
print "Processing ["+str(len(ccaJsonList))+"] files."
procCount = 0
failedList=[]
failedReasons=[]
CDRVersion = 2.0
# outFile = codecs.open(outPath, 'w', 'utf-8') if outPath else None
pool = Pool(processes=3)
results = pool.map(partial(esIndexDoc, team=team, crawler=crawler, index=index,
docType=docType, failedList=failedList, failedReasons=failedReasons, procCount=procCount,
url=url, outPath=outPath, storeprefix=storeprefix), ccaJsonList)
pool.close()
pool.join()
# for f in ccaJsonList:
# with open(f, 'r') as fd:
# try:
# newDoc = {}
# c = fd.read()
# # fix for no request body out of Nutch CCA
# c.replace("\"body\" : null", "\"body\" : \"null\"")
# ccaDoc = json.loads(cbor.loads(c).value, encoding='utf8')
# newDoc["url"] = ccaDoc["url"]
#
# newDoc["timestamp"] = ccaDoc["imported"]
# newDoc["team"] = team
# newDoc["crawler"] = crawler
#
# contentType = getContentType(ccaDoc)
# newDoc["content_type"] = contentType
#
# parsed = parser.from_buffer(ccaDoc["response"]["body"].encode("utf-8"))
# newDoc["crawl_data"] = {}
# if "content" in parsed:
# newDoc["crawl_data"]["content"] = parsed["content"]
# newDoc["extracted_text"] = parsed["content"]
# if 'inlinks' in ccaDoc and ccaDoc['inlinks']:
# newDoc["crawl_data"]["obj_parents"] = ccaDoc['inlinks']
# newDoc["obj_parent"] = ccaDoc['inlinks'][0]
# # CDR version 2.0 additions
# newDoc["_id"] = ccaDoc["key"]
# newDoc["obj_original_url"] = ccaDoc["url"]
#
# if 'text' in contentType or 'ml' in contentType:
# # web page
# newDoc["raw_content"] = ccaDoc["response"]["body"]
# else:
# # binary content, we link to store
# # ideally we should be storing it both the cases, but the CDR schema decided this way
# newDoc["obj_stored_url"] = url_to_nutch_dump_path(ccaDoc["url"], prefix=storeprefix)
#
# newDoc["extracted_metadata"] = parsed["metadata"] if 'metadata' in parsed else {}
# newDoc["version"] = CDRVersion
# verboseLog("Indexing ["+f+"] to Elasticsearch.")
# if url:
# indexDoc(url, newDoc, index, docType)
# if outFile:
# outFile.write(json.dumps(newDoc))
# outFile.write("\n")
# procCount += 1
# except Exception as err:
# failedList.append(f)
# failedReasons.append(str(err))
# traceback.print_exc()
# if outFile:
# print("Output Stored at %s" % outPath)
# outFile.close()
print "Processed " + str(procCount) + " CBOR files successfully."
print "Failed files: " + str(len(failedList))
if _verbose:
for i in range(len(failedList)):
verboseLog("File: "+failedList[i]+" failed because "+failedReasons[i])
def verboseLog(message):
if _verbose:
print >>sys.stderr, message
class _Usage(Exception):
'''An error for problems with arguments on the command line.'''
def __init__(self, msg):
self.msg = msg
def url_to_nutch_dump_path(url, prefix=None):
"""
Converts URL to nutch dump path (the regular dump with reverse domain, not the commons crawl dump path)
:param url: valid url string
:param prefix: prefix string (default = "")
:return: nutch dump path prefixed to given path
"""
domain = url.split("/")[2]
return "{0}/{1}/{2}".format("" if prefix is None else prefix.strip("/"),
"/".join(reversed(domain.split("."))),
hashlib.sha256(url).hexdigest().upper())
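# Illustrative example (added comment, not from the original source): given
# prefix="/data/store", a call like
#     url_to_nutch_dump_path("http://example.com/page.html", prefix="/data/store")
# returns roughly
#     "/data/store/com/example/<upper-case SHA-256 hex digest of the full URL>"
# i.e. the host "example.com" is reversed into "com/example" and the file name is
# the SHA-256 digest of the full URL string.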
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], 'hvt:c:d:u:i:o:p:s:',
['help', 'verbose', 'team=', 'crawlerId=', 'dataDir=', 'url=', 'index=',
'docType=', 'path=', 'storeprefix='])
except getopt.error, msg:
raise _Usage(msg)
if len(opts) == 0:
raise _Usage(_helpMessage)
team=None
crawlerId=None
dataDir=None
url=None
index=None
docType=None
outPath=None
storePrefix=None
for option, value in opts:
if option in ('-h', '--help'):
raise _Usage(_helpMessage)
elif option in ('-v', '--verbose'):
global _verbose
_verbose = True
elif option in ('-t', '--team'):
team = value
elif option in ('-c', '--crawlerId'):
crawlerId = value
elif option in ('-d', '--dataDir'):
dataDir = value
elif option in ('-u', '--url'):
url = value
elif option in ('-i', '--index'):
index = value
elif option in ('-o', '--docType'):
docType = value
elif option in ('-p', '--path'):
outPath = value
elif option in ('-s', '--storeprefix'):
storePrefix = value
if team == None or crawlerId == None or dataDir == None or index == None or docType == None \
or (outPath == None and url == None) or storePrefix == None:
print("One or more arguments are missing or invalid")
raise _Usage(_helpMessage)
esIndex(dataDir, team, crawlerId, index, docType, url, outPath, storePrefix)
except _Usage, err:
print >>sys.stderr, sys.argv[0].split('/')[-1] + ': ' + str(err.msg)
return 2
if __name__ == "__main__":
sys.exit(main())
|
<filename>peregrinearb/utils/general.py
import math
import networkx as nx
import logging
__all__ = [
'ExchangeNotInCollectionsError',
'format_for_log',
'FormatForLogAdapter',
'print_profit_opportunity_for_path',
'print_profit_opportunity_for_path_multi',
]
class ExchangeNotInCollectionsError(Exception):
def __init__(self, market_ticker):
super(ExchangeNotInCollectionsError, self).__init__("{} is either an invalid exchange or has a broken API."
.format(market_ticker))
def format_for_log(msg, **kwargs):
result = ''
for key, value in kwargs.items():
key = str(key).upper()
        # keys other than 'labels' become 'KEY#value - '; the 'labels' value (a list) expands to one 'label#<item> - ' per entry
if key != 'LABELS':
result += '{}#{} - '.format(key, value)
else:
for label in value:
result += '{}#{} - '.format('label', label)
result += msg
return result
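# Illustrative example (added comment, not from the original source): assuming
# keyword-argument order is preserved, a call such as
#     format_for_log('model trained', job='w2v', labels=['alpha', 'beta'])
# yields roughly
#     'JOB#w2v - label#alpha - label#beta - model trained'
# i.e. ordinary kwargs become 'KEY#value - ' prefixes and each entry of 'labels'
# becomes its own 'label#<item> - ' prefix before the message.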
class FormatForLogAdapter(logging.LoggerAdapter):
def __init__(self, logger, extra=None):
super().__init__(logger, extra or {})
def log(self, level, msg, *args, exc_info=None, extra=None, stack_info=False, **kwargs):
if self.isEnabledFor(level):
self.logger._log(level, format_for_log(msg, **kwargs), (), exc_info=exc_info, extra=extra,
stack_info=stack_info)
def print_profit_opportunity_for_path(graph, path, round_to=None, depth=False, starting_amount=100):
if not path:
return
print("Starting with {} in {}".format(starting_amount, path[0]))
for i in range(len(path) - 1):
start = path[i]
end = path[i + 1]
if depth:
volume = min(starting_amount, math.exp(-graph[start][end]['depth']))
starting_amount = math.exp(-graph[start][end]['weight']) * volume
else:
starting_amount *= math.exp(-graph[start][end]['weight'])
if round_to is None:
rate = math.exp(-graph[start][end]['weight'])
resulting_amount = starting_amount
else:
rate = round(math.exp(-graph[start][end]['weight']), round_to)
resulting_amount = round(starting_amount, round_to)
printed_line = "{} to {} at {} = {}".format(start, end, rate, resulting_amount)
# todo: add a round_to option for depth
if depth:
printed_line += " with {} of {} traded".format(volume, start)
print(printed_line)
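# Note (added comment, not from the original source): edge weights here are assumed
# to be negative log conversion rates, so rate = exp(-weight). For example, an edge
# with weight -math.log(1.02) has rate 1.02 and turns 100 units into 102; summing
# weights along a path therefore multiplies the corresponding rates.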
def print_profit_opportunity_for_path_multi(graph: nx.Graph, path, print_output=True, round_to=None, shorten=False):
"""
    The only difference between this function and print_profit_opportunity_for_path above is that the printed
    output also names the exchange. It assumes all edges in graph along path have exchange_name and market_name
    attributes.
"""
if not path:
return
money = 100
result = ''
result += "Starting with %(money)i in %(currency)s\n" % {"money": money, "currency": path[0]}
for i in range(len(path)):
if i + 1 < len(path):
start = path[i]
end = path[i + 1]
rate = math.exp(-graph[start][end]['weight'])
money *= rate
if round_to is None:
result += "{} to {} at {} = {}".format(start, end, rate, money)
else:
result += "{} to {} at {} = {}".format(start, end, round(rate, round_to), round(money, round_to))
if not shorten:
result += " on {} for {}".format(graph[start][end]['exchange_name'], graph[start][end]['market_name'])
result += '\n'
if print_output:
print(result)
return result
|
import tkinter as tk
import tkinter.ttk as ttk
from Components.db import Database
from Components.patter_menu import PatternMenu
from Components.factory import OperationFactory
class ToolBar():
def __init__(self, root, tab):
self.root = root
self.tab = tab
self.op_fac = OperationFactory()
self.add_tool_bar()
def add_tool_bar(self):
toolbar = tk.Frame(self.root, borderwidth=1, relief='raised', bg='#e6e6e6')
_photo = tk.PhotoImage(file="Images/new.png")
new_btn = tk.Button(toolbar,
image = _photo,
command=self.op_fac.create("new", self.root, self.tab).execute)
new_btn.image = _photo
new_btn.pack(side=tk.LEFT)
_photo = tk.PhotoImage(file="Images/open.png")
open_btn = tk.Button(toolbar,
image = _photo,
command=self.op_fac.create("open", self.root, self.tab).execute)
open_btn.image = _photo
open_btn.pack(side=tk.LEFT)
_photo = tk.PhotoImage(file="Images/save.png")
save_btn = tk.Button(toolbar,
image = _photo,
command=self.op_fac.create("save", self.root, self.tab).execute)
save_btn.image = _photo
save_btn.pack(side=tk.LEFT)
_photo = tk.PhotoImage(file="Images/copy.png")
copy_btn = tk.Button(toolbar,
image = _photo,
command=self.op_fac.create("copy", self.root, self.tab).execute)
copy_btn.image = _photo
copy_btn.pack(side=tk.LEFT)
_photo = tk.PhotoImage(file="Images/past.png")
past_btn = tk.Button(toolbar,
image = _photo,
command=self.op_fac.create("past", self.root, self.tab).execute)
past_btn.image = _photo
past_btn.pack(side=tk.LEFT)
_photo = tk.PhotoImage(file="Images/cut.png")
cut_btn = tk.Button(toolbar,
image = _photo,
command=self.op_fac.create("cut", self.root, self.tab).execute)
cut_btn.image = _photo
cut_btn.pack(side=tk.LEFT)
#-------------------------------
# FIND SYSTEM
#-------------------------------
sv = tk.StringVar()
self.edit = tk.Entry(toolbar, textvariable=sv)
sv.trace("w", lambda name, index, mode, sv=sv, text=self.edit: self.op_fac.create("find", self.root, self.tab).find(sv, text))
prev_btn = tk.Button(toolbar,
text = "Prev",
command= lambda edit=self.edit: self.op_fac.create("find", self.root, self.tab).find_prev(edit))
next_btn = tk.Button(toolbar,
text = "Next",
command= lambda edit=self.edit: self.op_fac.create("find", self.root, self.tab).find_next(edit))
self.edit.bind("<Return>", lambda event, edit=self.edit: self.op_fac.create("find", self.root, self.tab).find_next(edit))
prev_btn.pack(side=tk.RIGHT)
next_btn.pack(side=tk.RIGHT)
self.edit.pack(side=tk.RIGHT, fill=tk.BOTH)
tk.Label(toolbar,text='Find:').pack(side=tk.RIGHT)
#-------------------------------
# PATTERN PROJECT SELECTOR
#-------------------------------
tkvar = tk.StringVar(self.root)
self.popupMenu = PatternMenu(self.tab, toolbar, tkvar)
self.popupMenu.pack(side=tk.RIGHT)
self.popupMenu.configure(width=20)
ttk.Label(toolbar,text='Project:').pack(side=tk.RIGHT)
# Add the toolbar.
toolbar.pack(side=tk.TOP,fill=tk.X)
|
import json
import numpy as np
import pandas as pd
from .utils import get_blocked_videos
from .utils import interpolated_prec_rec
from .utils import segment_iou
from joblib import Parallel, delayed
class ActionDetectorDiagnosis(object):
GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
PREDICTION_FIELDS = ['results', 'version', 'external_data']
def __init__(self, ground_truth_filename=None, prediction_filename=None,
ground_truth_fields=GROUND_TRUTH_FIELDS,
prediction_fields=PREDICTION_FIELDS,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
limit_factor=None,
min_tiou_thr=0.1,
subset='testing',
verbose=False,
check_status=True,
load_extra_annotations=False,
characteristic_names_to_bins={'context-size': (range(-1,7), ['0','1','2','3','4','5','6']),
'context-distance': (range(-1,4), ['Inf','N','M','F']),
'agreement': (np.linspace(0,1.0,6), ['XW','W','M','H','XH']),
'coverage': (np.linspace(0,1.0,6), ['XS','S','M','L','XL']),
'length': (np.array([0,30,60,120,180,np.inf]), ['XS','S','M','L','XL']),
'num-instances': (np.array([-1,1,4,8,np.inf]), ['XS','S','M','L'])},
normalize_ap=False,
minimum_normalized_precision_threshold_for_detection=0.00,
evaluate_with_multi_segments=None):
if not ground_truth_filename:
raise IOError('Please input a valid ground truth file.')
if not prediction_filename:
raise IOError('Please input a valid prediction file.')
self.subset = subset
self.tiou_thresholds = tiou_thresholds
self.verbose = verbose
self.gt_fields = ground_truth_fields
self.pred_fields = prediction_fields
self.ap = None
self.check_status = check_status
self.load_extra_annotations = load_extra_annotations
self.characteristic_names_to_bins = characteristic_names_to_bins
self.characteristic_names = characteristic_names_to_bins.keys()
self.normalize_ap = normalize_ap
self.minimum_normalized_precision_threshold_for_detection = minimum_normalized_precision_threshold_for_detection
self.evaluate_with_multi_segments = evaluate_with_multi_segments
# Retrieve blocked videos from server.
if self.check_status:
self.blocked_videos = get_blocked_videos()
else:
self.blocked_videos = list()
# Import ground truth and predictions.
self.ground_truth, self.activity_index = self._import_ground_truth(
ground_truth_filename)
self.average_num_instance_per_class = len(self.ground_truth) / len(self.activity_index)
self.prediction = self._import_prediction(prediction_filename)
self.limit_factor = limit_factor
if self.limit_factor:
self.prediction = self._limit_prediction()
self.matched_gt_id_cols, self.fp_error_type_cols = [], []
for tiou in self.tiou_thresholds:
self.matched_gt_id_cols += ['matched-gt-id-' + str(tiou)]
self.fp_error_type_cols += ['fp-error-type-' + str(tiou)]
self.min_tiou_thr = min_tiou_thr
if self.verbose:
print('[INIT] Loaded annotations from {} subset.'.format(subset))
nr_gt = len(np.unique(self.ground_truth['gt-id']))
print('\tNumber of ground truth instances: {}'.format(nr_gt))
nr_pred = len(self.prediction)
print('\tNumber of predictions: {}'.format(nr_pred))
print('\tFixed threshold for tiou score: {}'.format(self.tiou_thresholds))
def _import_ground_truth(self, ground_truth_filename):
"""Reads ground truth file, checks if it is well formatted, and returns
the ground truth instances and the activity classes.
Parameters
----------
ground_truth_filename : str
Full path to the ground truth json file.
Outputs
-------
ground_truth : df
Data frame containing the ground truth instances.
activity_index : dict
Dictionary containing class index.
"""
with open(ground_truth_filename, 'r') as fobj:
data = json.load(fobj)
# Checking format
if not all([field in data.keys() for field in self.gt_fields]):
raise IOError('Please input a valid ground truth file.')
# Read ground truth data.
gt_id_lst, current_gt_id = [], 0
activity_index, cidx = {}, 0
video_lst, t_start_lst, t_end_lst, label_lst = [], [], [], []
if self.load_extra_annotations:
print('[INIT] Loading extra annotations')
extra_annotations = dict(zip(self.characteristic_names,[[] for _ in range(len(self.characteristic_names))]))
for videoid, v in data['database'].items():
if self.subset != v['subset']:
continue
if videoid in self.blocked_videos:
continue
for ann in v['annotations']:
if ann['label'] not in activity_index:
activity_index[ann['label']] = cidx
cidx += 1
if self.evaluate_with_multi_segments and self.load_extra_annotations:
for seg_idx in range(self.evaluate_with_multi_segments):
gt_id_lst.append(current_gt_id)
video_lst.append(videoid)
t_start_lst.append(float(ann['all-segments'][seg_idx][0]))
t_end_lst.append(float(ann['all-segments'][seg_idx][1]))
label_lst.append(activity_index[ann['label']])
for characteristic_name in self.characteristic_names:
extra_annotations[characteristic_name].append(ann[characteristic_name])
else:
gt_id_lst.append(current_gt_id)
video_lst.append(videoid)
t_start_lst.append(float(ann['segment'][0]))
t_end_lst.append(float(ann['segment'][1]))
label_lst.append(activity_index[ann['label']])
if self.load_extra_annotations:
for characteristic_name in self.characteristic_names:
extra_annotations[characteristic_name].append(ann[characteristic_name])
current_gt_id +=1
ground_truth = pd.DataFrame({'gt-id': gt_id_lst,
'video-id': video_lst,
't-start': t_start_lst,
't-end': t_end_lst,
'label': label_lst,
})
# cnt = 0
# for item in extra_annotations[characteristic_name]:
# if item <=30:
# cnt += 1
# print(f"Number of actions shorter than 30: {cnt}")
if self.load_extra_annotations:
for characteristic_name in self.characteristic_names:
ground_truth[characteristic_name] = extra_annotations[characteristic_name]
for (characteristic_name, (bins, labels)) in self.characteristic_names_to_bins.items():
ground_truth[characteristic_name] = extra_annotations[characteristic_name]
ground_truth[characteristic_name] = pd.cut(ground_truth[characteristic_name], precision=2, bins=bins, labels=labels, include_lowest=True)
if 'coverage' in self.characteristic_names:
# remove instances with coverage > 1
ground_truth = ground_truth.loc[(np.array(extra_annotations['coverage'])) <= 1.0]
# remove instances of length <=0
ground_truth = ground_truth.loc[ground_truth['t-start'].values < ground_truth['t-end'].values]
return ground_truth, activity_index
def _import_prediction(self, prediction_filename):
"""Reads prediction file, checks if it is well formatted, and returns
the prediction instances.
Parameters
----------
prediction_filename : str
Full path to the prediction json file.
Outputs
-------
prediction : df
Data frame containing the prediction instances.
"""
with open(prediction_filename, 'r') as fobj:
data = json.load(fobj)
# Checking format...
if not all([field in data.keys() for field in self.pred_fields]):
raise IOError('Please input a valid prediction file.')
# Read predictions.
video_lst, t_start_lst, t_end_lst = [], [], []
label_lst, score_lst = [], []
for videoid, v in data['results'].items():
if videoid in self.blocked_videos:
continue
# ------------------------------------------------------------- #
for result in v:
label = self.activity_index[result['label']]
video_lst.append(videoid)
t_start_lst.append(float(result['segment'][0]))
t_end_lst.append(float(result['segment'][1]))
label_lst.append(label)
score_lst.append(result['score'])
# ------------------------------------------------------------- #
prediction_id_lst = range(len(video_lst))
prediction = pd.DataFrame({'prediction-id': prediction_id_lst,
'video-id': video_lst,
't-start': t_start_lst,
't-end': t_end_lst,
'label': label_lst,
'score': score_lst})
return prediction
def _limit_prediction(self):
"""
        For each class J, limit the predictions to the top-scoring (N_j * self.limit_factor)
        predictions, where N_j is the number of ground truth instances for class J.
"""
ground_truth_gbvn = self.ground_truth.groupby('label')
prediction_gbvn = self.prediction.groupby('label')
filtered_prediction_df_list = []
for label, this_ground_truth in ground_truth_gbvn:
try:
# Check if there is at least one prediction for this class.
this_prediction = prediction_gbvn.get_group(label)
except Exception as e:
continue
# pick the top (len(this_ground_truth)*self.limit_factor) predictions
filtered_prediction_df_list += [this_prediction.nlargest(n=int(len(this_ground_truth)*self.limit_factor),
columns='score')]
filtered_prediction = pd.concat(filtered_prediction_df_list, ignore_index=True)
# reset prediction ids
filtered_prediction['prediction-id'] = range(len(filtered_prediction))
return filtered_prediction
def wrapper_compute_average_precision(self):
"""Computes average precision for each class in the subset.
"""
ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
recall = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
precision = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
matched_gt_id = np.zeros((len(self.tiou_thresholds), len(self.prediction)))
results = Parallel(n_jobs=len(self.activity_index))(
delayed(compute_average_precision_detection)(
ground_truth=self.ground_truth.loc[self.ground_truth['label'] == cidx].reset_index(drop=True),
prediction=self.prediction.loc[self.prediction['label'] == cidx].reset_index(drop=True),
tiou_thresholds=self.tiou_thresholds,
normalize_ap=self.normalize_ap,
average_num_instance_per_class=self.average_num_instance_per_class,
minimum_normalized_precision_threshold_for_detection=self.minimum_normalized_precision_threshold_for_detection,
) for cidx in self.activity_index.values())
for i, cidx in enumerate(self.activity_index.values()):
ap[:,cidx], matched_this_cls_gt_id, this_cls_prediction_ids, recall[:,cidx], precision[:,cidx] = results[i]
matched_gt_id[:,this_cls_prediction_ids] = matched_this_cls_gt_id
return ap, matched_gt_id, recall, precision
def evaluate(self):
"""Evaluates a prediction file. For the detection task we measure the
interpolated mean average precision to measure the performance of a
method.
"""
self.ap, self.matched_gt_id, self.recall, self.precision = self.wrapper_compute_average_precision()
for tidx, column_name in enumerate(self.matched_gt_id_cols):
self.prediction[column_name] = self.matched_gt_id[tidx]
self.mAP = self.ap.mean(axis=1)
self.average_mAP = self.mAP.mean()
self.mRecall = self.recall.mean(axis=1)
self.average_mRecall = self.mRecall.mean()
self.mPrecision = self.precision.mean(axis=1)
self.average_mPrecision = self.mPrecision.mean()
if self.verbose:
print('[RESULTS] Performance on ActivityNet detection task.')
print('[RESULTS] Using %d annotation segment(s) per instance' % self.evaluate_with_multi_segments if self.evaluate_with_multi_segments and self.load_extra_annotations else '')
print('\tAverage-mAP{}: {}'.format('_N' if self.normalize_ap else '', self.average_mAP))
# print '\tAverage-mRecall: {}'.format(self.average_mRecall)
# print '\tAverage-mPrecision: {}'.format(self.average_mPrecision)
def wrapper_analyze_fp_error_types(self):
self.fp_error_types_legned = {'True Positive': 0,
'Double Detection Err': 1,
'Wrong Label Err': 2,
'Localization Err': 3,
'Confusion Err': 4,
'Background Err': 5}
        self.fp_error_types_inverse_legned = dict([(v, k) for k, v in self.fp_error_types_legned.items()])
fp_error_types = Parallel(n_jobs=len(self.tiou_thresholds))(
delayed(analyze_fp_error_types)(
prediction=self.prediction,
ground_truth=self.ground_truth,
tiou_thr=tiou_thr,
matched_gt_id_col_name=matched_gt_id_col_name,
min_tiou_thr=self.min_tiou_thr,
fp_error_types_legned=self.fp_error_types_legned,
) for tiou_thr, matched_gt_id_col_name in zip(self.tiou_thresholds, self.matched_gt_id_cols))
return fp_error_types
def diagnose(self):
"""Analyzes the error types and add the results to self.prediction DataFrame.
Computes the average-mAP gain after removing each error type.
[WARNING]: diagnose() can only be run after evaluate() has finished
"""
# Augment the prediction DataFrame with the error types
self.fp_error_types = self.wrapper_analyze_fp_error_types()
self.fp_error_types_count = {}
for tidx, column_name in enumerate(self.fp_error_type_cols):
self.prediction[column_name] = self.fp_error_types[tidx]
this_tiou = self.tiou_thresholds[tidx]
self.fp_error_types_count[this_tiou] = dict(zip(self.fp_error_types_legned.keys(),
[0]*len(self.fp_error_types_legned)))
error_ids, counts = np.unique(self.fp_error_types[tidx], return_counts=True)
for error_id,count in zip(error_ids, counts):
self.fp_error_types_count[this_tiou][self.fp_error_types_inverse_legned[error_id]] = count
self.fp_error_types_count_df = pd.DataFrame(self.fp_error_types_count)
self.fp_error_types_count_df['avg'] = self.fp_error_types_count_df.mean(axis=1)
self.fp_error_types_precentage_df = self.fp_error_types_count_df/len(self.prediction)
# Computes the average-mAP gain after removing each error type
self.ap_gain, self.average_mAP_gain = {}, {}
        for err_name, err_code in self.fp_error_types_legned.items():
if err_code:
self.ap_gain[err_name] = np.zeros((len(self.tiou_thresholds),
len(self.activity_index)))
for cidx in self.activity_index.values():
this_pred_df = self.prediction[self.prediction['label']==cidx].reset_index(drop=True)
sort_idx = this_pred_df['score'].values.argsort()[::-1]
this_pred_df = this_pred_df.loc[sort_idx].reset_index(drop=True)
this_gt_df = self.ground_truth[self.ground_truth['label']==cidx]
npos=len(this_gt_df)
for tidx in range(len(self.tiou_thresholds)):
this_error_types = this_pred_df[self.fp_error_type_cols[tidx]].T.values
                        tp = (~np.isnan(this_pred_df[self.matched_gt_id_cols[tidx]].T)).astype(int)
tp = tp[this_error_types!=err_code]
fp = np.abs(tp - 1)
# Computing prec-rec
                        this_tp = np.cumsum(tp).astype(float)
                        this_fp = np.cumsum(fp).astype(float)
rec = this_tp / npos
if self.normalize_ap:
prec = rec * self.average_num_instance_per_class / (rec * self.average_num_instance_per_class + this_fp)
else:
prec = rec * npos / (rec * npos + this_fp)
self.ap_gain[err_name][tidx,cidx] = interpolated_prec_rec(prec, rec)
self.average_mAP_gain[err_name] = self.ap_gain[err_name].mean() - self.average_mAP
if self.verbose:
print('[DIAGNOSIS] Analysis of false positive error types.')
print('\tPercentage of each error type:\n{}'.format(self.fp_error_types_precentage_df))
print('\tAverage mAP gain after removing each error type:\n{}'.format(self.average_mAP_gain))
def compute_average_precision_detection(ground_truth, prediction, tiou_thresholds=np.linspace(0.5, 0.95, 10),
normalize_ap=False, average_num_instance_per_class=None,
minimum_normalized_precision_threshold_for_detection=0.05):
"""Compute average precision (detection task) between ground truth and
    predictions data frames. If multiple predictions occur for the same
    predicted segment, only the one with the highest score is matched as a
true positive. This code is greatly inspired by Pascal VOC devkit.
Parameters
----------
ground_truth : df
Data frame containing the ground truth instances.
Required fields: ['video-id', 't-start', 't-end']
prediction : df
Data frame containing the prediction instances.
        Required fields: ['video-id', 't-start', 't-end', 'score']
tiou_thresholds : 1darray, optional
Temporal intersection over union threshold.
Outputs
-------
ap : float
Average precision score.
"""
gt_id_lst = np.unique(ground_truth['gt-id'].values)
gt_id_to_index = dict(zip(gt_id_lst, range(len(gt_id_lst))))
lock_gt = np.ones((len(tiou_thresholds),len(gt_id_to_index))) * -1
npos = float(len(gt_id_lst))
# Sort predictions by decreasing score order.
sort_idx = prediction['score'].values.argsort()[::-1]
prediction = prediction.loc[sort_idx].reset_index(drop=True)
# Initialize true positive and false positive vectors.
tp = np.zeros((len(tiou_thresholds), len(prediction)))
fp = np.zeros((len(tiou_thresholds), len(prediction)))
matched_gt_id = np.nan*np.zeros((len(tiou_thresholds), len(prediction)))
ap = np.zeros(len(tiou_thresholds))
if prediction.empty:
return ap, matched_gt_id, prediction['prediction-id'].values, 0, 0
# Adaptation to query faster
ground_truth_gbvn = ground_truth.groupby('video-id')
    # Assigning true positive to truly ground truth instances.
for idx, this_pred in prediction.iterrows():
try:
# Check if there is at least one ground truth in the video associated.
ground_truth_videoid = ground_truth_gbvn.get_group(this_pred['video-id'])
except Exception as e:
fp[:, idx] = 1
continue
this_gt = ground_truth_videoid.reset_index()
tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
this_gt[['t-start', 't-end']].values)
# We would like to retrieve the predictions with highest tiou score.
tiou_sorted_idx = tiou_arr.argsort()[::-1]
for tidx, tiou_thr in enumerate(tiou_thresholds):
for jdx in tiou_sorted_idx:
if tiou_arr[jdx] < tiou_thr:
fp[tidx, idx] = 1
break
if lock_gt[tidx, gt_id_to_index[this_gt.loc[jdx]['gt-id']]] >= 0:
continue
# Assign as true positive after the filters above.
tp[tidx, idx] = 1
lock_gt[tidx, gt_id_to_index[this_gt.loc[jdx]['gt-id']]] = idx
matched_gt_id[tidx, idx] = this_gt.loc[jdx]['gt-id']
break
if fp[tidx, idx] == 0 and tp[tidx, idx] == 0:
fp[tidx, idx] = 1
    tp_cumsum = np.cumsum(tp, axis=1).astype(float)
    fp_cumsum = np.cumsum(fp, axis=1).astype(float)
recall_cumsum = tp_cumsum / npos
if normalize_ap:
precision_cumsum = recall_cumsum * average_num_instance_per_class / (recall_cumsum * average_num_instance_per_class + fp_cumsum)
discard_index = precision_cumsum <= minimum_normalized_precision_threshold_for_detection
tp[discard_index] = 0
fp[discard_index] = 1
matched_gt_id[discard_index] = np.nan
        tp_cumsum = np.cumsum(tp, axis=1).astype(float)
        fp_cumsum = np.cumsum(fp, axis=1).astype(float)
recall_cumsum = tp_cumsum / npos
precision_cumsum = recall_cumsum * average_num_instance_per_class / (recall_cumsum * average_num_instance_per_class + fp_cumsum)
else:
precision_cumsum = recall_cumsum * npos / (recall_cumsum * npos + fp_cumsum)
for tidx in range(len(tiou_thresholds)):
ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx,:], recall_cumsum[tidx,:])
recall = recall_cumsum[:,-1]
precision = precision_cumsum[:,-1]
return ap, matched_gt_id, prediction['prediction-id'].values, recall, precision
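# Added note (derived from the code above, not from the original source): with
# tp = rec * npos, the standard precision is
#     prec   = tp / (tp + fp) = rec * npos / (rec * npos + fp)
# and the "normalized" variant (normalize_ap=True) swaps npos for the average
# number of instances per class N_avg:
#     prec_N = rec * N_avg / (rec * N_avg + fp)
# e.g. rec=0.5, npos=20, fp=10 gives prec=0.5, while N_avg=40 gives
# prec_N = 20 / (20 + 10) ~= 0.67, i.e. precision is computed as if every class
# had N_avg ground truth instances.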
def analyze_fp_error_types(prediction,
ground_truth,
tiou_thr,
matched_gt_id_col_name,
min_tiou_thr=0.1,
fp_error_types_legned={'True Positive': 0,
'Double Detection Err': 1,
'Wrong Label Err': 2,
'Localization Err': 3,
'Confusion Err': 4,
'Background Err': 5}):
"""Assumes that prediction is sorted by 'prediction-id' column """
fp_error_types = {}
# Adaptation to query faster
ground_truth_gbvn = ground_truth.groupby('video-id')
fp_error_types = np.zeros(len(prediction))
this_prediction = prediction[np.isnan(prediction[matched_gt_id_col_name])].reset_index(drop=True)
this_prediction.sort_values(by='video-id',inplace=True)
this_prediction.reset_index(drop=True,inplace=True)
current_video_id = None
for idx, this_pred in this_prediction.iterrows():
if this_pred['video-id'] != current_video_id:
try:
this_gt = ground_truth_gbvn.get_group(this_pred['video-id']).reset_index()
except:
fp_error_types[this_pred['prediction-id']] = fp_error_types_legned['Background Err']
current_video_id = this_pred['video-id']
continue
current_video_id = this_pred['video-id']
tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
this_gt[['t-start', 't-end']].values)
# We would like to retrieve the predictions with highest tiou score.
gt_with_max_tiou_label = this_gt.loc[tiou_arr.argmax()]['label']
top_tiou = tiou_arr.max()
this_pred_label = this_pred['label']
if top_tiou >= tiou_thr:
if gt_with_max_tiou_label == this_pred_label:
# double detection error
fp_error_types[this_pred['prediction-id']] = fp_error_types_legned['Double Detection Err']
else:
# wrong label error
fp_error_types[this_pred['prediction-id']] = fp_error_types_legned['Wrong Label Err']
elif top_tiou >= min_tiou_thr:
if gt_with_max_tiou_label == this_pred_label:
# localization error
fp_error_types[this_pred['prediction-id']] = fp_error_types_legned['Localization Err']
else:
# confusion error
fp_error_types[this_pred['prediction-id']] = fp_error_types_legned['Confusion Err']
else:
# background error
fp_error_types[this_pred['prediction-id']] = fp_error_types_legned['Background Err']
return fp_error_types
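# Added summary of the categorization above (comment only, not from the original source):
#   matched prediction (non-NaN matched gt id)           -> True Positive (stays 0)
#   no ground truth at all in the prediction's video     -> Background Err
#   best tIoU >= tiou_thr, same label                    -> Double Detection Err
#   best tIoU >= tiou_thr, different label               -> Wrong Label Err
#   min_tiou_thr <= best tIoU < tiou_thr, same label     -> Localization Err
#   min_tiou_thr <= best tIoU < tiou_thr, different one  -> Confusion Err
#   best tIoU < min_tiou_thr                             -> Background Err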
|
import boto3
from botocore.client import ClientError
import freezegun
import pytest
from moto import mock_greengrass
from moto.core import get_account_id
from moto.settings import TEST_SERVER_MODE
ACCOUNT_ID = get_account_id()
@freezegun.freeze_time("2022-06-01 12:00:00")
@mock_greengrass
def test_create_core_definition():
client = boto3.client("greengrass", region_name="ap-northeast-1")
cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/CoreThing",
}
]
initial_version = {"Cores": cores}
core_name = "TestCore"
res = client.create_core_definition(InitialVersion=initial_version, Name=core_name)
res.should.have.key("Arn")
res.should.have.key("Id")
if not TEST_SERVER_MODE:
res.should.have.key("CreationTimestamp").equals("2022-06-01T12:00:00.000Z")
res.should.have.key("LastUpdatedTimestamp").equals("2022-06-01T12:00:00.000Z")
res.should.have.key("LatestVersionArn")
res.should.have.key("Name").equals(core_name)
res["ResponseMetadata"]["HTTPStatusCode"].should.equal(201)
@freezegun.freeze_time("2022-06-01 12:00:00")
@mock_greengrass
def test_list_core_definitions():
client = boto3.client("greengrass", region_name="ap-northeast-1")
cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/CoreThing",
}
]
initial_version = {"Cores": cores}
core_name = "TestCore"
client.create_core_definition(InitialVersion=initial_version, Name=core_name)
res = client.list_core_definitions()
res.should.have.key("Definitions")
core_definition = res["Definitions"][0]
core_definition.should.have.key("Name").equals(core_name)
core_definition.should.have.key("Arn")
core_definition.should.have.key("Id")
core_definition.should.have.key("LatestVersion")
core_definition.should.have.key("LatestVersionArn")
if not TEST_SERVER_MODE:
core_definition.should.have.key("CreationTimestamp").equal(
"2022-06-01T12:00:00.000Z"
)
core_definition.should.have.key("LastUpdatedTimestamp").equals(
"2022-06-01T12:00:00.000Z"
)
@freezegun.freeze_time("2022-06-01 12:00:00")
@mock_greengrass
def test_get_core_definition():
client = boto3.client("greengrass", region_name="ap-northeast-1")
cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/CoreThing",
}
]
initial_version = {"Cores": cores}
core_name = "TestCore"
create_res = client.create_core_definition(
InitialVersion=initial_version, Name=core_name
)
core_def_id = create_res["Id"]
arn = create_res["Arn"]
latest_version = create_res["LatestVersion"]
latest_version_arn = create_res["LatestVersionArn"]
get_res = client.get_core_definition(CoreDefinitionId=core_def_id)
get_res.should.have.key("Name").equals(core_name)
get_res.should.have.key("Arn").equals(arn)
get_res.should.have.key("Id").equals(core_def_id)
get_res.should.have.key("LatestVersion").equals(latest_version)
get_res.should.have.key("LatestVersionArn").equals(latest_version_arn)
if not TEST_SERVER_MODE:
get_res.should.have.key("CreationTimestamp").equal("2022-06-01T12:00:00.000Z")
get_res.should.have.key("LastUpdatedTimestamp").equals(
"2022-06-01T12:00:00.000Z"
)
@mock_greengrass
def test_delete_core_definition():
client = boto3.client("greengrass", region_name="ap-northeast-1")
cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/CoreThing",
}
]
initial_version = {"Cores": cores}
create_res = client.create_core_definition(
InitialVersion=initial_version, Name="TestCore"
)
core_def_id = create_res["Id"]
client.get_core_definition(CoreDefinitionId=core_def_id)
client.delete_core_definition(CoreDefinitionId=core_def_id)
with pytest.raises(ClientError) as ex:
client.delete_core_definition(CoreDefinitionId=core_def_id)
ex.value.response["Error"]["Message"].should.equal(
"That cores definition does not exist."
)
ex.value.response["Error"]["Code"].should.equal("IdNotFoundException")
@mock_greengrass
def test_update_core_definition():
client = boto3.client("greengrass", region_name="ap-northeast-1")
cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/CoreThing",
}
]
initial_version = {"Cores": cores}
create_res = client.create_core_definition(
InitialVersion=initial_version, Name="TestCore"
)
core_def_id = create_res["Id"]
updated_core_name = "UpdatedCore"
client.update_core_definition(CoreDefinitionId=core_def_id, Name="UpdatedCore")
get_res = client.get_core_definition(CoreDefinitionId=core_def_id)
get_res.should.have.key("Name").equals(updated_core_name)
@mock_greengrass
def test_update_core_definition_with_empty_name():
client = boto3.client("greengrass", region_name="ap-northeast-1")
cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/CoreThing",
}
]
initial_version = {"Cores": cores}
create_res = client.create_core_definition(
InitialVersion=initial_version, Name="TestCore"
)
core_def_id = create_res["Id"]
with pytest.raises(ClientError) as ex:
client.update_core_definition(CoreDefinitionId=core_def_id, Name="")
ex.value.response["Error"]["Message"].should.equal(
"Input does not contain any attributes to be updated"
)
ex.value.response["Error"]["Code"].should.equal(
"InvalidContainerDefinitionException"
)
@mock_greengrass
def test_update_core_definition_with_invalid_id():
client = boto3.client("greengrass", region_name="ap-northeast-1")
with pytest.raises(ClientError) as ex:
client.update_core_definition(
CoreDefinitionId="6fbffc21-989e-4d29-a793-a42f450a78c6", Name="abc"
)
ex.value.response["Error"]["Message"].should.equal(
"That cores definition does not exist."
)
ex.value.response["Error"]["Code"].should.equal("IdNotFoundException")
@freezegun.freeze_time("2022-06-01 12:00:00")
@mock_greengrass
def test_create_core_definition_version():
client = boto3.client("greengrass", region_name="ap-northeast-1")
v1_cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/v1Thing",
}
]
initial_version = {"Cores": v1_cores}
core_def_res = client.create_core_definition(
InitialVersion=initial_version, Name="TestCore"
)
core_def_id = core_def_res["Id"]
v2_cores = [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/277a6a15293c1ed5fa1aa74bae890b1827f80959537bfdcf10f63e661d54ebe1",
"Id": "987654321",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/v2Thing",
}
]
core_def_ver_res = client.create_core_definition_version(
CoreDefinitionId=core_def_id, Cores=v2_cores
)
core_def_ver_res.should.have.key("Arn")
core_def_ver_res.should.have.key("CreationTimestamp")
if not TEST_SERVER_MODE:
core_def_ver_res["CreationTimestamp"].should.equal("2022-06-01T12:00:00.000Z")
core_def_ver_res.should.have.key("Id").equals(core_def_id)
core_def_ver_res.should.have.key("Version")
@freezegun.freeze_time("2022-06-01 12:00:00")
@mock_greengrass
def test_get_core_definition_version():
client = boto3.client("greengrass", region_name="ap-northeast-1")
initial_version = {
"Cores": [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/v1Thing",
}
]
}
core_def_res = client.create_core_definition(
InitialVersion=initial_version, Name="TestCore"
)
core_def_id = core_def_res["Id"]
core_def_ver_id = core_def_res["LatestVersion"]
core_def_ver_res = client.get_core_definition_version(
CoreDefinitionId=core_def_id, CoreDefinitionVersionId=core_def_ver_id
)
core_def_ver_res.should.have.key("Arn")
core_def_ver_res.should.have.key("CreationTimestamp")
core_def_ver_res.should.have.key("Definition").should.equal(initial_version)
if not TEST_SERVER_MODE:
core_def_ver_res["CreationTimestamp"].should.equal("2022-06-01T12:00:00.000Z")
core_def_ver_res.should.have.key("Id").equals(core_def_id)
core_def_ver_res.should.have.key("Version")
@mock_greengrass
def test_get_core_definition_version_with_invalid_id():
client = boto3.client("greengrass", region_name="ap-northeast-1")
with pytest.raises(ClientError) as ex:
client.get_core_definition_version(
CoreDefinitionId="fe2392e9-e67f-4308-af1b-ff94a128b231",
CoreDefinitionVersionId="cd2ea6dc-6634-4e89-8441-8003500435f9",
)
ex.value.response["Error"]["Message"].should.equal(
"That cores definition does not exist."
)
ex.value.response["Error"]["Code"].should.equal("IdNotFoundException")
@mock_greengrass
def test_get_core_definition_version_with_invalid_version_id():
client = boto3.client("greengrass", region_name="ap-northeast-1")
core_def_res = client.create_core_definition(
Name="TestCore",
InitialVersion={
"Cores": [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/v1Thing",
}
]
},
)
core_def_id = core_def_res["Id"]
invalid_version_id = "cd2ea6dc-6634-4e89-8441-8003500435f9"
with pytest.raises(ClientError) as ex:
client.get_core_definition_version(
CoreDefinitionId=core_def_id, CoreDefinitionVersionId=invalid_version_id
)
ex.value.response["Error"]["Message"].should.equal(
f"Version {invalid_version_id} of Core List Definition {core_def_id} does not exist."
)
ex.value.response["Error"]["Code"].should.equal("VersionNotFoundException")
@freezegun.freeze_time("2022-06-01 12:00:00")
@mock_greengrass
def test_list_core_definition_version():
client = boto3.client("greengrass", region_name="ap-northeast-1")
initial_version = {
"Cores": [
{
"CertificateArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:cert/36ed61be9c6271ae8da174e29d0e033c06af149d7b21672f3800fe322044554d",
"Id": "123456789",
"ThingArn": f"arn:aws:iot:ap-northeast-1:{ACCOUNT_ID}:thing/v1Thing",
}
]
}
core_def_res = client.create_core_definition(
InitialVersion=initial_version, Name="TestCore"
)
core_def_id = core_def_res["Id"]
core_def_vers_res = client.list_core_definition_versions(
CoreDefinitionId=core_def_id
)
core_def_vers_res.should.have.key("Versions")
core_def_ver = core_def_vers_res["Versions"][0]
core_def_ver.should.have.key("Arn")
core_def_ver.should.have.key("CreationTimestamp")
if not TEST_SERVER_MODE:
core_def_ver["CreationTimestamp"].should.equal("2022-06-01T12:00:00.000Z")
core_def_ver.should.have.key("Id").equals(core_def_id)
core_def_ver.should.have.key("Version")
@mock_greengrass
def test_list_core_definition_version_with_invalid_id():
client = boto3.client("greengrass", region_name="ap-northeast-1")
with pytest.raises(ClientError) as ex:
client.list_core_definition_versions(
CoreDefinitionId="cd2ea6dc-6634-4e89-8441-8003500435f9"
)
ex.value.response["Error"]["Message"].should.equal(
"That cores definition does not exist."
)
ex.value.response["Error"]["Code"].should.equal("IdNotFoundException")
|
######################################################################
#
# Software Name : Cloudnet TOSCA toolbox
# Version: 1.0
# SPDX-FileCopyrightText: Copyright (c) 2020-21 Orange
# SPDX-License-Identifier: Apache-2.0
#
# This software is distributed under the Apache License 2.0
# the text of which is available at http://www.apache.org/licenses/LICENSE-2.0
# or see the "LICENSE-2.0.txt" file for more details.
#
# Author: <NAME> <<EMAIL>>
# Software description: TOSCA to Cloudnet Translator
######################################################################
import logging # for logging purposes.
import cloudnet.tosca.configuration as configuration
import cloudnet.tosca.syntax as syntax
from cloudnet.tosca.processors import Generator
from cloudnet.tosca.utils import normalize_name, short_type_name
TOSCA_DIAGRAMS = "tosca_diagrams"
configuration.DEFAULT_CONFIGURATION[TOSCA_DIAGRAMS] = {
# Target directory where network diagrams are generated.
Generator.TARGET_DIRECTORY: "Results/ToscaDiagrams"
}
configuration.DEFAULT_CONFIGURATION["logging"]["loggers"][__name__] = {
"level": "INFO",
}
LOGGER = logging.getLogger(__name__)
class ToscaDiagramGenerator(Generator):
"""
This is the generator of TOSCA diagrams.
"""
def generator_configuration_id(self):
return TOSCA_DIAGRAMS
def get_node_name_id(self, node_name):
node_name_id = normalize_name(node_name)
if node_name_id == "node": # 'node' is a dot keyword
node_name_id = "node_node" # rename to 'node_node' to avoid dot error.
return node_name_id
def generation(self):
self.info("TOSCA diagram generation")
topology_template = syntax.get_topology_template(
self.tosca_service_template.get_yaml()
)
# Generate only for TOSCA topology template.
if topology_template is None:
return
# Generate the TOSCA diagram.
self.open_file(".dot")
self.generate("graph ToscaDiagram {")
self.generate(' rankdir="LR"')
target_capability_ids = {} # map<requirement_assignment_id,capability_id>
show_feature_capabilities = set() # set<node_name>
show_dependency_requirements = set() # set<node_name>
substitution_mappings = syntax.get_substitution_mappings(topology_template)
if substitution_mappings is not None:
for capability_name, capability_yaml in syntax.get_capabilities(
substitution_mappings
).items():
if capability_yaml:
if not isinstance(capability_yaml, list):
continue # TODO something when capability_yaml is not a list
capability_name_id = normalize_name(capability_name)
self.generate(
" ",
capability_name_id,
'[label="',
capability_name,
'" shape=cds style=filled fillcolor=orange]',
sep="",
)
self.generate(
" ",
capability_name_id,
" -- ",
normalize_name(capability_yaml[0]),
"_capability_",
normalize_name(capability_yaml[1]),
"[style=dotted]",
sep="",
)
if capability_yaml[1] == "feature":
show_feature_capabilities.add(capability_yaml[0])
substitution_mappings_node_type = syntax.get_node_type(
substitution_mappings
)
self.generate(" subgraph clusterSubstitutionMappings {")
self.generate(' label="', substitution_mappings_node_type, '"', sep="")
node_templates = syntax.get_node_templates(topology_template)
for node_name, node_yaml in node_templates.items():
node_type_requirements = syntax.get_requirements_dict(
self.type_system.merge_type(syntax.get_type(node_yaml))
)
for requirement in syntax.get_requirements_list(node_yaml):
for requirement_name, requirement_yaml in requirement.items():
# ACK for Alien4Cloud
requirement_name = syntax.get_type_requirement(
requirement_yaml, requirement_name
)
if requirement_yaml:
requirement_capability = syntax.get_requirement_capability(
node_type_requirements.get(requirement_name)
)
if requirement_capability is None:
self.error(
requirement_name + ": capability undefined",
requirement_name,
)
continue
requirement_node = syntax.get_requirement_node_template(
requirement_yaml
)
if requirement_node is None:
continue
capability_found = False
requirement_node_template = node_templates.get(requirement_node)
if requirement_node_template is None:
self.error(
requirement_node + " node template undefined",
requirement_node,
)
continue
for capability_name, capability_yaml in syntax.get_capabilities(
self.type_system.merge_node_type(
syntax.get_type(requirement_node_template)
)
).items():
if self.type_system.is_derived_from(
syntax.get_capability_type(capability_yaml),
requirement_capability,
):
capability_found = True
break
if capability_found:
target_capability_ids[id(requirement)] = (
self.get_node_name_id(requirement_node)
+ "_capability_"
+ normalize_name(capability_name)
)
if capability_name == "feature":
show_feature_capabilities.add(requirement_node)
if requirement_name == "dependency":
show_dependency_requirements.add(node_name)
else:
self.error(
' capability of type "'
+ requirement_capability
+ '" not found',
requirement_node_template,
)
for node_name, node_yaml in node_templates.items():
node_name_id = self.get_node_name_id(node_name)
node_type = syntax.get_type(node_yaml)
merged_node_type = self.type_system.merge_type(node_type)
self.generate(" subgraph cluster", node_name_id, " {", sep="")
self.generate(" color=white")
self.generate(' label=""')
self.generate(
" ",
node_name_id,
'[label="',
node_name,
": ",
short_type_name(node_type),
                '|\\l\\l\\l\\l" shape=record style=rounded]',
sep="",
)
for capability_name, capability_yaml in syntax.get_capabilities(
merged_node_type
).items():
if (
capability_name != "feature"
or node_name in show_feature_capabilities
):
self.generate(
" ",
node_name_id,
"_capability_",
normalize_name(capability_name),
'[label="',
capability_name,
'" shape=cds style=filled fillcolor=orange]',
sep="",
)
self.generate(
" ",
node_name_id,
"_capability_",
normalize_name(capability_name),
" -- ",
node_name_id,
sep="",
)
for requirement_name, requirement_yaml in syntax.get_requirements_dict(
merged_node_type
).items():
if (
requirement_name != "dependency"
or node_name in show_dependency_requirements
):
self.generate(
" ",
node_name_id,
"_requirement_",
normalize_name(requirement_name),
'[label="',
requirement_name,
'" shape=cds style=filled fillcolor=turquoise]',
sep="",
)
self.generate(
" ",
node_name_id,
" -- ",
node_name_id,
"_requirement_",
normalize_name(requirement_name),
sep="",
)
self.generate(" }")
for node_name, node_yaml in node_templates.items():
node_name_id = self.get_node_name_id(node_name)
for requirement in syntax.get_requirements_list(node_yaml):
for requirement_name, requirement_yaml in requirement.items():
# ACK for Alien4Cloud
requirement_name = syntax.get_type_requirement(
requirement_yaml, requirement_name
)
capability_id = target_capability_ids.get(id(requirement))
if capability_id is not None:
self.generate(
" ",
node_name_id,
"_requirement_",
normalize_name(requirement_name),
" -- ",
capability_id,
"[style=dotted]",
sep="",
)
if substitution_mappings is not None:
self.generate(" }")
for (
requirement_name,
requirement_yaml,
) in syntax.get_substitution_mappings_requirements(
substitution_mappings
).items():
if requirement_yaml:
requirement_name_id = normalize_name(requirement_name)
self.generate(
" ",
requirement_name_id,
'[label="',
requirement_name,
'" shape=cds style=filled fillcolor=turquoise]',
sep="",
)
self.generate(
" ",
normalize_name(requirement_yaml[0]),
"_requirement_",
normalize_name(requirement_yaml[1]),
" -- ",
requirement_name_id,
"[style=dotted]",
sep="",
)
self.generate("}")
self.close_file()
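# Rough sketch of the emitted dot output (illustrative only; the node name, type and
# exact spacing below are assumptions, not taken from a real template):
#   graph ToscaDiagram {
#     rankdir="LR"
#     subgraph clusterserver {
#       color=white
#       label=""
#       server[label="server: Compute|\l\l\l\l" shape=record style=rounded]
#       server_capability_host[label="host" shape=cds style=filled fillcolor=orange]
#       server_capability_host -- server
#     }
#   }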
|
<filename>docs/source/conf.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Configuration file for the Sphinx documentation builder.
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
import os
import sys
from ambiance.__init__ import __module_name__
from ambiance.__version__ import __version__
name = __module_name__
Name = name.capitalize()
sys.path.insert(0, os.path.abspath('../../src/ambiance/'))
sys.setrecursionlimit(1500)
# -- Project information -----------------------------------------------------
project = Name
copyright = '2019, <NAME>'
author = '<NAME>'
# version: The short X.Y version
# release: The full version, including alpha/beta/rc tags
# version = ''
version = __version__
# ===============
# AUTOMATE THINGS
# ===============
# Update the auto-docs
os.system('bash ./dev_doc/gen_auto_doc.sh')
os.system('python ./theory/make_model_page.py')
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
# 'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Source file parsers
# source_parsers = {
# '.md': 'recommonmark.parser.CommonMarkParser',
# }
# The suffix(es) of source filenames.
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
rst_prolog = f"""
.. |name| replace:: {Name}
.. |name_bold| replace:: **{Name}**
.. |author1| replace:: <NAME>
.. |license| replace:: *Apache-2.0*
.. _PyPI: https://pypi.org/project/ambiance/
.. _Conda: https://anaconda.org/conda-forge/ambiance
.. _pip: https://pypi.org/project/pip/
.. _NumPy: https://pypi.org/project/numpy/
.. _SciPy: https://pypi.org/project/scipy/
.. _SI units: https://en.wikipedia.org/wiki/International_System_of_Units
"""
# -- Options for HTML output -------------------------------------------------
# html_theme = 'classic'
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'canonical_url': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
}
# Paths that contain custom static files (such as style sheets) relative to this directory.
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = f'{name}doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '',
# Latex figure (float) alignment
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, f'{name}.tex', f'{Name} Documentation',
'<NAME>', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, f'{name}', f'{Name} Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, f'{name}', f'{Name} Documentation',
author, f'{Name}', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
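# Illustrative settings for the extensions listed above (not from the original
# project; uncomment and adjust as needed). `todo_include_todos` makes
# `sphinx.ext.todo` render TODO entries, and `autosummary_generate` lets
# `sphinx.ext.autosummary` create stub pages automatically.
# todo_include_todos = True
# autosummary_generate = True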
|
<gh_stars>0
"""
class ObjectMapping
@author: <NAME>
"""
import warnings
warnings.simplefilter('ignore', FutureWarning)
import numpy as np
from keras.preprocessing.image import img_to_array
from PIL import Image, ImageDraw, ImageFont, ImageOps
from itertools import combinations, product
from string import ascii_uppercase
from IPython.display import display
import os
class ObjectMapping:
'''
Required:
import numpy as np
from keras.preprocessing.image import img_to_array
from PIL import Image, ImageDraw, ImageFont, ImageOps
from itertools import combinations, product
from string import ascii_uppercase
from IPython.display import display
import os
'''
def __init__ (self, filename, results, class_names, cli=False):
self.filename = filename
self.r = results # results contain rois, class_ids, masks, and scores
self.class_names = class_names
self.img_height = self.r['masks'].shape[0]
self.img_width = self.r['masks'].shape[1]
self.total_objects = len(self.r['rois'])
self.font_size = 12
self.font_type = 'FreeMonoBold.ttf'
self.fnt = ImageFont.truetype(self.font_type, self.font_size)
self.cli = cli
def get_box(self, object_id):
object_id = object_id-1
h1 = self.r['rois'][object_id][0]
h2 = self.r['rois'][object_id][2]
w1 = self.r['rois'][object_id][1]
w2 = self.r['rois'][object_id][3]
return (h1, w1, h2, w2)
def get_objectID(self):
return {index:self.class_names[self.r['class_ids'][index-1]] for index, array in enumerate(self.r['rois'],\
start=1)}
def object_class(self, object_id):
object_id = object_id-1
return self.class_names[self.r['class_ids'][object_id]]
def count_objects(self):
"summarize type of objects detected with count"
objects = [self.class_names[index] for index in self.r['class_ids']]
objects = dict(zip(*np.unique(objects, return_counts=True)))
return objects
def get_mask(self, object_id):
object_id = object_id-1
return self.r['masks'][:,:,object_id]
def _merge_masks(self, *args):
"""Internal. Merge mask boolean arrays"""
mask = self._false_canvas()
for ids in args:
if(isinstance(ids, np.ndarray)):
mask = np.bitwise_or(mask, ids.copy())
else:
mask = np.bitwise_or(mask, self.get_mask(ids).copy())
return mask
def _show_id(self, *args, text_color):
"""Internal. Only for displaying object_id for masks that have an object ID"""
if text_color == 'black':
canvas_color='white'
else:
canvas_color='black'
myimage = Image.new(mode='1', size=(self.img_width, self.img_height), color=canvas_color)
draw = ImageDraw.Draw(myimage)
for id_text in args:
draw.text(self.mass_center(id_text)[::-1], f"{id_text}", font=self.fnt, fill=text_color)
myimage = img_to_array(myimage).astype(bool)
myimage = myimage[:,:,0]
return myimage
def _show_massbox(self, *args, size=2):
"""Internal. Only for displaying mass boxes for masks that have an object ID"""
mass_boxes = self._false_canvas()
temp_box = self._false_canvas()
for ids in args:
h1, w1, h2, w2 = self.mass_box(ids)
temp_box[h1:h2, w1:w2] = True
temp_box[h1+size:h2-size, w1+size:w2-size] = False
mass_boxes = np.bitwise_or(mass_boxes, temp_box)
return mass_boxes
def _image_from_bytes(self, mask):
"""Internal only"""
mask_size = mask.shape[::-1]
maskbytes = np.packbits(mask, axis=1)
mask = Image.frombytes(mode='1', size=mask_size, data=maskbytes)
return mask
def show_mask(self, *args, show_massbox = False, show_id = False, internal=True):
"""Creates PIL image from a matrix of booleans. Shows a mask that is either
directly passed as a boolean matrix or that is retrieved using the object ID.
show_massbox is only for a mask that is retrieved with the object ID.
show_id is only for a mask that is retrieved with the object ID.
"""
mask = self._merge_masks(*args)
if show_id:
id_text = self._show_id(*args, text_color='black')
mask = np.bitwise_and(mask, id_text)
if show_massbox:
mass_boxes = self._show_massbox(*args)
mask = np.bitwise_or(mask, mass_boxes)
mask = self._image_from_bytes(mask)
if self.cli and internal:
mask.show()
return mask
def box_center(self, object_id):
h1, w1, h2, w2 = self.get_box(object_id)
hbb_center = int((h1+h2)/2)
wbb_center = int((w1+w2)/2)
return (hbb_center, wbb_center)
def mask_pixel_count(self, object_id, h1=None, w1=None, h2=None, w2=None):
if(h1 == None and w1 == None and h2 == None and w2 == None):
h1, w1, h2, w2 = self.get_box(object_id)
mask = self.get_mask(object_id)
return np.sum(mask[h1:h2, w1:w2])
def _best_coord(self, object_id, current_coords, step_coord, add=True):
"""Internal. As edges of the bounding box are scanned in one at a time,
this returns the coordinate that maximizes number of mask pixels multiplied
by the percentage of mask pixels remaining in the moving bounding box."""
step=1
step_variable = current_coords[step_coord]
h1, w1, h2, w2 = current_coords
bmask = self.get_mask(object_id)
true_count = np.sum(bmask[h1:h2, w1:w2])
bmask_area = bmask.shape[0]*bmask.shape[1]
check_max = (true_count/bmask_area)*true_count # Track largest product of perc and count
best_step_variable = step_variable # Fallback: keep the current edge if no better step is found
while(True):
if(add):
step_variable = step_variable + step
else:
step_variable = step_variable - step
box_adj = {0:bmask[step_variable:h2, w1:w2],
1:bmask[h1:h2, step_variable:w2],
2:bmask[h1:step_variable, w1:w2],
3:bmask[h1:h2, w1:step_variable]}
temp_mask = box_adj[step_coord]
temp_true = np.sum(temp_mask)
temp_area = temp_mask.shape[0]*temp_mask.shape[1]
if (temp_area != 0):
temp_perc = temp_true/temp_area
else:
break
if (temp_true*temp_perc > check_max):
best_step_variable = step_variable
check_max = temp_true*temp_perc
return best_step_variable
def mass_box(self, object_id):
"""Adjustment to bounding box to reflect a better center of mass"""
h1, w1, h2, w2 = self.get_box(object_id)
w1_best = self._best_coord(object_id, (h1, w1, h2, w2), 1, add=True)
w2_best = self._best_coord(object_id, (h1, w1_best, h2, w2), 3, add=False)
h2_best = self._best_coord(object_id, (h1, w1_best, h2, w2_best), 2, add=False)
h1_best = self._best_coord(object_id, (h1, w1_best, h2_best, w2_best), 0, add=True)
return (h1_best, w1_best, h2_best, w2_best)
def mass_center(self, object_id):
h1, w1, h2, w2 = self.mass_box(object_id)
hm_center = int((h1+h2)/2)
wm_center = int((w1+w2)/2)
return (hm_center, wm_center)
def _center_range(self, height_center, width_center):
"""Creates two arrays which divide the vertical and horizontal into sections."""
imgH_center_range = np.array([0.5*self.img_height*(1-height_center), 0.5*self.img_height*(1+height_center)]).astype(int)
imgW_center_range = np.array([0.5*self.img_width*(1-width_center), 0.5*self.img_width*(1+width_center)]).astype(int)
return (imgH_center_range, imgW_center_range)
def object_location(self, object_id, height_center=0.333, width_center=0.2, tol = 0.0, grid=False):
"""Descriptive location on a 3x3 grid. Width and height lines are adjustable so the grid
squares can be different sizes.
height_center is the percentage of the height desired to be considered center.
width_center is the percentage of the width desired to be considered center
tol is threshold of % of total of object's pixels needed to be present for reporting grid area"""
imgH_center_range, imgW_center_range = self._center_range(height_center, width_center)
# section canvas into horizontal and vertical thirds
htop = (0, 0, imgH_center_range[0], self.img_width)
hcenter = (imgH_center_range[0], 0, imgH_center_range[1], self.img_width)
hbottom = (imgH_center_range[1], 0, self.img_height, self.img_width)
wleft = (0, 0, self.img_height, imgW_center_range[0])
wcenter = (0, imgW_center_range[0], self.img_height, imgW_center_range[1])
wright = (0, imgW_center_range[1], self.img_height, self.img_width)
# count the number of pixels in each section
htop_pixels = self.mask_pixel_count(object_id, *htop)
hcenter_pixels = self.mask_pixel_count(object_id, *hcenter)
hbottom_pixels = self.mask_pixel_count(object_id, *hbottom)
wleft_pixels = self.mask_pixel_count(object_id, *wleft)
wcenter_pixels = self.mask_pixel_count(object_id, *wcenter)
wright_pixels = self.mask_pixel_count(object_id, *wright)
ppixel = np.array([htop_pixels, hcenter_pixels, hbottom_pixels, wleft_pixels, wcenter_pixels, wright_pixels])
ppixel = ppixel/self.mask_pixel_count(object_id)
ppixel_threshold = ppixel > tol
ppixel_names = ['top', 'center', 'bottom', 'left', 'center', 'right']
hloc = set()
wloc = set()
if ppixel_threshold[0] and ppixel_threshold[2]:
hloc.update(ppixel_names[:3])
else:
for index, value in enumerate(ppixel_threshold[:3]):
if value:
hloc.add(ppixel_names[index])
if ppixel_threshold[3] and ppixel_threshold[5]:
wloc.update(ppixel_names[3:6])
else:
for index, value in enumerate(ppixel_threshold[3:6], start=3):
if value:
wloc.add(ppixel_names[index])
locations = {'vertical':hloc, 'horizontal':wloc}
if grid:
composite = self._show_grid(imgH_center_range, imgW_center_range, *[object_id])
if self.cli:
composite.show()
composite.close()
else:
display(composite)
composite.close()
return locations
def _edge_pixels(self, object_id, h1, w1, h2, w2, top=False, bottom=False, sides=False, strict=False, return_true = True):
"""Internal. Returns list of pixels at the True/False border of a mask.
return_true determines if the list is the coords True or False pixels at border."""
if(isinstance(object_id, np.ndarray)):
mask = object_id
else:
mask = self.get_mask(object_id)
edge_pixels = []
# Scan horizontally to find edge
if sides:
for i in range(h1,h2):
for j in range(w1,w2-1):
if((mask[i, j] == True) and (i==0 or j == 0 or i == self.img_height)):
edge_pixels.append((i,j))
if((mask[i, j] == True) and (j+1 == self.img_width)):
edge_pixels.append((i,j+1))
if((mask[i, j] != mask[i, j+1]) and (mask[i,j] == False)):
if return_true:
edge_pixels.append((i,j+1))
else:
edge_pixels.append((i,j))
if((mask[i, j] != mask[i, j+1]) and (mask[i,j] == True)):
if return_true:
edge_pixels.append((i,j))
else:
edge_pixels.append((i,j+1))
# Scan vertically to find edge
if top:
for j in range(w1,w2):
for i in range(h1,h2-1):
if((mask[i, j] == True) and (i == 0 or j == 0 or j == self.img_width)):
edge_pixels.append((i,j))
if((mask[i, j] == True) and (i+1 == self.img_height)):
edge_pixels.append((i+1,j))
if((mask[i, j] != mask[i+1, j]) and (mask[i, j] == False)):
if return_true:
edge_pixels.append((i+1,j))
else:
edge_pixels.append((i,j))
if strict:
break
if bottom:
for j in range(w1,w2):
for i in reversed(range(h1+1,h2)):
if((mask[i, j] == True) and (i == self.img_height or j == 0 or j == self.img_width)):
edge_pixels.append((i,j))
if((mask[i, j] == True) and (i-1 == 0)):
edge_pixels.append((i-1,j))
if((mask[i, j] != mask[i-1, j]) and (mask[i, j] == False)):
if return_true:
edge_pixels.append((i-1,j))
else:
edge_pixels.append((i,j))
if strict:
break
return edge_pixels
def _pixels_ON(self, mask, coords):
"""Internal."""
for i, j in coords:
mask[i,j] = True
return mask
def _edge_guard(self, h1, w1, h2, w2, pad):
"""Internal. Scanning methods start outside the bounding box. This checks that the start postion exists in the image."""
if (h1-pad >= 0):
h1 = h1-pad
if (w1-pad >= 0):
w1 = w1-pad
if (h2+pad <= self.img_height):
h2 = h2 + pad
if (w2+pad <= self.img_width):
w2 = w2 + pad
return (h1, w1, h2, w2)
def inflate_mask(self, object_id, inflation_factor=1):
"""Inflates mask by a specified amount. Used to give some tolerance for touching determination"""
h1, w1, h2, w2 = self.get_box(object_id)
h1, w1, h2, w2 = self._edge_guard(h1, w1, h2, w2, inflation_factor)
mask = self.get_mask(object_id).copy()
for expand in range(inflation_factor):
edge_pixels = self._edge_pixels(mask, h1, w1, h2, w2, top=True, bottom=True, sides=True, return_true=False)
mask = self._pixels_ON(mask, edge_pixels)  # grow the mask once per inflation step
return mask
def _false_canvas(self):
"""Internal"""
return np.full((self.img_height, self.img_width), False, dtype=bool)
def create_box_mask(self, h1, w1, h2, w2):
false_canvas = self._false_canvas()
false_canvas[h1:h2, w1:w2] = True
return false_canvas
def object_outline(self, *args, pad=1, show_id=False, show_massbox=False, internal=True):
outline = self._false_canvas()
for obj in args:
h1, w1, h2, w2 = self.get_box(obj)
h1, w1, h2, w2 = self._edge_guard(h1, w1, h2, w2, pad)
edge_pixels = self._edge_pixels(obj, h1, w1, h2, w2, top=True, bottom=True, sides=True, return_true=True)
outline = self._pixels_ON(outline, edge_pixels)
if show_id:
id_text = self._show_id(*args, text_color='white')
outline = np.bitwise_or(outline, id_text)
if show_massbox:
mass_boxes = self._show_massbox(*args)
outline = np.bitwise_or(outline, mass_boxes)
outline = self._image_from_bytes(outline)
if self.cli and internal:
outline.show()
return outline
def object_topline(self, *args, pad=1):
"""Must use show_mask() to view"""
topline = self._false_canvas()
for obj in args:
h1, w1, h2, w2 = self.get_box(obj)
h1, w1, h2, w2 = self._edge_guard(h1, w1, h2, w2, pad)
top_pixels = self._edge_pixels(obj, h1, w1, h2, w2, top=True, strict=True, return_true=True)
topline = self._pixels_ON(topline, top_pixels)
return topline
def object_bottomline(self, *args, pad=1):
"""Must use show_mask() to view"""
bottomline = self._false_canvas()
for obj in args:
h1, w1, h2, w2 = self.get_box(obj)
h1, w1, h2, w2 = self._edge_guard(h1, w1, h2, w2, pad)
bottom_pixels = self._edge_pixels(obj, h1, w1, h2, w2, bottom=True, strict=True, return_true=True)
bottomline = self._pixels_ON(bottomline, bottom_pixels)
return bottomline
def object_relations(self, *args, tol=0.15):
if self.total_objects <= 1:
print('Not enough objects detected.')
else:
if len(args) == 0:
ids = range(1, self.total_objects+1)
combos = combinations(ids, r=2)
elif len(args)==1:
other_objects = (other_objects for other_objects in range(1, self.total_objects+1) if other_objects not in args)
combos = product(args, other_objects)
else:
ids = args
combos = combinations(ids, r=2)
object_relations = {'object relations': {'next to':[], 'above':[], 'below':[],
'touching':[], 'on':[], 'in':[]}
}
for rel in combos:
# print(f"Analyzing object_id {rel[0]}:{self.object_class(rel[0]):<10} "
# f" and object_id {rel[1]}:{self.object_class(rel[1])}")
obja, objb = rel
flip = rel[::-1]
h1a, w1a, h2a, w2a = self.get_box(obja)
h1b, w1b, h2b, w2b = self.get_box(objb)
# Widen width of box size by tol if possible
if(w1a-tol*w1a >= 0):
w1a_mod = int(w1a-tol*w1a)
else:
w1a_mod = w1a
if(w2a+tol*w2a <= self.img_width):
w2a_mod = int(w2a+tol*w2a)
else:
w2a_mod = w2a
if(w1b-tol*w1b >= 0):
w1b_mod = int(w1b-tol*w1b)
else:
w1b_mod = w1b
if(w2b+tol*w2b <= self.img_width):
w2b_mod = int(w2b+tol*w2b)
else:
w2b_mod = w2b
maska = self.get_mask(obja).copy()
maskb = self.get_mask(objb).copy()
boxa = self.create_box_mask(h1a, w1a_mod, h2a, w2a_mod)
boxb = self.create_box_mask(h1b, w1b_mod, h2b, w2b_mod)
h1ma, w1ma, h2ma, w2ma = self.mass_box(obja)
h1mb, w1mb, h2mb, w2mb = self.mass_box(objb)
hcentera, wcentera = self.mass_center(obja)
hcenterb, wcenterb = self.mass_center(objb)
toplinea = self.object_topline(obja)
toplineb = self.object_topline(objb)
# boolean position checks
obj_grounded = np.allclose(h2a, h2b, atol=int(0.04*self.img_height))
touching = np.any(np.bitwise_and(self.inflate_mask(obja), self.inflate_mask(objb)))
a_on_b = np.any(np.bitwise_and(maska, toplineb))
b_on_a = np.any(np.bitwise_and(maskb, toplinea))
a_align_b = b_align_a = wcentera in list(range(w1b, w2b)) or wcenterb in list(range(w1a, w2a))
a_above_b = hcentera < hcenterb
b_above_a = hcenterb < hcentera
a_below_b = hcentera > hcenterb
b_below_a = hcenterb > hcentera
a_in_b = set(range(h1ma, h2ma)).issubset(set(range(h1mb, h2mb)))\
and set(range(w1ma, w2ma)).issubset(set(range(w1mb, w2mb)))
b_in_a = set(range(h1mb, h2mb)).issubset(set(range(h1ma, h2ma)))\
and set(range(w1mb, w2mb)).issubset(set(range(w1ma, w2ma)))
if(touching):
object_relations['object relations']['touching'].append(rel)
object_relations['object relations']['touching'].append(flip)
if(a_on_b and not obj_grounded and a_above_b and not a_in_b):
object_relations['object relations']['on'].append(rel)
object_relations['object relations']['above'].append(rel)
object_relations['object relations']['below'].append(flip)
elif(b_on_a and not obj_grounded and b_above_a and not b_in_a):
object_relations['object relations']['on'].append(flip)
object_relations['object relations']['above'].append(flip)
object_relations['object relations']['below'].append(rel)
elif(a_in_b):
object_relations['object relations']['in'].append(rel)
elif(b_in_a):
object_relations['object relations']['in'].append(flip)
if(obj_grounded):
object_relations['object relations']['next to'].append(rel)
object_relations['object relations']['next to'].append(flip)
else:
if(np.any(np.bitwise_and(maska, boxb)) or np.any(np.bitwise_and(maskb, boxa))):
object_relations['object relations']['next to'].append(rel)
object_relations['object relations']['next to'].append(flip)
if(a_above_b and a_align_b):
object_relations['object relations']['above'].append(rel)
elif(a_below_b and a_align_b):
object_relations['object relations']['below'].append(rel)
if(b_above_a and b_align_a):
object_relations['object relations']['above'].append(flip)
elif(b_below_a and b_align_a):
object_relations['object relations']['below'].append(flip)
# Remove duplicate tuple pairs
object_relations = {'object relations': {'next to':set(object_relations['object relations']['next to']),
'above':set(object_relations['object relations']['above']),
'below':set(object_relations['object relations']['below']),
'touching':set(object_relations['object relations']['touching']),
'on':set(object_relations['object relations']['on']),
'in':set(object_relations['object relations']['in'])}
}
return object_relations
def grid_coords(self, object_id, height=3, width=3, grid=False):
"""Get grid coordinates using the bounding box in form 'A1' where 'A1' is the top left grid."""
h1, w1, h2, w2 = self.get_box(object_id)
letters = ascii_uppercase[0:height]
numbers = range(1,width+1)
combo_labels = product(letters, numbers)
height_array = np.arange(0, self.img_height, self.img_height/height).astype(int)
width_array = np.arange(0, self.img_width, self.img_width/width).astype(int)
combo_coords = product(height_array, width_array)
label_dict = {k:v for k,v in zip(combo_coords, combo_labels)}
height_array = np.append(height_array, self.img_height)
width_array = np.append(width_array, self.img_width)
# align to grid coordinates
h1_array = h1 < height_array
h2_array = h2 <= height_array
w1_array = w1 < width_array
w2_array = w2 <= width_array
for i in range(len(height_array)-1):
if(h1_array[i] != h1_array[i+1]):
h1_index = i
if(h2_array[i] != h2_array[i+1]):
h2_index = i
for i in range(len(width_array)-1):
if(w1_array[i] != w1_array[i+1]):
w1_index = i
if(w2_array[i] != w2_array[i+1]):
w2_index = i
h_align = height_array[h1_index:h2_index+1]
w_align = width_array[w1_index:w2_index+1]
align_combos = product(h_align, w_align)
grid_sectors = [label_dict[x] for x in align_combos]
grid_sectors = set(grid_sectors)
if grid:
composite = self._show_grid(height_array, width_array, *[object_id])
# expand grid and add text labels
border=20
composite = ImageOps.expand(composite, border=(border, 0, 0, border), fill='white')
height_mid = [int((height_array[x] + height_array[x+1])/2) for x in range(len(letters))]
width_mid = [int((width_array[x] + width_array[x+1])/2) + border for x in range(len(numbers))]
draw = ImageDraw.Draw(composite)
for coord, text in zip(height_mid, letters):
draw.text((0, coord) , f"{text}", font=self.fnt, fill='black')
for coord, text in zip(width_mid, numbers):
draw.text((coord, self.img_height+5), f"{text}", font=self.fnt, fill='black')
if self.cli:
composite.show()
composite.close()
else:
display(composite)
composite.close()
return grid_sectors
def _show_grid(self, height_array, width_array, *args):
mygrid = Image.new(mode='1', size=(self.img_width, self.img_height))
draw=ImageDraw.Draw(mygrid)
for i in width_array:
draw.line((i, 0, i, self.img_height), fill="white")
for i in height_array:
draw.line((0, i, self.img_width, i), fill="white")
mask = self.show_mask(*args, internal=False)
composite = Image.composite(mygrid, mask, mygrid)
return composite
def object_summary(self, object_id):
pass
def image_summary(self):
ids = range(1, self.total_objects+1)
outlines = self.object_outline(*ids, show_id=True, show_massbox=True, internal=False)
if self.cli:
outlines.show()
outlines.close()
else:
display(outlines)
outlines.close()
print("Object IDs:")
print(self.get_objectID())
print('\n')
print("Object Counts:")
print(self.count_objects())
print('\n')
print("Object Relations:")
relations = self.object_relations()
for _, rel in relations.items():
print('\n')
for k, v in rel.items():
print(f"{k:<10}: {v}")
print('\n')
print('Object Locations:')
print('Default Values: vertical center area is 33% of image height, horizontal center area is 20% of image width.')
print('(Use imap.object_location(object_ID, grid=True) to show grid lines.)')
for i in ids:
print(f"ID: {i:<3} {self.object_class(i):<10} Location: {self.object_location(i)}")
def main():
import warnings
warnings.simplefilter('ignore', FutureWarning)
from keras.preprocessing.image import load_img
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.visualize import display_instances
from mrcnn_classes import class_names
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="filename required")
args = parser.parse_args()
if args.filename:
imagefile = args.filename
# define the test configuration
class TestConfig(Config):
NAME = "test"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 80
# define the model
rcnn = MaskRCNN(mode='inference', model_dir='./', config=TestConfig())
# load coco model weights
model_weights = '../data/mask_rcnn_coco.h5'
print(f"loading {model_weights}...")
rcnn.load_weights(model_weights, by_name=True)
img = load_img(imagefile)
img = img_to_array(img)
# make prediction
results = rcnn.detect([img], verbose=False)
# get dictionary for first prediction
r = results[0]
# instantiate object
global imap
imap = ObjectMapping(imagefile, r, class_names, cli=True)
imap.image_summary()
display_instances(img, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
if __name__ == '__main__':
main()
|
<filename>mod/setup_geometry.py<gh_stars>1-10
import bpy, math, bmesh, random
from typing import Set, Tuple
# Divisor coefficient for int colors
coef = {
"8 bit": 255,
"16 bit": 65535,
"32 bit": 4294967295,
}
# Return this angle if it's not possible to calculate angle between faces
ang_limit = math.radians(89.0)
# Get preferences
prefs = bpy.context.preferences.addons["svg-creator"].preferences
def rip_and_tear(context) -> Set:
"""Edge split geometry using specified angle or unique mesh settings.
Also checks non-manifold geometry and hard edges.
Returns set of colors that are used to color meshes."""
processed = set()
angle_use_fixed = prefs.RenderFixedAngleUse
# Angle fixed in radians
angle_fixed = prefs.RenderFixedAngle
precision = prefs.RenderPrecision
# Colors are saved in format specified by render precision parameter
# Totally white and totally black (and close to them) colors are prohibited
colors = set()
# Apply split_n_paint function to every object and unite resulting colors
# colors.union(tuple(set(tuple([split_n_paint(context, colors, precision, obj,
# angle_use_fixed, angle_fixed, processed) for obj in context.scene.objects
# if obj.type == "MESH"]))))
for obj in context.scene.objects:
if obj.type == "MESH":
if obj.data in processed or len(obj.data.polygons) == 0:
processed.add(obj.data)
else:
colors |= split_n_paint(
context, colors, precision, obj,
angle_use_fixed, angle_fixed,
processed,
)
processed.add(obj.data)
return colors
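# Illustrative usage sketch (not part of the original add-on): calling
# rip_and_tear on the current Blender context and reporting how many unique
# surface colors were assigned.
def _example_rip_and_tear():
    colors = rip_and_tear(bpy.context)
    print(f"Assigned {len(colors)} unique surface colors")
    return colors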
def split_n_paint(context, colors, precision, obj, angle_use_fixed,
angle_fixed, processed) -> Set[Tuple]:
"""Split edges of mesh and paint them with random color, add processed meshes into set to avoid splitting and painting them for the second time.
Processed set is totally ignored in case scene has single-user objects and
data, in this case every surface is guaranteed to have unique and random
colors, but overall processing time will be increased."""
if not angle_use_fixed:
if obj.data.use_auto_smooth:
angle_fixed = obj.data.auto_smooth_angle
else:
# If auto smooth is disabled, default edge split at 30 degrees can
# lead to incorrect mesh appearance, nothing should be done
# as it's 3D Artist decision to ignore this setting
angle_fixed = math.pi
# Add VCol layer to the model in case it already has one or has none
if not "VCol" in obj.data.vertex_colors:
# vcol = bpy.ops.mesh.vertex_color_add()
vcol = obj.data.vertex_colors.new(name = "VCol", do_init = False)
vcol.name = "VCol"
vcol.active = True
vcol.active_render = True
bm = bmesh.new(use_operators = True)
bm.from_mesh(obj.data)
bm.select_mode = {"FACE"}
# Generate indices in bmesh same as obj.data indices
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
for face in bm.faces:
face.hide_set(False)
face.select_set(False)
# Split every mesh into chunks corresponding to smooth surfaces limited by
# hard edges, basically it's bmesh implementation of edge split modifier.
# Boundaries is the list for pairs of lists of vertices and edges for
# bmesh.ops.split_edges operator
boundaries = []
for index, face in enumerate(bm.faces):
# Select random face and grow selection till boundary is reached
if not face.hide:
bm.faces.active = bm.faces[index]
# face_bm, active face
fbm = bm.faces.active
fbm.select_set(True)
sel = False
# List of selected faces
sf = [fbm, ]
# Grow selection until there is nothing new to select
while not sel:
# for selected current face in selected faces
for fsc in sf:
# for edge in edges of selected faces
for e in fsc.edges:
# non-manifold geometry can lead to incorrect shading
# on surfaces where this kind of shading is not
# expected, so it's a good choice to split using
# non-manifold, edge smoothness is calculated when
# auto-smoothing tick is active
c0 = e.smooth
c1 = e.calc_face_angle(ang_limit) <= angle_fixed
c2 = e.is_manifold
c3 = not obj.data.edges[e.index].use_edge_sharp
if c0 and c1 and c2 and c3:
# Select linked faces
[lf.select_set(True) for lf in e.link_faces]
# Temp tuple of selected geometry
sft = [f for f in bm.faces if f.select]
# Selection is exhausted
if sft == sf:
sel = True
else:
sf = sft
# Tuples of selected vertices and edges
sv = tuple([v for v in bm.verts if v.select])
se = tuple([e for e in bm.edges if e.select])
# Sets of boundary vertices and edges
bv = set()
be = set()
# Get boundary vertices and edges
for v in sv:
for le in v.link_edges:
if not le.select:
bv.add(v)
for e in se:
for lf in e.link_faces:
if not lf.select:
be.add(e)
bv = list(bv)
be = list(be)
boundaries.append((bv, be))
# Hide and deselect processed mesh chunk,
# so you can't access it again
for f in sf:
f.select_set(False)
f.hide_set(True)
# Unhide back, so operator can work with geometry
for f in bm.faces:
f.select_set(False)
f.hide_set(False)
# Finally split edges
# Additional for loop because every change of bmesh demands indices
# regeneration and c3 in edge check needs check in separate mesh
# structure, because there is no access to edge mark data from bmesh
for b in boundaries:
bv, be = b[0], b[1]
bmesh.ops.split_edges(bm, verts = bv, edges = be, use_verts = True)
# Regenerate indices because bmesh have changed
bm.faces.ensure_lookup_table()
# Unhide and unselect faces to start painting
for f in bm.faces:
f.hide_set(False)
f.select_set(False)
# Paint every splitted chunk into random vertex color
for index, face in enumerate(bm.faces):
colors, _color, color_f = generate_color(context, colors, precision)
# if not face.hide: # No need to check it anymore TODO remove
bm.faces.active = bm.faces[index]
fbm = bm.faces.active
fbm.select_set(True)
sel = False
sf = [fbm, ]
# Grow selection until there is nothing new to select
while not sel:
se = tuple([e for e in bm.edges if e.select])
for e in se:
for f in e.link_faces:
f.select_set(True)
sft = [f for f in bm.faces if f.select]
if sf == sft:
sel = True
else:
sf = sft
vcol = bm.loops.layers.color.get("VCol")
for f in sf:
for loop in f.loops:
loop[vcol] = (color_f[0], color_f[1], color_f[2], 1.0)
for f in sf:
f.select_set(False)
f.hide_set(True)
# Unhide faces, so there is no need to unhide faces after entering the
# edit mode, speeds up work a bit
for f in bm.faces:
f.hide_set(False)
# Remove doubles after coloring and edge split to avoid artifacts in
# renders using any engine
bmesh.ops.remove_doubles(bm, verts = [v for v in bm.verts], dist = 1e-5)
bm.to_mesh(obj.data)
obj.data.update()
bm.free()
return colors
def generate_color(context, colors, precision) -> Tuple[Set, Tuple, Tuple]:
"""Generate a random color with the desired precision and return it together
with the updated color set. Colors are later normalized to the 0-1 range,
as required for VCol:
8-bit values are divided by 255,
16-bit - by 65535,
32-bit - by 4294967295."""
# Black and dark colors are prohibited, because the render doesn't store any
# info where there is no alpha in the picture.
# About 0.4% of the precision step is cut from the bottom of the range.
# This is a naive implementation that regenerates a random color and re-checks
# that it is not already in the set; there should be a better way to do this.
# TODO better random color creation algorithm
if precision == "8 bit":
color = tuple([random.randint(1, 255) for _ in range(3)])
while color in colors:
color = tuple([random.randint(1, 255) for _ in range(3)])
elif precision == "16 bit":
color = tuple([random.randint(250, 65_535) for _ in range(3)])
while color in colors:
color = tuple([random.randint(250, 65_535) for _ in range(3)])
elif precision == "32 bit":
color = tuple(
[random.randint(1_750_000, 4_294_967_295) for _ in range(3)])
while color in colors:
color = tuple(
[random.randint(1_750_000, 4_294_967_295) for _ in range(3)])
colors.add(color)
color_f = tuple([c / coef[precision] for c in color])
return (colors, color, color_f)
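# Illustrative sketch (not part of the original add-on): normalizing an integer
# color the same way generate_color() does, assuming 8-bit precision. The input
# triple is an arbitrary example.
def _example_normalize_color(color=(128, 64, 255), precision="8 bit"):
    """Return the 0-1 float triple that would be written into the VCol layer."""
    return tuple(c / coef[precision] for c in color)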
|
"""
wf_netcdfio
-----------
netcdf reading and writing for wflow
$Author: schelle $
$Id: wf_DynamicFramework.py 915 2014-02-10 07:33:56Z schelle $
$Rev: 915 $
"""
import osgeo
import osgeo.ogr
import netCDF4
import pyproj
import os
# the two below are needed for bbfreeze
try:
import netCDF4.utils
except:
import netCDF4_utils
import netcdftime
from pcraster import *
from numpy import *
import time
import datetime as dt
import wflow.wflow_lib as wflow_lib
import wflow.pcrut as _pcrut
globmetadata = {}
globmetadata['title'] = 'wflow output mapstack'
globmetadata['institution'] = 'Deltares'
globmetadata['source'] = 'wflow'
globmetadata['history'] = time.ctime()
globmetadata['references'] = 'https://github.com/openstreams/wflow'
globmetadata['Conventions'] = 'CF-1.4'
def convertCoord(proj_src, proj_trg, x, y):
"""
Convert a list of x,y pairs in a certain projection to another projection
input:
proj_src: string, EPSG or proj4 string referring to projection of source coordinates
proj_trg: string, EPSG or proj4 string referring to projection of target coordinates
x: NumPy array, vector or 2D array of x-coordinates (source)
y: NumPy array, vector or 2D array of y-coordinates (source)
output:
X: NumPy array, vector or 2D array of x-coordinates (target)
Y: NumPy array, vector or 2D array of y-coordinates (target)
"""
srs1 = pyproj.Proj(proj_src) # OPT['proj4_params'])
srs2 = pyproj.Proj(proj_trg) # wgs84
X,Y = pyproj.transform(srs1, srs2, x,y) # Do add 0. to avoid trunc issues.
return X,Y
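# Illustrative sketch (not part of the original module): reprojecting a single
# UTM zone 33N point to WGS84 with convertCoord. The proj4 strings and the
# coordinates are examples only.
def _example_convertCoord():
    utm33 = "+proj=utm +zone=33 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
    wgs84 = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
    return convertCoord(utm33, wgs84, array([500000.0]), array([4649776.22]))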
def prepare_nc(trgFile, timeList, x, y, metadata, logger, EPSG="EPSG:4326", units=None,
calendar='gregorian', Format="NETCDF4", complevel=9, zlib=True, least_significant_digit=None,FillValue=1E31):
"""
This function prepares a netCDF file with the given metadata for the supplied list of timesteps.
It assumes a gregorian calendar by default; if no units are given, the time unit defaults to
seconds since the first timestep in timeList.
"""
import datetime as dt
logger.info('Setting up netcdf output: ' + trgFile)
if units == None: # Use start of the run
epoch = timeList[0]
units = 'seconds since %04d-%02d-%02d %02d:%02d:%02d.0 00:00' % (
epoch.year, epoch.month, epoch.day, epoch.hour, epoch.minute, epoch.second)
startDayNr = netCDF4.date2num(timeList[0].replace(tzinfo=None), units=units, calendar=calendar)
endDayNr = netCDF4.date2num(timeList[-1].replace(tzinfo=None), units=units, calendar=calendar)
timeAR = linspace(startDayNr, endDayNr, num=len(timeList))
nc_trg = netCDF4.Dataset(trgFile, 'w', format=Format, zlib=zlib, complevel=complevel)
logger.info(
'Setting up dimensions and attributes. Steps: ' + str(len(timeList)) + ' lat: ' + str(len(y)) + " lon: " + str(
len(x)))
if len(timeAR) == 1:
nc_trg.createDimension('time', 1)
else:
nc_trg.createDimension('time', 0) # NrOfDays*8
DateHour = nc_trg.createVariable('time', 'f8', ('time',), fill_value=FillValue, zlib=zlib, complevel=complevel)
DateHour.units = units
DateHour.calendar = calendar
DateHour.standard_name = 'time'
DateHour.long_name = 'time'
DateHour.axis = 'T'
DateHour[:] = timeAR
# make a proj4 string
srs = osgeo.osr.SpatialReference()
res = srs.ImportFromEPSG(int(EPSG[5:]))
if res != 0:
logger.error("EPGS not converted correctly: " + EPSG + ". Is the GDAL_DATA environment variable set correctly?")
exit(1)
projStr = srs.ExportToProj4()
proj_src = '+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs'
if srs.IsProjected() == 0: # ONly lat lon needed
nc_trg.createDimension('lat', len(y))
nc_trg.createDimension('lon', len(x))
y_var = nc_trg.createVariable('lat', 'f4', ('lat',), fill_value=FillValue, zlib=zlib, complevel=complevel)
y_var.standard_name = 'latitude'
y_var.long_name = 'latitude'
y_var.units = 'degrees_north'
y_var.axis = 'Y'
x_var = nc_trg.createVariable('lon', 'f4', ('lon',), fill_value=FillValue, zlib=zlib, complevel=complevel)
x_var.standard_name = 'longitude'
x_var.long_name = 'longitude'
x_var.units = 'degrees_east'
x_var.axis = 'X'
y_var[:] = y
x_var[:] = x
crs = nc_trg.createVariable('crs', 'c')
crs.long_name = 'wgs84'
crs.proj4_params = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
crs.grid_mapping_name = 'latitude_longitude'
else: # Assume regular grid in m
nc_trg.createDimension('y', len(y))
nc_trg.createDimension('x', len(x))
y_var = nc_trg.createVariable('y', 'f4', ('y',), fill_value=FillValue, zlib=zlib, complevel=complevel)
y_var.standard_name = 'projection_y_coordinate'
y_var.long_name = 'y-coordinate in Cartesian system'
y_var.units = 'm'
y_var.axis = 'Y'
x_var = nc_trg.createVariable('x', 'f4', ('x',), fill_value=FillValue, zlib=zlib, complevel=complevel)
x_var.standard_name = 'projection_x_coordinate'
x_var.long_name = 'x-coordinate in Cartesian system'
x_var.units = 'm'
x_var.axis = 'X'
y_var[:] = y
x_var[:] = x
crs = nc_trg.createVariable('crs', 'c')
crs.long_name = EPSG
crs.grid_mapping_name = 'universal_transverse_mercator'
crs.utm_zone_number = srs.GetUTMZone()
crs.semi_major_axis = srs.GetSemiMajor()
crs.inverse_flattening = srs.GetInvFlattening()
crs._CoordinateTransformType = "Projection"
crs._CoordinateAxisTypes = "y x"
crs.proj4_params = projStr
# Also write lat lon fields
XI,YI = meshgrid(x,y)
lon_vals,lat_vals = convertCoord(projStr, proj_src, XI, YI)
# Need to create lat-lon fields
lat = nc_trg.createVariable('lat','f4',('y','x',))
lat.standard_name = 'latitude'
lat.long_name = 'latitude coordinate'
lat.units = 'degrees_north'
lat.coordinates = 'lat lon'
lat.grid_mapping = 'wgs84'
#lat._CoordinateAxisType = "Lat"
lat[:,:] = lat_vals
lon = nc_trg.createVariable('lon','f4',('y','x',))
lon.standard_name = 'longitude'
lon.long_name = 'longitude coordinate'
lon.units = 'degrees_east'
lon.coordinates = 'lat lon'
lon.grid_mapping = 'wgs84'
#lon._CoordinateAxisType = "Lon"
lon[:,:] = lon_vals
crs.EPSG_code = EPSG
# now add all attributes from user-defined metadata
for attr in metadata:
nc_trg.setncattr(attr, metadata[attr])
nc_trg.sync()
nc_trg.close()
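# Illustrative sketch (not part of the original module): preparing a small
# daily WGS84 netCDF file with prepare_nc. File name, grid and dates are
# examples; `logger` is assumed to be a configured python logging object.
def _example_prepare_nc(logger):
    tlist = [dt.datetime(2000, 1, 1) + dt.timedelta(days=i) for i in range(3)]
    xs = arange(5.05, 6.0, 0.1)
    ys = arange(52.95, 52.0, -0.1)
    prepare_nc("example_out.nc", tlist, xs, ys, globmetadata, logger)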
class netcdfoutput():
def __init__(self, netcdffile, logger, starttime, timesteps, EPSG="EPSG:4326", timestepsecs=86400,
metadata={}, zlib=True, Format="NETCDF4",
maxbuf=25, least_significant_digit=None):
"""
Under construction
"""
self.EPSG = EPSG
self.zlib = zlib
self.Format = Format
self.least_significant_digit = least_significant_digit
def date_range(start, end, timestepsecs):
r = int((end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()/timestepsecs)
return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]
self.logger = logger
# Do not allow a max buffer larger than the number of timesteps
self.maxbuf = maxbuf if timesteps >= maxbuf else timesteps
self.ncfile = netcdffile
self.timesteps = timesteps
rows = pcraster._pcraster.clone().nrRows()
cols = pcraster._pcraster.clone().nrCols()
cellsize = pcraster._pcraster.clone().cellSize()
yupper = pcraster._pcraster.clone().north()
xupper = pcraster._pcraster.clone().west()
x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[0, :]
y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[:, 0]
# Shift one timestep as we output at the end
#starttime = starttime + dt.timedelta(seconds=timestepsecs)
end = starttime + dt.timedelta(seconds=timestepsecs * (self.timesteps -1))
timeList = date_range(starttime, end, timestepsecs)
self.timestepbuffer = zeros((self.maxbuf, len(y), len(x)))
self.bufflst = {}
globmetadata.update(metadata)
prepare_nc(self.ncfile, timeList, x, y, globmetadata, logger, Format=self.Format, EPSG=EPSG,zlib=self.zlib,
least_significant_digit=self.least_significant_digit)
def savetimestep(self, timestep, pcrdata, unit="mm", var='P', name="Precipitation"):
"""
save a single timestep for a variable
input:
- timestep - current timestep
- pcrdata - pcraster map to save
- unit - unit string
- var - variable string
- name - name of the variable
"""
# Open target netCDF file
var = os.path.basename(var)
self.nc_trg = netCDF4.Dataset(self.ncfile, 'a', format=self.Format, zlib=self.zlib, complevel=9)
self.nc_trg.set_fill_off()
# read time axis and convert to time objects
# TODO: use this to append time
# time = self.nc_trg.variables['time']
# timeObj = netCDF4.num2date(time[:], units=time.units, calendar=time.calendar)
idx = timestep - 1
buffreset = (idx + 1) % self.maxbuf
bufpos = (idx) % self.maxbuf
try:
nc_var = self.nc_trg.variables[var]
except:
self.logger.debug("Creating variable " + var + " in netcdf file. Format: " + self.Format)
if self.EPSG.lower() == "epsg:4326":
nc_var = self.nc_trg.createVariable(var, 'f4', ('time', 'lat', 'lon',), fill_value=-9999.0, zlib=self.zlib,
complevel=9, least_significant_digit=self.least_significant_digit)
nc_var.coordinates = "lat lon"
else:
nc_var = self.nc_trg.createVariable(var, 'f4', ('time', 'y', 'x',), fill_value=-9999.0, zlib=self.zlib,
complevel=9, least_significant_digit=self.least_significant_digit)
nc_var.coordinates = "lat lon"
nc_var.grid_mapping = "crs"
nc_var.units = unit
nc_var.standard_name = name
self.nc_trg.sync()
miss = float(nc_var._FillValue)
data = pcr2numpy(scalar(pcrdata), miss)
if var in self.bufflst:
self.bufflst[var][bufpos, :, :] = data
else:
self.bufflst[var] = self.timestepbuffer.copy()
self.bufflst[var][bufpos, :, :] = data
# Write out timestep buffer.....
if buffreset == 0 or idx == self.maxbuf - 1 or self.timesteps <= timestep:
spos = idx - bufpos
self.logger.debug(
"Writing buffer for " + var + " to file at: " + str(spos) + " " + str(int(bufpos) + 1) + " timesteps")
nc_var[spos:idx + 1, :, :] = self.bufflst[var][0:bufpos + 1, :, :]
self.nc_trg.sync()
def finish(self):
"""
Flushes and closes the netcdf file
:return: Nothing
"""
if hasattr(self, "nc_trg"):
self.nc_trg.sync()
self.nc_trg.close()
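# Illustrative usage sketch (not part of the original module): writing a few
# precipitation timesteps with the netcdfoutput class. It assumes a pcraster
# clone map has been set and `pmaps` is a list of pcraster scalar maps.
def _example_netcdfoutput(logger, pmaps, starttime):
    ncout = netcdfoutput("example_P.nc", logger, starttime, len(pmaps))
    for step, pmap in enumerate(pmaps, start=1):
        ncout.savetimestep(step, pmap, unit="mm", var="P", name="Precipitation")
    ncout.finish()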
class netcdfoutputstatic():
def __init__(self, netcdffile, logger, starttime, timesteps, EPSG="EPSG:4326", timestepsecs=86400,
metadata={}, zlib=True, Format="NETCDF4",
maxbuf=25, least_significant_digit=None):
"""
Under construction
"""
self.EPSG = EPSG
self.zlib = zlib
self.Format = Format
self.least_significant_digit = least_significant_digit
def date_range(start, end, timestepsecs):
r = int((end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()/timestepsecs)
return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]
self.logger = logger
# Do not allow a max buffer larger than the number of timesteps
self.maxbuf = maxbuf if timesteps >= maxbuf else timesteps
self.ncfile = netcdffile
self.timesteps = timesteps
rows = pcraster._pcraster.clone().nrRows()
cols = pcraster._pcraster.clone().nrCols()
cellsize = pcraster._pcraster.clone().cellSize()
yupper = pcraster._pcraster.clone().north()
xupper = pcraster._pcraster.clone().west()
x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[0, :]
y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[:, 0]
# Shift one timestep as we output at the end
#starttime = starttime + dt.timedelta(seconds=timestepsecs)
end = starttime + dt.timedelta(seconds=timestepsecs * (self.timesteps -1))
timeList = date_range(starttime, end, timestepsecs)
self.timestepbuffer = zeros((self.maxbuf, len(y), len(x)))
self.bufflst = {}
self.buffdirty = False
globmetadata.update(metadata)
prepare_nc(self.ncfile, timeList, x, y, globmetadata, logger, Format=self.Format, EPSG=EPSG,zlib=self.zlib,
least_significant_digit=self.least_significant_digit)
def savetimestep(self, timestep, pcrdata, unit="mm", var='P', name="Precipitation"):
"""
save a single timestep for a variable
input:
- timestep - current timestep
- pcrdata - pcraster map to save
- unit - unit string
- var - variable string
- name - name of the variable
"""
# Open target netCDF file
var = os.path.basename(var)
self.nc_trg = netCDF4.Dataset(self.ncfile, 'a', format=self.Format, zlib=self.zlib, complevel=9)
self.nc_trg.set_fill_off()
# read time axis and convert to time objects
# TODO: use this to append time
# time = self.nc_trg.variables['time']
# timeObj = netCDF4.num2date(time[:], units=time.units, calendar=time.calendar)
idx = timestep - 1
buffreset = (idx + 1) % self.maxbuf
bufpos = (idx) % self.maxbuf
try:
nc_var = self.nc_trg.variables[var]
except:
self.logger.debug("Creating variable " + var + " in netcdf file. Format: " + self.Format)
if self.EPSG.lower() == "epsg:4326":
nc_var = self.nc_trg.createVariable(var, 'f4', ('time', 'lat', 'lon',), fill_value=-9999.0, zlib=self.zlib,
complevel=9, least_significant_digit=self.least_significant_digit)
nc_var.coordinates = "lat lon"
else:
nc_var = self.nc_trg.createVariable(var, 'f4', ('time', 'y', 'x',), fill_value=-9999.0, zlib=self.zlib,
complevel=9, least_significant_digit=self.least_significant_digit)
nc_var.coordinates = "lat lon"
nc_var.grid_mapping = "crs"
nc_var.units = unit
nc_var.standard_name = name
self.nc_trg.sync()
miss = float(nc_var._FillValue)
data = pcr2numpy(scalar(pcrdata), miss)
if var in self.bufflst:
self.bufflst[var][bufpos, :, :] = data
self.buffdirty = True
else:
self.bufflst[var] = self.timestepbuffer.copy()
self.bufflst[var][bufpos, :, :] = data
self.buffdirty = True
# Write out timestep buffer.....
if buffreset == 0 or idx == self.maxbuf - 1 or self.timesteps <= timestep:
spos = idx - bufpos
self.logger.debug(
"Writing buffer for " + var + " to file at: " + str(spos) + " " + str(int(bufpos) + 1) + " timesteps")
nc_var[spos:idx + 1, :, :] = self.bufflst[var][0:bufpos + 1, :, :]
self.nc_trg.sync()
self.buffdirty = False
def finish(self):
"""
Flushes and closes the netcdf file
:return: Nothing
"""
if hasattr(self, "nc_trg"):
self.nc_trg.sync()
self.nc_trg.close()
if self.buffdirty:
self.logger.error('Finishing with dirty netcdf write buffer...!')
class netcdfinput():
def __init__(self, netcdffile, logging, vars=[]):
"""
First attempt at a class to read netcdf files
(converted with pcr2netcdf.py)
netcdffile: file to read the forcing data from
logging: python logging object
vars: list of variables to get from file
"""
if os.path.exists(netcdffile):
self.dataset = netCDF4.Dataset(netcdffile, mode='r')
else:
logging.error(os.path.abspath(netcdffile) + " not found!")
exit(ValueError)
logging.info("Reading input from netCDF file: " + netcdffile + ": " + str(self.dataset).replace('\n', ' '))
self.alldat = {}
a = pcr2numpy(cover(0.0), 0.0).flatten()
# Determine steps to load in mem based on estimated memory usage
floatspermb = 1048576 / 4
maxmb = 40
maxlentime = len(self.dataset.variables['time'])
self.maxsteps = int(minimum(maxmb * len(a) / floatspermb + 1, maxlentime - 1))
self.fstep = 0
self.lstep = self.fstep + self.maxsteps
self.datetime = self.dataset.variables['time'][:]
if hasattr(self.dataset.variables['time'],'units'):
self.timeunits=self.dataset.variables['time'].units
else:
self.timeunits ='Seconds since 1970-01-01 00:00:00'
if hasattr(self.dataset.variables['time'], 'calendar'):
self.calendar= self.dataset.variables['time'].calendar
else:
self.calendar ='gregorian'
self.datetimelist=netCDF4.num2date(self.datetime,self.timeunits, calendar=self.calendar)
try:
self.x = self.dataset.variables['x'][:]
except:
self.x = self.dataset.variables['lon'][:]
# Now check Y values to see if we must flip the data
try:
self.y = self.dataset.variables['y'][:]
except:
self.y = self.dataset.variables['lat'][:]
# test if 1D or 2D array
if len(self.y.shape) == 1:
if self.y[0] > self.y[-1]:
self.flip = False
else:
self.flip = True
else: # not sure if this works
self.y = self.y[:][0]
if self.y[0] > self.y[-1]:
self.flip = False
else:
self.flip = True
x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[0, :]
y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[:, 0]
#Get average cell size
acc = diff(x).mean() * 0.25
if self.flip:
(self.latidx,) = logical_and(self.y[::-1] +acc >= y.min(), self.y[::-1] <= y.max() + acc).nonzero()
(self.lonidx,) = logical_and(self.x + acc >= x.min(), self.x <= x.max() + acc).nonzero()
else:
(self.latidx,) = logical_and(self.y +acc >= y.min(), self.y <= y.max() + acc).nonzero()
(self.lonidx,) = logical_and(self.x +acc >= x.min(), self.x <= x.max() + acc).nonzero()
if len(self.lonidx) != len(x):
logging.error("error in determining X coordinates in netcdf...")
if len(self.latidx) != len(y):
logging.error("error in determining X coordinates in netcdf...")
for var in vars:
try:
self.alldat[var] = self.dataset.variables[var][self.fstep:self.maxsteps]
except:
self.alldat.pop(var, None)
logging.warn("Variable " + var + " not found in netcdf file: " + netcdffile)
def gettimestep(self, timestep, logging, tsdatetime=None, var='P', shifttime=False):
"""
Gets a map for a single timestep. Reads data in blocks assuming sequential access.
:var timestep: framework timestep (1-based)
:var logging: python logging object
:var var: variable to get from the file
:var shifttime: if True, start at index 1 in the NC file (instead of 0)
:var tsdatetime: assumed date/time of this timestep
window = data[dpos,latidx.min():latidx.max()+1,lonidx.min():lonidx.max()+1]
"""
if shifttime:
ncindex = timestep
else:
ncindex = timestep - 1
if tsdatetime != None:
if tsdatetime != self.datetimelist[ncindex]:
logging.warn("Date/time does not match. Wanted " + str(tsdatetime) + " got " + str(self.datetimelist[ncindex]))
logging.warn("Index: " + str(ncindex) + " Par: " + var)
if var in self.alldat:
if ncindex == self.lstep: # Read new block of data in mem
logging.debug("reading new netcdf data block starting at: " + str(ncindex))
for vars in self.alldat:
self.alldat[vars] = self.dataset.variables[vars][ncindex:ncindex + self.maxsteps]
self.fstep = ncindex
self.lstep = ncindex + self.maxsteps
np_step = self.alldat[var][ncindex - self.fstep, self.latidx.min():self.latidx.max()+1,
self.lonidx.min():self.lonidx.max()+1]
miss = float(self.dataset.variables[var]._FillValue)
if self.flip:
return numpy2pcr(Scalar, flipud(np_step).copy(), miss), True
else:
return numpy2pcr(Scalar, np_step, miss), True
else:
#logging.debug("Var (" + var + ") not found returning 0")
return cover(scalar(0.0)), False
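# Illustrative usage sketch (not part of the original module): reading the
# first precipitation step from a forcing netCDF file with netcdfinput. The
# file name is an example; a pcraster clone map must be set beforehand.
def _example_netcdfinput(logger):
    ncin = netcdfinput("example_forcing.nc", logger, vars=["P"])
    pmap, found = ncin.gettimestep(1, logger, var="P")
    return pmap, found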
class netcdfinputstates():
def __init__(self, netcdffile, logging, vars=[]):
"""
First attempt at a class to read netcdf files
(converted with pcr2netcdf.py)
netcdffile: file to read the forcing data from
logging: python logging object
vars: list of variables to get from file
"""
if os.path.exists(netcdffile):
self.dataset = netCDF4.Dataset(netcdffile, mode='r')
else:
logging.error(os.path.abspath(netcdffile) + " not found!")
exit(ValueError)
logging.info("Reading state input from netCDF file: " + netcdffile + ": " + str(self.dataset).replace('\n', ' '))
self.alldat = {}
a = pcr2numpy(cover(0.0), 0.0).flatten()
# Determine steps to load in mem based on estimated memory usage
floatspermb = 1048576 / 4
maxmb = 40
self.maxsteps = int(maxmb * len(a) / floatspermb + 1)
self.fstep = 0
self.lstep = self.fstep + self.maxsteps
try:
self.x = self.dataset.variables['x'][:]
except:
self.x = self.dataset.variables['lon'][:]
# Now check Y values to see if we must flip the data
try:
self.y = self.dataset.variables['y'][:]
except:
self.y = self.dataset.variables['lat'][:]
x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[0, :]
y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[:, 0]
(self.latidx,) = logical_and(self.y >= y.min(), self.y < y.max()).nonzero()
(self.lonidx,) = logical_and(self.x >= x.min(), self.x < x.max()).nonzero()
for var in vars:
try:
self.alldat[var] = self.dataset.variables[var][self.fstep:self.maxsteps]
except:
self.alldat.pop(var, None)
logging.warn("Variable " + var + " not found in netcdf file: " + netcdffile)
def gettimestep(self, timestep, logging, var='P'):
"""
Gets a map for a single timestep. Reads data in blocks assuming sequential access.
timestep: framework timestep (1-based)
logging: python logging object
var: variable to get from the file
"""
ncindex = timestep - 1
if var in self.alldat:
if ncindex == self.lstep: # Read new block of data in mem
logging.debug("reading new netcdf data block starting at: " + str(ncindex))
for vars in self.alldat:
self.alldat[vars] = self.dataset.variables[vars][ncindex:ncindex + self.maxsteps]
self.fstep = ncindex
self.lstep = ncindex + self.maxsteps
np_step = self.alldat[var][ncindex - self.fstep, self.latidx.min():self.latidx.max() + 1,
self.lonidx.min():self.lonidx.max() + 1]
miss = float(self.dataset.variables[var]._FillValue)
return numpy2pcr(Scalar, np_step, miss), True
else:
logging.debug("Var (" + var + ") not found returning 0")
return cover(scalar(0.0)), False
class netcdfinputstatic():
def __init__(self, netcdffile, logging):
"""
First attempt at a class to read netcdf files
(converted with pcr2netcdf.py)
netcdffile: file to read the forcing data from
logging: python logging object
vars: list of variables to get from file
"""
if os.path.exists(netcdffile):
self.dataset = netCDF4.Dataset(netcdffile, mode='r')
else:
logging.error(os.path.abspath(netcdffile) + " not found!")
exit(ValueError)
try:
self.x = self.dataset.variables['x'][:]
except:
self.x = self.dataset.variables['lon'][:]
# Now check Y values to see if we must flip the data
try:
self.y = self.dataset.variables['y'][:]
except:
self.y = self.dataset.variables['lat'][:]
x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[0, :]
y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[:, 0]
(self.latidx,) = logical_and(self.y >= y.min(), self.y < y.max()).nonzero()
(self.lonidx,) = logical_and(self.x >= x.min(), self.x < x.max()).nonzero()
logging.info("Reading static input from netCDF file: " + netcdffile + ": " + str(self.dataset).replace('\n', ' '))
def gettimestep(self, timestep, logging, var='P'):
"""
Gets a map for a single timestep. Reads data in blocks assuming sequential access.
timestep: framework timestep (1-based)
logging: python logging object
var: variable to get from the file
"""
if var in self.dataset.variables:
np_step = self.dataset.variables[var][timestep-1, self.latidx.min():self.latidx.max() + 1,
self.lonidx.min():self.lonidx.max() + 1]
miss = float(self.dataset.variables[var]._FillValue)
return numpy2pcr(Scalar, np_step, miss), True
else:
logging.debug("Var (" + var + ") not found returning 0")
return cover(scalar(0.0)), False
|
import sys
import json
import os.path
import importlib.util
import conducto as co
from conducto.shared import constants
from conducto import api
from conducto.contrib.discover.cli import discover_cli
from conducto.debug import debug, livedebug
from conducto.glue import method
import asyncio
def show(id, app=method._get_default_app(), shell=method._get_default_shell()):
"""
Attach to an active pipeline. If it is sleeping, it will be awakened.
"""
from .internal import build
pl = constants.PipelineLifecycle
pipeline_id = id
token = co.api.Config().get_token_from_shell(force=True)
pipeline = method._get_pipeline_validated(token, pipeline_id)
perms = co.api.Pipeline().perms(pipeline_id, token=token)
status = pipeline["status"]
if status not in pl.active | pl.standby and status in pl.local:
local_basedir = constants.ConductoPaths.get_profile_base_dir()
cpser = constants.ConductoPaths.SERIALIZATION
serialization_path = f"{local_basedir}/pipelines/{pipeline_id}/{cpser}"
if not os.path.exists(serialization_path):
m = (
f"The serialization for {pipeline_id} could not be found. "
"This is likely because it is local to another computer."
)
host = pipeline["meta"].get("hostname", None)
if host is not None:
m += f" Try waking it from '{host}' with conducto show."
m += " For further assistance, contact us on Slack at ConductoHQ."
print(m, file=sys.stderr)
sys.exit(1)
def cloud_wakeup():
co.api.Manager().launch(pipeline_id, token=token)
def local_wakeup():
build.run_in_local_container(token, pipeline_id, update_token=True)
if status in pl.active | pl.standby:
if not app and not shell:
print(f"Pipeline {pipeline_id} is already running.")
return
msg = "Connecting to"
func = lambda: 0
starting = True
elif status in pl.local:
if constants.Perms.LAUNCH not in perms:
raise PermissionError(
f"Pipeline {pipeline_id} is sleeping and you do not have permissions to wake it."
)
func = local_wakeup
msg = "Waking"
starting = True
elif status in pl.cloud:
if constants.Perms.LAUNCH not in perms:
raise PermissionError(
f"Pipeline {pipeline_id} is sleeping and you do not have permissions to wake it."
)
func = cloud_wakeup
msg = "Waking"
starting = False
else:
raise RuntimeError(
f"Pipeline status {pipeline['status']} for {pipeline_id} is not recognized."
)
build.run(token, pipeline_id, func, app, shell, msg, starting)
async def sleep(id):
pipeline_id = id
token = co.api.Config().get_token_from_shell(force=True)
pipeline = method._get_pipeline_validated(token, pipeline_id)
status = pipeline["status"]
pl = constants.PipelineLifecycle
if status in pl.active:
conn = await co.api.connect_to_pipeline(pipeline_id, token=token)
try:
await conn.send(json.dumps({"type": "CLOSE_PROGRAM"}))
async def await_confirm(conn):
was_slept = False
async for msg_text in conn:
msg = json.loads(msg_text)
if msg["type"] == "SLEEP":
was_slept = True
# we are done here, acknowledged!
break
return was_slept
# 60 seconds is an extravagantly long expectation here, but it is
# intended to cover our bases and only error on true errors.
await asyncio.wait_for(await_confirm(conn), timeout=60.0)
except asyncio.TimeoutError:
print("The pipeline was not slept successfully.", file=sys.stderr)
sys.exit(1)
finally:
await conn.close()
else:
co.api.Pipeline().sleep_standby(pipeline_id, token=token)
def dump_serialization(id, outfile=None):
import gzip
import base64
string = gzip.decompress(base64.b64decode(method.return_serialization(id)))
data = json.loads(string)
if outfile is None:
print(json.dumps(data, indent=4, sort_keys=True))
else:
with open(outfile, "w") as f2:
json.dump(data, f2)
def _load_file_module(filename):
# put the directory of the file on sys.path for relative imports
norm = os.path.realpath(filename)
sys.path.append(os.path.dirname(norm))
# get module name
basename = os.path.basename(filename)
modname = os.path.splitext(basename)[0]
# import
spec = importlib.util.spec_from_file_location(modname, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
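# Illustrative only (the file name is hypothetical): main() below uses this to
# expose a pipeline file's top-level names to co.main(), roughly as in
#
#   mod = _load_file_module("pipeline.py")
#   variables = {k: getattr(mod, k) for k in dir(mod)}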
def share_directory(
name,
relative,
):
from conducto.image import Image
Image.share_directory(name, relative)
def validate_serialization():
serialization = sys.stdin.read()
node = co.Node.deserialize(serialization)
if os.getenv("__RUN_BY_WORKER__"):
# Variable is set in conducto_worker/__main__.py to avoid
# printing ugly serialization when not needed.
from conducto.internal.build import validate_tree
validate_tree(node, cloud=False, check_images=False, set_default=False)
s = node.serialize()
print(f"\n<__conducto_serialization>{s}</__conducto_serialization>\n")
print(node.pretty(strict=False))
def build(
shell=False,
app=True,
local=False,
cloud=False,
retention=7,
is_public=False,
run=False,
run_at=None,
):
from conducto.internal import build
    assert local ^ cloud, "Exactly one of 'local' or 'cloud' must be specified"
serialization = sys.stdin.read()
node = co.Node.deserialize(serialization)
node._autorun = run
node._autorun_at = run_at
build.build(
node,
use_shell=shell,
use_app=app,
build_mode=constants.BuildMode.LOCAL
if local
else constants.BuildMode.DEPLOY_TO_CLOUD,
retention=retention,
is_public=is_public,
)
def main():
# _thisfile, file_to_execute, *arguments = sys.argv
args = sys.argv[1:]
if not args or args[0] in (
"-h",
"--help",
"--version",
"show",
"debug",
"livedebug",
"dump-serialization",
"validate-serialization",
"share-directory",
"build",
"sleep",
"discover",
):
variables = {
"build": build,
"show": show,
"debug": debug,
"livedebug": livedebug,
"dump-serialization": dump_serialization,
"validate-serialization": validate_serialization,
"share-directory": share_directory,
"sleep": sleep,
"discover": discover_cli,
}
co.main(variables=variables)
else:
file_to_execute, *arguments = args
if file_to_execute.endswith(".js"):
fxn, props = arguments
script = f'let out = require("./{file_to_execute}").{fxn}({props});'
script += (
'if(typeof(out) == "object") { '
"let cls_name = out.constructor.name; "
'if(cls_name == "Exec" || cls_name == "Notebook" || cls_name == "Parallel" || cls_name == "Serial") out.output();}'
)
to_exec = f"node -r esm -e '{script}'"
import subprocess
subprocess.run(to_exec, shell=True, check=True)
return
if not os.path.exists(file_to_execute):
print(f"No such file or directory: '{file_to_execute}'", file=sys.stderr)
sys.exit(1)
if file_to_execute.endswith(".cfg"):
with open(file_to_execute) as f:
co.glue.run_cfg(f, arguments)
else:
module = _load_file_module(file_to_execute)
variables = {k: getattr(module, k) for k in dir(module)}
co.main(variables=variables, argv=arguments, filename=file_to_execute)
if __name__ == "__main__":
main()
|
<reponame>steinst/ABLTagger<filename>preprocess/vectorize_dim.py
import numpy
import argparse
import sys
tag_matrix = {"no":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"lo":[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"fn":[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"gr":[0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"to":[0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"so":[0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"ao":[0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"st":[0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"e":[0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"x":[0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"kk":[0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"kvk":[0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"hk":[0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"ókyngr":[0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"et":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"ft":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"nf":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"þf":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"þgf":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"ef":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"vsk_gr":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"sérn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"sb":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"vb":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"ób":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"fst":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"mst":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"est":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"ábfn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"óákv_ábfn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"efn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"óákv_fn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"pfn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"sfn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"tfn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"1p":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"2p":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"3p":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"frumt":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"árt":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"prós":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"fjöldat":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"nh":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"bh":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"fh":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"vh":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"sagnb":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"lhn":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"lhþ":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"gm":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"mm":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"nt":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
"þt":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],
"ekki_fallst":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],
"upphr":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0],
"st_þol":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0],
"st_þag":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
"st_ef":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
"nhm":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0],
"tilvt":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0],
"stýfður":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0],
"afn": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],
"óp": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
"spurnar": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0],
"sérst": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]}
def build_tagarray(current):
temp = ''
#cases
if current.startswith('NF'):
temp = numpy.array(tag_matrix['nf'])
current = current[2:]
elif current.startswith('ÞGF'):
temp = numpy.array(tag_matrix['þgf'])
current = current[3:]
elif current.startswith('ÞF'):
temp = numpy.array(tag_matrix['þf'])
current = current[2:]
elif current.startswith('EF'):
temp = numpy.array(tag_matrix['ef'])
current = current[2:]
elif current.startswith('-NF'):
temp = numpy.array(tag_matrix['nf'])
current = current[3:]
elif current.startswith('-ÞF'):
temp = numpy.array(tag_matrix['þf'])
current = current[3:]
elif current.startswith('-ÞGF'):
temp = numpy.array(tag_matrix['þgf'])
current = current[4:]
elif current.startswith('-EF'):
temp = numpy.array(tag_matrix['ef'])
current = current[3:]
elif current.startswith('_NF'):
temp = numpy.array(tag_matrix['nf'])
current = current[3:]
elif current.startswith('_ÞF'):
temp = numpy.array(tag_matrix['þf'])
current = current[3:]
elif current.startswith('_ÞGF'):
temp = numpy.array(tag_matrix['þgf'])
current = current[4:]
elif current.startswith('_EF'):
temp = numpy.array(tag_matrix['ef'])
current = current[3:]
#aðeins óákveðin fornöfn eru sérstæð
elif current.startswith('-SERST'):
temp = numpy.array(tag_matrix['sérst'])
current = current[6:]
#number
elif current.startswith('ET'):
temp = numpy.array(tag_matrix['et'])
current = current[2:]
elif current.startswith('FT'):
temp = numpy.array(tag_matrix['ft'])
current = current[2:]
elif current.startswith('-ET'):
temp = numpy.array(tag_matrix['et'])
current = current[3:]
elif current.startswith('-FT'):
temp = numpy.array(tag_matrix['ft'])
current = current[3:]
#person
elif current.startswith('1P'):
temp = numpy.array(tag_matrix['1p'])
current = current[2:]
elif current.startswith('2P'):
temp = numpy.array(tag_matrix['2p'])
current = current[2:]
elif current.startswith('3P'):
temp = numpy.array(tag_matrix['3p'])
current = current[2:]
elif current.startswith('-1P'):
temp = numpy.array(tag_matrix['1p'])
current = current[3:]
elif current.startswith('-2P'):
temp = numpy.array(tag_matrix['2p'])
current = current[3:]
elif current.startswith('-3P'):
temp = numpy.array(tag_matrix['3p'])
current = current[3:]
#article
elif current.startswith('gr'):
temp = numpy.array(tag_matrix['vsk_gr'])
current = current[2:]
elif current.startswith('LHÞT'):
temp = numpy.array(tag_matrix['lhþ'])
current = current[4:]
elif current.startswith('-VB'):
temp = numpy.array(tag_matrix['vb'])
current = current[3:]
elif current.startswith('-SB'):
temp = numpy.array(tag_matrix['sb'])
current = current[3:]
elif current.startswith('OP'):
temp = numpy.array(tag_matrix['óp'])
current = current[2:]
elif current.startswith('-það'):
temp = numpy.array(tag_matrix['óp'])
current = current[4:]
elif current.startswith('LHNT'):
temp = numpy.array(tag_matrix['lhn'])
current = current[4:]
elif current.startswith('LH-NT'):
temp = numpy.array(tag_matrix['lhn'])
current = current[5:]
elif current.startswith('SP'):
temp = numpy.array(tag_matrix['spurnar'])
current = current[2:]
#gender
elif current.startswith('-KK'):
temp = numpy.array(tag_matrix['kk'])
current = current[3:]
elif current.startswith('-KVK'):
temp = numpy.array(tag_matrix['kvk'])
current = current[4:]
elif current.startswith('-HK'):
temp = numpy.array(tag_matrix['hk'])
current = current[3:]
elif current.startswith('KK'):
temp = numpy.array(tag_matrix['kk'])
current = current[2:]
elif current.startswith('KVK'):
temp = numpy.array(tag_matrix['kvk'])
current = current[3:]
elif current.startswith('HK'):
temp = numpy.array(tag_matrix['hk'])
current = current[2:]
#voice
elif current.startswith('MM'):
temp = numpy.array(tag_matrix['mm'])
current = current[2:]
elif current.startswith('-MM'):
temp = numpy.array(tag_matrix['mm'])
current = current[3:]
elif current.startswith('GM'):
temp = numpy.array(tag_matrix['gm'])
current = current[2:]
elif current.startswith('-GM'):
temp = numpy.array(tag_matrix['gm'])
current = current[3:]
#mood
elif current.startswith('-NH'):
temp = numpy.array(tag_matrix['nh'])
current = current[3:]
elif current.startswith('-FH'):
temp = numpy.array(tag_matrix['fh'])
current = current[3:]
elif current.startswith('-VH'):
temp = numpy.array(tag_matrix['vh'])
current = current[3:]
elif current.startswith('-BH'):
temp = numpy.array(tag_matrix['bh'])
current = current[3:]
elif current.startswith('-SAGNB'):
temp = numpy.array(tag_matrix['sagnb'])
current = current[6:]
elif current.startswith('-ST'):
temp = numpy.array(tag_matrix['stýfður'])
current = current[3:]
#tense
elif current.startswith('-NT'):
temp = numpy.array(tag_matrix['nt'])
current = current[3:]
elif current.startswith('-ÞT'):
temp = numpy.array(tag_matrix['þt'])
current = current[3:]
elif current.startswith('FSB'):
temp = numpy.array(tag_matrix['fst'])
temp += numpy.array(tag_matrix['sb'])
current = current[3:]
elif current.startswith('FVB'):
temp = numpy.array(tag_matrix['fst'])
temp += numpy.array(tag_matrix['vb'])
current = current[3:]
elif current.startswith('ESB'):
temp = numpy.array(tag_matrix['est'])
temp += numpy.array(tag_matrix['sb'])
current = current[3:]
elif current.startswith('EVB'):
temp = numpy.array(tag_matrix['est'])
temp += numpy.array(tag_matrix['vb'])
current = current[3:]
elif current.startswith('FST'):
temp = numpy.array(tag_matrix['fst'])
current = current[3:]
elif current.startswith('MSTSB'):
temp = numpy.array(tag_matrix['mst'])
temp += numpy.array(tag_matrix['sb'])
current = current[5:]
elif current.startswith('MST2'):
temp = numpy.array(tag_matrix['mst'])
current = current[4:]
elif current.startswith('MST'):
temp = numpy.array(tag_matrix['mst'])
current = current[3:]
elif current.startswith('EST'):
temp = numpy.array(tag_matrix['est'])
current = current[3:]
elif current.startswith('OBEYGJANLEGT'):
temp = numpy.array(tag_matrix['ób'])
current = current[12:]
return current, temp
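# Illustrative walk-through (the tag string below is a made-up example): a DIM
# mark is consumed from left to right, one feature at a time, e.g.
#
#   current, vec = build_tagarray('NFET')    # -> ('ET', one-hot row for 'nf')
#   current, vec = build_tagarray(current)   # -> ('',   one-hot row for 'et')
#
# vectorise_all() below keeps calling build_tagarray() until the string is
# empty and sums the resulting one-hot rows into a single 65-element vector.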
def vectorise_all(word_form_list, outfile):
bin_dict = {}
ctr = 0
wfl_length = len(word_form_list)
for i in word_form_list:
ctr += 1
if ctr % 10000 == 0:
print(str(ctr) + ' of ' + str(wfl_length))
current_2 = i.split(';')[2].strip()
current_3 = i.split(';')[3].strip()
current = i.split(';')[5].strip().strip('2').strip('3')
current_wordform = i.split(';')[4].strip()
        # creating the vectors - do this more properly
        # reset the vector for every entry so that the categories using '+='
        # below do not inherit values from the previous word form
        temp = numpy.zeros(len(tag_matrix['no']), dtype=int)
if current_2 == 'kk':
temp = numpy.array(tag_matrix['no']) + numpy.array(tag_matrix['kk'])
if current_2 == 'kvk':
temp = numpy.array(tag_matrix['no']) + numpy.array(tag_matrix['kvk'])
if current_2 == 'hk':
temp = numpy.array(tag_matrix['no']) + numpy.array(tag_matrix['hk'])
if current_2 == 'so':
temp = numpy.array(tag_matrix['so'])
if current_2 == 'lo':
temp = numpy.array(tag_matrix['lo'])
if current_2 == 'to':
temp = numpy.array(tag_matrix['to'])
if current_2 == 'gr':
temp = numpy.array(tag_matrix['gr'])
if current_2 == 'ao':
temp = numpy.array(tag_matrix['ao'])
if current_2 == 'fn':
temp = numpy.array(tag_matrix['fn'])
if current_2 == 'rt': #add to tag_matrix?
temp = numpy.array(tag_matrix['lo'])
if current_2 == 'pfn':
temp = numpy.array(tag_matrix['fn']) + numpy.array(tag_matrix['pfn'])
if current_2 == 'fs':
temp += numpy.array(tag_matrix['st_þag']) + numpy.array(tag_matrix['st_þol']) + numpy.array(tag_matrix['st_ef'])
if current_2 == 'st':
temp += numpy.array(tag_matrix['st'])
if current_2 == 'nhm':
temp += numpy.array(tag_matrix['nhm'])
if current_2 == 'uh':
temp += numpy.array(tag_matrix['upphr'])
if current_2 == 'afn':
temp += numpy.array(tag_matrix['afn'])
# In the latest version of DIM there may be more categories of proper nouns
if current_3 in ['heö','fyr','örn','föð','ism','móð','gæl','lönd','erl','göt','hetja','mvirk','bær','þor','hug','erm','dýr','ætt']:
temp += numpy.array(tag_matrix['sérn'])
while len(current) > 0:
current_out, mark = build_tagarray(current)
if current_out == current:
print(i, current)
current = ''
else:
temp += numpy.array(mark)
current = current_out
#using a dict for it all - merging all possibilities for a wordform into one vector
if current_wordform in bin_dict:
bin_dict[current_wordform] = numpy.logical_or(bin_dict[current_wordform], temp)
else:
bin_dict[current_wordform] = temp
with open(outfile, "w") as f:
for j in bin_dict.keys():
if len(j) > 0:
if len(bin_dict[j]) > 0:
try:
f.write(j + ';' + numpy.array2string(1 * numpy.array(bin_dict[j]), max_line_width=200,
separator=',') + '\n')
                    except Exception:
                        print('could not write vector for ' + j + ': ' + repr(bin_dict[j]), file=sys.stderr)
                        sys.exit(1)
if __name__ == '__main__':
# reading input parameters
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Name of input file.', default="./data/SHsnid.csv")
parser.add_argument('--output', '-o', help='Name of output file.', default="./extra/dmii.vectors")
try:
args = parser.parse_args()
except:
sys.exit(0)
    with open(args.input, 'r') as dim_file:
        wordforms = dim_file.readlines()
vectorise_all(wordforms, args.output)
|
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import time
import threading as mt
import radical.utils as ru
from . import utils as rpu
from . import states as rps
from . import constants as rpc
from . import compute_unit_description as rpcud
# bulk callbacks are implemented, but are currently not used nor exposed.
_USE_BULK_CB = False
if os.environ.get('RADICAL_PILOT_BULK_CB', '').lower() in ['true', 'yes', '1']:
_USE_BULK_CB = True
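# Illustrative: bulk callbacks can be switched on from the shell before the
# application starts, e.g.
#
#   export RADICAL_PILOT_BULK_CB=1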
# ------------------------------------------------------------------------------
#
class UnitManager(rpu.Component):
"""
A UnitManager manages :class:`radical.pilot.ComputeUnit` instances which
represent the **executable** workload in RADICAL-Pilot. A UnitManager
connects the ComputeUnits with one or more :class:`Pilot` instances (which
represent the workload **executors** in RADICAL-Pilot) and a **scheduler**
which determines which :class:`ComputeUnit` gets executed on which
:class:`Pilot`.
**Example**::
s = rp.Session(database_url=DBURL)
pm = rp.PilotManager(session=s)
pd = rp.ComputePilotDescription()
pd.resource = "futuregrid.alamo"
pd.cores = 16
p1 = pm.submit_pilots(pd) # create first pilot with 16 cores
p2 = pm.submit_pilots(pd) # create second pilot with 16 cores
# Create a workload of 128 '/bin/sleep' compute units
compute_units = []
for unit_count in range(0, 128):
cu = rp.ComputeUnitDescription()
cu.executable = "/bin/sleep"
cu.arguments = ['60']
compute_units.append(cu)
# Combine the two pilots, the workload and a scheduler via
# a UnitManager.
        um = rp.UnitManager(session=s, scheduler=rp.SCHEDULER_ROUND_ROBIN)
        um.add_pilots([p1, p2])
um.submit_units(compute_units)
    The unit manager can issue notifications on unit state changes: whenever a
    state notification arrives, any callback registered for that notification
    is fired.
    NOTE: State notifications can arrive out of order with respect to the unit
          state model!
"""
# --------------------------------------------------------------------------
#
def __init__(self, session, cfg='default', scheduler=None):
"""
Creates a new UnitManager and attaches it to the session.
**Arguments:**
* session [:class:`radical.pilot.Session`]:
The session instance to use.
* cfg (`dict` or `string`):
The configuration or name of configuration to use.
* scheduler (`string`):
The name of the scheduler plug-in to use.
**Returns:**
* A new `UnitManager` object [:class:`radical.pilot.UnitManager`].
"""
self._pilots = dict()
self._pilots_lock = ru.RLock('umgr.pilots_lock')
self._units = dict()
self._units_lock = ru.RLock('umgr.units_lock')
self._callbacks = dict()
self._cb_lock = ru.RLock('umgr.cb_lock')
self._terminate = mt.Event()
self._closed = False
self._rec_id = 0 # used for session recording
self._uid = ru.generate_id('umgr.%(item_counter)04d',
ru.ID_CUSTOM, ns=session.uid)
for m in rpc.UMGR_METRICS:
self._callbacks[m] = dict()
# NOTE: `name` and `cfg` are overloaded, the user cannot point to
        #       a predefined config and amend it at the same time. This might
# be ok for the session, but introduces a minor API inconsistency.
#
name = None
if isinstance(cfg, str):
name = cfg
cfg = None
cfg = ru.Config('radical.pilot.umgr', name=name, cfg=cfg)
cfg.uid = self._uid
cfg.owner = self._uid
cfg.sid = session.uid
cfg.base = session.base
cfg.path = session.path
cfg.dburl = session.dburl
cfg.heartbeat = session.cfg.heartbeat
if scheduler:
# overwrite the scheduler from the config file
cfg.scheduler = scheduler
rpu.Component.__init__(self, cfg, session=session)
self.start()
self._log.info('started umgr %s', self._uid)
self._rep.info('<<create unit manager')
# create pmgr bridges and components, use session cmgr for that
self._cmgr = rpu.ComponentManager(self._cfg)
self._cmgr.start_bridges()
self._cmgr.start_components()
# The output queue is used to forward submitted units to the
# scheduling component.
self.register_output(rps.UMGR_SCHEDULING_PENDING,
rpc.UMGR_SCHEDULING_QUEUE)
# the umgr will also collect units from the agent again, for output
# staging and finalization
if self._cfg.bridges.umgr_staging_output_queue:
self._has_sout = True
self.register_output(rps.UMGR_STAGING_OUTPUT_PENDING,
rpc.UMGR_STAGING_OUTPUT_QUEUE)
else:
self._has_sout = False
# register the state notification pull cb
# FIXME: this should be a tailing cursor in the update worker
self.register_timed_cb(self._state_pull_cb,
timer=self._cfg['db_poll_sleeptime'])
# register callback which pulls units back from agent
# FIXME: this should be a tailing cursor in the update worker
self.register_timed_cb(self._unit_pull_cb,
timer=self._cfg['db_poll_sleeptime'])
# also listen to the state pubsub for unit state changes
self.register_subscriber(rpc.STATE_PUBSUB, self._state_sub_cb)
# let session know we exist
self._session._register_umgr(self)
self._prof.prof('setup_done', uid=self._uid)
self._rep.ok('>>ok\n')
# --------------------------------------------------------------------------
#
def initialize(self):
# the manager must not carry bridge and component handles across forks
ru.atfork(self._atfork_prepare, self._atfork_parent, self._atfork_child)
# --------------------------------------------------------------------------
#
# EnTK forks, make sure we don't carry traces of children across the fork
#
def _atfork_prepare(self): pass
def _atfork_parent(self) : pass
def _atfork_child(self) :
self._bridges = dict()
self._components = dict()
# --------------------------------------------------------------------------
#
def finalize(self):
self._cmgr.close()
# --------------------------------------------------------------------------
#
def close(self):
"""
Shut down the UnitManager and all its components.
"""
# we do not cancel units at this point, in case any component or pilot
# wants to continue to progress unit states, which should indeed be
# independent from the umgr life cycle.
if self._closed:
return
self._terminate.set()
self._rep.info('<<close unit manager')
# disable callbacks during shutdown
with self._cb_lock:
self._callbacks = dict()
for m in rpc.UMGR_METRICS:
self._callbacks[m] = dict()
self._cmgr.close()
self._log.info("Closed UnitManager %s." % self._uid)
self._closed = True
self._rep.ok('>>ok\n')
# --------------------------------------------------------------------------
#
def as_dict(self):
"""
Returns a dictionary representation of the UnitManager object.
"""
ret = {
'uid': self.uid,
'cfg': self.cfg
}
return ret
# --------------------------------------------------------------------------
#
def __str__(self):
"""
Returns a string representation of the UnitManager object.
"""
return str(self.as_dict())
# --------------------------------------------------------------------------
#
def _pilot_state_cb(self, pilots, state=None):
if self._terminate.is_set():
return False
# we register this callback for pilots added to this umgr. It will
# specifically look out for pilots which complete, and will make sure
# that all units are pulled back into umgr control if that happens
# prematurely.
#
# If we find units which have not completed the agent part of the unit
# state model, we declare them FAILED. If they can be restarted, we
# resubmit an identical unit, which then will get a new unit ID. This
# avoids state model confusion (the state model is right now expected to
# be linear), but is not intuitive for the application (FIXME).
#
# FIXME: there is a race with the umgr scheduler which may, just now,
# and before being notified about the pilot's demise, send new
# units to the pilot.
# we only look into pilot states when the umgr is still active
# FIXME: note that there is a race in that the umgr can be closed while
# we are in the cb.
# FIXME: `self._closed` is not an `mt.Event`!
if self._closed:
self._log.debug('umgr closed, ignore pilot cb %s',
['%s:%s' % (p.uid, p.state) for p in pilots])
return True
if not isinstance(pilots, list):
pilots = [pilots]
for pilot in pilots:
state = pilot.state
if state in rps.FINAL:
self._log.debug('pilot %s is final - pull units', pilot.uid)
unit_cursor = self.session._dbs._c.find({
'type' : 'unit',
'pilot' : pilot.uid,
'umgr' : self.uid,
'control' : {'$in' : ['agent_pending', 'agent']}})
if not unit_cursor.count():
units = list()
else:
units = list(unit_cursor)
self._log.debug("units pulled: %3d (pilot dead)", len(units))
if not units:
continue
# update the units to avoid pulling them again next time.
# NOTE: this needs not locking with the unit pulling in the
# _unit_pull_cb, as that will only pull umgr_pending
# units.
uids = [unit['uid'] for unit in units]
self._session._dbs._c.update({'type' : 'unit',
'uid' : {'$in' : uids}},
{'$set' : {'control' : 'umgr'}},
multi=True)
to_restart = list()
for unit in units:
unit['state'] = rps.FAILED
if not unit['description'].get('restartable'):
self._log.debug('unit %s not restartable', unit['uid'])
continue
self._log.debug('unit %s is restartable', unit['uid'])
unit['restarted'] = True
ud = rpcud.ComputeUnitDescription(unit['description'])
to_restart.append(ud)
# FIXME: increment some restart counter in the description?
# FIXME: reference the resulting new uid in the old unit.
if to_restart and not self._closed:
self._log.debug('restart %s units', len(to_restart))
restarted = self.submit_units(to_restart)
for u in restarted:
self._log.debug('restart unit %s', u.uid)
# final units are not pushed
self.advance(units, publish=True, push=False)
# keep cb registered
return True
# --------------------------------------------------------------------------
#
def _state_pull_cb(self):
if self._terminate.is_set():
return False
# pull all unit states from the DB, and compare to the states we know
# about. If any state changed, update the unit instance and issue
# notification callbacks as needed. Do not advance the state (again).
# FIXME: we also pull for dead units. That is not efficient...
# FIXME: this needs to be converted into a tailed cursor in the update
# worker
units = self._session._dbs.get_units(umgr_uid=self.uid)
for unit in units:
if not self._update_unit(unit, publish=True, advance=False):
return False
return True
# --------------------------------------------------------------------------
#
def _unit_pull_cb(self):
if self._terminate.is_set():
return False
# pull units from the agent which are about to get back
# under umgr control, and push them into the respective queues
# FIXME: this should also be based on a tailed cursor
# FIXME: Unfortunately, 'find_and_modify' is not bulkable, so we have
# to use 'find'. To avoid finding the same units over and over
# again, we update the 'control' field *before* running the next
# find -- so we do it right here.
unit_cursor = self.session._dbs._c.find({'type' : 'unit',
'umgr' : self.uid,
'control' : 'umgr_pending'})
if not unit_cursor.count():
# no units whatsoever...
# self._log.info("units pulled: 0")
return True # this is not an error
# update the units to avoid pulling them again next time.
units = list(unit_cursor)
uids = [unit['uid'] for unit in units]
self._log.info("units pulled: %d", len(uids))
for unit in units:
unit['control'] = 'umgr'
self._session._dbs._c.update({'type' : 'unit',
'uid' : {'$in' : uids}},
{'$set' : {'control' : 'umgr'}},
multi=True)
self._log.info("units pulled: %4d", len(units))
self._prof.prof('get', msg="bulk size: %d" % len(units), uid=self.uid)
for unit in units:
# we need to make sure to have the correct state:
uid = unit['uid']
self._prof.prof('get', uid=uid)
old = unit['state']
new = rps._unit_state_collapse(unit['states'])
if old != new:
self._log.debug("unit pulled %s: %s / %s", uid, old, new)
unit['state'] = new
# now we really own the CUs, and can start working on them (ie. push
# them into the pipeline).
to_stage = list()
to_finalize = list()
for unit in units:
# only advance units to data stager if we need data staging
# = otherwise finalize them right away
if unit['description'].get('output_staging'):
to_stage.append(unit)
else:
to_finalize.append(unit)
# don't profile state transitions - those happened in the past
if to_stage:
if self._has_sout:
# normal route: needs data stager
self.advance(to_stage, publish=True, push=True, prof=False)
else:
self._log.error('output staging needed but not available!')
for unit in to_stage:
unit['target_state'] = rps.FAILED
to_finalize.append(unit)
if to_finalize:
# shortcut, skip the data stager, but fake state transition
self.advance(to_finalize, state=rps.UMGR_STAGING_OUTPUT,
publish=True, push=False)
            # move to final state
for unit in to_finalize:
unit['state'] = unit['target_state']
self.advance(to_finalize, publish=True, push=False)
return True
# --------------------------------------------------------------------------
#
def _state_sub_cb(self, topic, msg):
if self._terminate.is_set():
return False
cmd = msg.get('cmd')
arg = msg.get('arg')
if cmd != 'update':
self._log.debug('ignore state cb msg with cmd %s', cmd)
return True
if isinstance(arg, list): things = arg
else : things = [arg]
cb_requests = list()
for thing in things:
if thing.get('type') == 'unit':
# we got the state update from the state callback - don't
# publish it again
to_notify = self._update_unit(thing, publish=False,
advance=False)
if to_notify:
cb_requests += to_notify
else:
self._log.debug('umgr state cb ignores %s/%s', thing.get('uid'),
thing.get('state'))
if cb_requests:
if _USE_BULK_CB:
self._bulk_cbs(set([unit for unit,state in cb_requests]))
else:
for unit,state in cb_requests:
self._unit_cb(unit, state)
return True
# --------------------------------------------------------------------------
#
def _update_unit(self, unit_dict, publish=False, advance=False):
uid = unit_dict['uid']
# return information about needed callback and advance activities, so
# that we don't break bulks here.
# note however that individual unit callbacks are still being called on
# each unit (if any are registered), which can lead to arbitrary,
# application defined delays.
to_notify = list()
with self._units_lock:
# we don't care about units we don't know
if uid not in self._units:
self._log.debug('umgr: unknown: %s', uid)
return None
unit = self._units[uid]
# only update on state changes
current = unit.state
target = unit_dict['state']
if current == target:
self._log.debug('umgr: static: %s', uid)
return None
target, passed = rps._unit_state_progress(uid, current, target)
if target in [rps.CANCELED, rps.FAILED]:
# don't replay intermediate states
passed = passed[-1:]
for s in passed:
unit_dict['state'] = s
self._units[uid]._update(unit_dict)
to_notify.append([unit, s])
# we don't usually advance state at this point, but just keep up
# with state changes reported from elsewhere
if advance:
self.advance(unit_dict, s, publish=publish, push=False,
prof=False)
self._log.debug('umgr: notify: %s %s %s', len(to_notify), unit_dict,
unit_dict['state'])
return to_notify
# --------------------------------------------------------------------------
#
def _unit_cb(self, unit, state):
with self._cb_lock:
uid = unit.uid
cb_dicts = list()
metric = rpc.UNIT_STATE
# get wildcard callbacks
cb_dicts += self._callbacks[metric].get('*', {}).values()
cb_dicts += self._callbacks[metric].get(uid, {}).values()
for cb_dict in cb_dicts:
cb = cb_dict['cb']
cb_data = cb_dict['cb_data']
try:
if cb_data: cb(unit, state, cb_data)
else : cb(unit, state)
except:
self._log.exception('cb error (%s)', cb.__name__)
# --------------------------------------------------------------------------
#
def _bulk_cbs(self, units, metrics=None):
if not metrics: metrics = [rpc.UNIT_STATE]
else : metrics = ru.as_list(metrics)
cbs = dict() # bulked callbacks to call
with self._cb_lock:
for metric in metrics:
# get wildcard callbacks
cb_dicts = self._callbacks[metric].get('*')
for cb_name in cb_dicts:
cbs[cb_name] = {'cb' : cb_dicts[cb_name]['cb'],
'cb_data': cb_dicts[cb_name]['cb_data'],
'units' : set(units)}
# add unit specific callbacks if needed
for unit in units:
uid = unit.uid
if uid not in self._callbacks[metric]:
continue
cb_dicts = self._callbacks[metric].get(uid, {})
for cb_name in cb_dicts:
if cb_name in cbs:
cbs[cb_name]['units'].add(unit)
else:
cbs[cb_name] = {'cb' : cb_dicts[cb_name]['cb'],
'cb_data': cb_dicts[cb_name]['cb_data'],
'units' : set([unit])}
for cb_name in cbs:
cb = cbs[cb_name]['cb']
cb_data = cbs[cb_name]['cb_data']
objs = cbs[cb_name]['units']
if cb_data: cb(list(objs), cb_data)
else : cb(list(objs))
# --------------------------------------------------------------------------
#
# FIXME: this needs to go to the scheduler
def _default_wait_queue_size_cb(self, umgr, wait_queue_size):
# FIXME: this needs to come from the scheduler?
if self._terminate.is_set():
return False
self._log.info("[Callback]: wait_queue_size: %s.", wait_queue_size)
# --------------------------------------------------------------------------
#
@property
def uid(self):
"""
Returns the unique id.
"""
return self._uid
# --------------------------------------------------------------------------
#
@property
def scheduler(self):
"""
Returns the scheduler name.
"""
return self._cfg.get('scheduler')
# --------------------------------------------------------------------------
#
def add_pilots(self, pilots):
"""
Associates one or more pilots with the unit manager.
**Arguments:**
* **pilots** [:class:`radical.pilot.ComputePilot` or list of
:class:`radical.pilot.ComputePilot`]: The pilot objects that will be
added to the unit manager.
"""
if not isinstance(pilots, list):
pilots = [pilots]
if len(pilots) == 0:
            raise ValueError('cannot add an empty list of pilots')
with self._pilots_lock:
# sanity check, and keep pilots around for inspection
for pilot in pilots:
pid = pilot.uid
if pid in self._pilots:
raise ValueError('pilot %s already added' % pid)
self._pilots[pid] = pilot
# subscribe for state updates
pilot.register_callback(self._pilot_state_cb)
pilot_docs = [pilot.as_dict() for pilot in pilots]
# publish to the command channel for the scheduler to pick up
self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'add_pilots',
'arg' : {'pilots': pilot_docs,
'umgr' : self.uid}})
# --------------------------------------------------------------------------
#
def list_pilots(self):
"""
Lists the UIDs of the pilots currently associated with the unit manager.
**Returns:**
* A list of :class:`radical.pilot.ComputePilot` UIDs [`string`].
"""
with self._pilots_lock:
return list(self._pilots.keys())
# --------------------------------------------------------------------------
#
def get_pilots(self):
"""
Get the pilots instances currently associated with the unit manager.
**Returns:**
* A list of :class:`radical.pilot.ComputePilot` instances.
"""
with self._pilots_lock:
return list(self._pilots.values())
# --------------------------------------------------------------------------
#
def remove_pilots(self, pilot_ids, drain=False):
"""
Disassociates one or more pilots from the unit manager.
After a pilot has been removed from a unit manager, it won't process
any of the unit manager's units anymore. Calling `remove_pilots`
doesn't stop the pilot itself.
**Arguments:**
* **drain** [`boolean`]: Drain determines what happens to the units
which are managed by the removed pilot(s). If `True`, all units
currently assigned to the pilot are allowed to finish execution.
If `False` (the default), then non-final units will be canceled.
"""
# TODO: Implement 'drain'.
        # NOTE: the actual removal of pilots from the scheduler is asynchronous!
if drain:
raise RuntimeError("'drain' is not yet implemented")
if not isinstance(pilot_ids, list):
pilot_ids = [pilot_ids]
if len(pilot_ids) == 0:
            raise ValueError('cannot remove an empty list of pilots')
with self._pilots_lock:
# sanity check, and keep pilots around for inspection
for pid in pilot_ids:
if pid not in self._pilots:
                    raise ValueError('pilot %s not known to this umgr' % pid)
del(self._pilots[pid])
# publish to the command channel for the scheduler to pick up
self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'remove_pilots',
'arg' : {'pids' : pilot_ids,
'umgr' : self.uid}})
# --------------------------------------------------------------------------
#
def list_units(self):
"""
Returns the UIDs of the :class:`radical.pilot.ComputeUnit` managed by
this unit manager.
**Returns:**
* A list of :class:`radical.pilot.ComputeUnit` UIDs [`string`].
"""
with self._pilots_lock:
return list(self._units.keys())
# --------------------------------------------------------------------------
#
def submit_units(self, descriptions):
"""
        Submits one or more :class:`radical.pilot.ComputeUnit` instances to the
unit manager.
**Arguments:**
* **descriptions** [:class:`radical.pilot.ComputeUnitDescription`
or list of :class:`radical.pilot.ComputeUnitDescription`]: The
description of the compute unit instance(s) to create.
**Returns:**
* A list of :class:`radical.pilot.ComputeUnit` objects.
"""
from .compute_unit import ComputeUnit
ret_list = True
if not isinstance(descriptions, list):
ret_list = False
descriptions = [descriptions]
if len(descriptions) == 0:
            raise ValueError('cannot submit an empty list of unit descriptions')
# we return a list of compute units
self._rep.progress_tgt(len(descriptions), label='submit')
units = list()
for ud in descriptions:
if not ud.executable:
raise ValueError('compute unit executable must be defined')
unit = ComputeUnit(umgr=self, descr=ud)
units.append(unit)
# keep units around
with self._units_lock:
self._units[unit.uid] = unit
if self._session._rec:
ru.write_json(ud.as_dict(), "%s/%s.batch.%03d.json"
% (self._session._rec, unit.uid, self._rec_id))
self._rep.progress()
self._rep.progress_done()
if self._session._rec:
self._rec_id += 1
# insert units into the database, as a bulk.
unit_docs = [u.as_dict() for u in units]
self._session._dbs.insert_units(unit_docs)
# Only after the insert can we hand the units over to the next
# components (ie. advance state).
self.advance(unit_docs, rps.UMGR_SCHEDULING_PENDING,
publish=True, push=True)
if ret_list: return units
else : return units[0]
# --------------------------------------------------------------------------
#
def get_units(self, uids=None):
"""Returns one or more compute units identified by their IDs.
**Arguments:**
* **uids** [`string` or `list of strings`]: The IDs of the
compute unit objects to return.
**Returns:**
* A list of :class:`radical.pilot.ComputeUnit` objects.
"""
if not uids:
with self._units_lock:
ret = list(self._units.values())
return ret
ret_list = True
if (not isinstance(uids, list)) and (uids is not None):
ret_list = False
uids = [uids]
ret = list()
with self._units_lock:
for uid in uids:
if uid not in self._units:
raise ValueError('unit %s not known' % uid)
ret.append(self._units[uid])
if ret_list: return ret
else : return ret[0]
# --------------------------------------------------------------------------
#
def wait_units(self, uids=None, state=None, timeout=None):
"""
Returns when one or more :class:`radical.pilot.ComputeUnits` reach a
specific state.
        If `uids` is `None`, `wait_units` returns when **all**
        ComputeUnits reach the state defined in `state`. This may include
        units which have previously terminated or have already been waited upon.
**Example**::
# TODO -- add example
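            # in the meantime, a minimal illustration (the variable names are
            # assumptions, not part of the API):
            units  = umgr.submit_units(descriptions)
            states = umgr.wait_units(uids=[u.uid for u in units],
                                     timeout=600)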
**Arguments:**
* **uids** [`string` or `list of strings`]
If uids is set, only the ComputeUnits with the specified
uids are considered. If uids is `None` (default), all
ComputeUnits are considered.
* **state** [`string`]
The state that ComputeUnits have to reach in order for the call
to return.
By default `wait_units` waits for the ComputeUnits to
reach a terminal state, which can be one of the following:
* :data:`radical.pilot.rps.DONE`
* :data:`radical.pilot.rps.FAILED`
* :data:`radical.pilot.rps.CANCELED`
* **timeout** [`float`]
          Timeout in seconds before the call returns regardless of unit
state changes. The default value **None** waits forever.
"""
if not uids:
with self._units_lock:
uids = list()
for uid,unit in self._units.items():
if unit.state not in rps.FINAL:
uids.append(uid)
if not state : states = rps.FINAL
elif not isinstance(state, list): states = [state]
else : states = state
# we simplify state check by waiting for the *earliest* of the given
# states - if the unit happens to be in any later state, we are sure the
# earliest has passed as well.
check_state_val = rps._unit_state_values[rps.FINAL[-1]]
for state in states:
check_state_val = min(check_state_val,
rps._unit_state_values[state])
ret_list = True
if not isinstance(uids, list):
ret_list = False
uids = [uids]
start = time.time()
to_check = None
with self._units_lock:
to_check = [self._units[uid] for uid in uids]
# We don't want to iterate over all units again and again, as that would
# duplicate checks on units which were found in matching states. So we
# create a list from which we drop the units as we find them in
# a matching state
self._rep.progress_tgt(len(to_check), label='wait')
while to_check and not self._terminate.is_set():
# check timeout
if timeout and (timeout <= (time.time() - start)):
self._log.debug ("wait timed out")
break
time.sleep (0.1)
# FIXME: print percentage...
# print 'wait units: %s' % [[u.uid, u.state] for u in to_check]
check_again = list()
for unit in to_check:
# we actually don't check if a unit is in a specific (set of)
# state(s), but rather check if it ever *has been* in any of
# those states
if unit.state not in rps.FINAL and \
rps._unit_state_values[unit.state] < check_state_val:
# this unit does not match the wait criteria
check_again.append(unit)
else:
# stop watching this unit
if unit.state in [rps.FAILED]:
self._rep.progress() # (color='error', c='-')
elif unit.state in [rps.CANCELED]:
self._rep.progress() # (color='warn', c='*')
else:
self._rep.progress() # (color='ok', c='+')
to_check = check_again
self._rep.progress_done()
# grab the current states to return
state = None
with self._units_lock:
states = [self._units[uid].state for uid in uids]
sdict = {state: states.count(state) for state in set(states)}
for state in sorted(set(states)):
self._rep.info('\t%-10s: %5d\n' % (state, sdict[state]))
if to_check: self._rep.warn('>>timeout\n')
else : self._rep.ok ('>>ok\n')
# done waiting
if ret_list: return states
else : return states[0]
# --------------------------------------------------------------------------
#
def cancel_units(self, uids=None):
"""
Cancel one or more :class:`radical.pilot.ComputeUnits`.
Note that cancellation of units is *immediate*, i.e. their state is
immediately set to `CANCELED`, even if some RP component may still
operate on the units. Specifically, other state transitions, including
other final states (`DONE`, `FAILED`) can occur *after* cancellation.
This is a side effect of an optimization: we consider this
acceptable tradeoff in the sense "Oh, that unit was DONE at point of
cancellation -- ok, we can use the results, sure!".
If that behavior is not wanted, set the environment variable:
export RADICAL_PILOT_STRICT_CANCEL=True
**Arguments:**
* **uids** [`string` or `list of strings`]: The IDs of the
compute units objects to cancel.
"""
if not uids:
with self._units_lock:
uids = list(self._units.keys())
else:
if not isinstance(uids, list):
uids = [uids]
# NOTE: We advance all units to cancelled, and send a cancellation
# control command. If that command is picked up *after* some
# state progression, we'll see state transitions after cancel.
# For non-final states that is not a problem, as it is equivalent
# with a state update message race, which our state collapse
# mechanism accounts for. For an eventual non-canceled final
# state, we do get an invalid state transition. That is also
# corrected eventually in the state collapse, but the point
# remains, that the state model is temporarily violated. We
# consider this a side effect of the fast-cancel optimization.
#
# The env variable 'RADICAL_PILOT_STRICT_CANCEL == True' will
# disable this optimization.
#
# FIXME: the effect of the env var is not well tested
if 'RADICAL_PILOT_STRICT_CANCEL' not in os.environ:
with self._units_lock:
units = [self._units[uid] for uid in uids ]
unit_docs = [unit.as_dict() for unit in units]
self.advance(unit_docs, state=rps.CANCELED, publish=True, push=True)
# we *always* issue the cancellation command to the local components
self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'cancel_units',
'arg' : {'uids' : uids,
'umgr' : self.uid}})
# we also inform all pilots about the cancelation request
self._session._dbs.pilot_command(cmd='cancel_units', arg={'uids':uids})
# In the default case of calling 'advance' above, we just set the state,
# so we *know* units are canceled. But we nevertheless wait until that
# state progression trickled through, so that the application will see
# the same state on unit inspection.
self.wait_units(uids=uids)
# --------------------------------------------------------------------------
#
def register_callback(self, cb, cb_data=None, metric=None, uid=None):
"""
Registers a new callback function with the UnitManager. Manager-level
callbacks get called if the specified metric changes. The default
metric `UNIT_STATE` fires the callback if any of the ComputeUnits
        managed by the UnitManager change their state.
All callback functions need to have the same signature::
def cb(obj, value)
        where ``obj`` is a handle to the object that triggered the callback
        and ``value`` is the new value of the metric. In the example of
        `UNIT_STATE` above, the object would be the unit in question, and the
        value would be the new state of the unit.
If 'cb_data' is given, then the 'cb' signature changes to
def cb(obj, state, cb_data)
        and 'cb_data' is passed through unchanged.
        If 'uid' is given, the callback will be invoked only for the specified
unit.
Available metrics are:
* `UNIT_STATE`: fires when the state of any of the units which are
managed by this unit manager instance is changing. It communicates
the unit object instance and the units new state.
* `WAIT_QUEUE_SIZE`: fires when the number of unscheduled units (i.e.
of units which have not been assigned to a pilot for execution)
changes.
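
        A minimal, illustrative registration (the callback name is made up;
        `UNIT_STATE` is the default metric)::

            def unit_state_cb(unit, state):
                print('%s is now %s' % (unit.uid, state))

            umgr.register_callback(unit_state_cb)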
"""
# FIXME: the signature should be (self, metrics, cb, cb_data)
if not metric:
metric = rpc.UNIT_STATE
if metric not in rpc.UMGR_METRICS:
raise ValueError ("Metric '%s' not available on the umgr" % metric)
if not uid:
uid = '*'
elif uid not in self._units:
raise ValueError('no such unit %s' % uid)
with self._cb_lock:
cb_name = cb.__name__
if metric not in self._callbacks:
self._callbacks[metric] = dict()
if uid not in self._callbacks[metric]:
self._callbacks[metric][uid] = dict()
self._callbacks[metric][uid][cb_name] = {'cb' : cb,
'cb_data' : cb_data}
# --------------------------------------------------------------------------
#
def unregister_callback(self, cb=None, metrics=None, uid=None):
        if not metrics: metrics = rpc.UMGR_METRICS
else : metrics = ru.as_list(metrics)
if not uid:
uid = '*'
elif uid not in self._units:
raise ValueError('no such unit %s' % uid)
for metric in metrics:
if metric not in rpc.UMGR_METRICS :
raise ValueError ("invalid umgr metric '%s'" % metric)
with self._cb_lock:
for metric in metrics:
if metric not in rpc.UMGR_METRICS :
raise ValueError("cb metric '%s' unknown" % metric)
if metric not in self._callbacks:
raise ValueError("cb metric '%s' invalid" % metric)
if uid not in self._callbacks[metric]:
raise ValueError("cb target '%s' invalid" % uid)
if cb:
to_delete = [cb.__name__]
else:
to_delete = list(self._callbacks[metric][uid].keys())
for cb_name in to_delete:
                    if cb_name not in self._callbacks[metric][uid]:
                        raise ValueError("cb %s not registered" % cb_name)
                    del self._callbacks[metric][uid][cb_name]
# ------------------------------------------------------------------------------
|
<gh_stars>0
import astropy.units as u
import gwcs.coordinate_frames as cf
import numpy as np
import pytest
from astropy.coordinates import SkyCoord
from astropy.time import Time
from ndcube.extra_coords.lookup_table_coord import (MultipleTableCoordinate, QuantityTableCoordinate,
SkyCoordTableCoordinate, TimeTableCoordinate)
@pytest.fixture
def lut_1d_distance():
lookup_table = u.Quantity(np.arange(10) * u.km)
return QuantityTableCoordinate(lookup_table, names='x')
@pytest.fixture
def lut_3d_distance_mesh():
lookup_table = (u.Quantity(np.arange(10) * u.km),
u.Quantity(np.arange(10, 20) * u.km),
u.Quantity(np.arange(20, 30) * u.km))
return QuantityTableCoordinate(*lookup_table, mesh=True, names=['x', 'y', 'z'])
@pytest.fixture
def lut_2d_distance_no_mesh():
lookup_table = np.arange(9).reshape(3, 3) * u.km, np.arange(9, 18).reshape(3, 3) * u.km
return QuantityTableCoordinate(*lookup_table, mesh=False)
@pytest.fixture
def lut_1d_skycoord_no_mesh():
sc = SkyCoord(range(10), range(10), unit=u.deg)
return SkyCoordTableCoordinate(sc, mesh=False, names=['lon', 'lat'])
@pytest.fixture
def lut_2d_skycoord_no_mesh():
data = np.arange(9).reshape(3, 3), np.arange(9, 18).reshape(3, 3)
sc = SkyCoord(*data, unit=u.deg)
return SkyCoordTableCoordinate(sc, mesh=False)
@pytest.fixture
def lut_2d_skycoord_mesh():
sc = SkyCoord(range(10), range(10), unit=u.deg)
return SkyCoordTableCoordinate(sc, mesh=True)
@pytest.fixture
def lut_3d_skycoord_mesh():
sc = SkyCoord(range(10), range(10), range(10), unit=(u.deg, u.deg, u.AU))
return SkyCoordTableCoordinate(sc, mesh=True)
@pytest.fixture
def lut_1d_time():
data = Time(["2011-01-01T00:00:00",
"2011-01-01T00:00:10",
"2011-01-01T00:00:20",
"2011-01-01T00:00:30"], format="isot")
return TimeTableCoordinate(data, names='time', physical_types='time')
@pytest.fixture
def lut_1d_wave():
# TODO: Make this into a SpectralCoord object
return QuantityTableCoordinate(range(10) * u.nm)
def test_exceptions():
with pytest.raises(TypeError) as ei:
QuantityTableCoordinate(u.Quantity([1, 2, 3], u.nm), [1, 2, 3])
assert "All tables must be astropy Quantity objects" in str(ei)
with pytest.raises(u.UnitsError) as ei:
QuantityTableCoordinate(u.Quantity([1, 2, 3], u.nm), [1, 2, 3] * u.deg)
assert "All tables must have equivalent units." in str(ei)
with pytest.raises(ValueError) as ei:
QuantityTableCoordinate(u.Quantity([1, 2, 3], u.nm), [1, 2, 3] * u.m, names='x')
assert "The number of names should match the number of world dimensions" in str(ei)
with pytest.raises(ValueError) as ei:
QuantityTableCoordinate(u.Quantity([1, 2, 3], u.nm), [1, 2, 3] * u.m, physical_types='x')
assert "The number of physical types should match the number of world dimensions" in str(ei)
# Test two Time
with pytest.raises(ValueError) as ei:
TimeTableCoordinate(Time("2011-01-01"), Time("2011-01-01"))
assert "single Time object" in str(ei)
with pytest.raises(ValueError) as ei:
TimeTableCoordinate(Time("2011-01-01"), names=['a', 'b'])
assert "only have one name." in str(ei)
with pytest.raises(ValueError) as ei:
TimeTableCoordinate(Time("2011-01-01"), physical_types=['a', 'b'])
assert "only have one physical type." in str(ei)
# Test two SkyCoord
with pytest.raises(ValueError) as ei:
SkyCoordTableCoordinate(SkyCoord(10, 10, unit=u.deg), SkyCoord(10, 10, unit=u.deg))
assert "single SkyCoord object" in str(ei)
with pytest.raises(ValueError) as ei:
SkyCoordTableCoordinate(SkyCoord(10, 10, unit=u.deg), names='x')
assert "names must equal two" in str(ei)
with pytest.raises(ValueError) as ei:
SkyCoordTableCoordinate(SkyCoord(10, 10, unit=u.deg), physical_types='x')
assert "physical types must equal two" in str(ei)
with pytest.raises(TypeError) as ei:
MultipleTableCoordinate(10, SkyCoordTableCoordinate(SkyCoord(10, 10, unit=u.deg)))
assert "All arguments must be BaseTableCoordinate" in str(ei)
with pytest.raises(TypeError) as ei:
MultipleTableCoordinate(MultipleTableCoordinate(SkyCoordTableCoordinate(SkyCoord(10, 10, unit=u.deg))))
assert "All arguments must be BaseTableCoordinate" in str(ei)
def test_1d_distance(lut_1d_distance):
assert lut_1d_distance.model.n_inputs == 1
assert lut_1d_distance.model.n_outputs == 1
assert lut_1d_distance.model.lookup_table.shape == (10,)
assert u.allclose(u.Quantity(range(10), u.pix), lut_1d_distance.model.points)
assert u.allclose(lut_1d_distance.wcs.pixel_to_world(0), 0 * u.km)
assert u.allclose(lut_1d_distance.wcs.pixel_to_world(9), 9 * u.km)
assert lut_1d_distance.wcs.world_to_pixel(0 * u.km) == 0
def test_3d_distance(lut_3d_distance_mesh):
ltc = lut_3d_distance_mesh
assert ltc.model.n_inputs == 3
assert ltc.model.n_outputs == 3
assert ltc.wcs.world_n_dim == 3
assert ltc.wcs.pixel_n_dim == 3
assert u.allclose(ltc.wcs.pixel_to_world(0*u.pix, 0*u.pix, 0*u.pix),
(0, 10, 20)*u.km)
assert u.allclose(ltc.wcs.world_to_pixel(0*u.km, 10*u.km, 20*u.km), (0, 0, 0))
def test_2d_nout_1_no_mesh(lut_2d_distance_no_mesh):
ltc = lut_2d_distance_no_mesh
assert ltc.wcs.world_n_dim == 2
assert ltc.wcs.pixel_n_dim == 2
assert ltc.model.n_inputs == 2
assert ltc.model.n_outputs == 2
assert u.allclose(ltc.wcs.pixel_to_world(0*u.pix, 0*u.pix),
(0, 9)*u.km)
# TODO: this model is not invertible
# assert u.allclose(ltc.wcs.world_to_pixel(0*u.km, 9*u.km), (0, 0))
def test_1d_skycoord_no_mesh(lut_1d_skycoord_no_mesh):
ltc = lut_1d_skycoord_no_mesh
assert ltc.model.n_inputs == 1
assert ltc.model.n_outputs == 2
def test_2d_skycoord_mesh(lut_2d_skycoord_mesh):
ltc = lut_2d_skycoord_mesh
assert ltc.model.n_inputs == 2
assert ltc.model.n_outputs == 2
def test_3d_skycoord_mesh(lut_3d_skycoord_mesh):
ltc = lut_3d_skycoord_mesh
assert ltc.model.n_inputs == 3
assert ltc.model.n_outputs == 3
# Known failure due to gwcs#120
# assert isinstance(ltc.wcs, gwcs.WCS)
#
# sub_ltc = ltc[0:4, 0:5, 0:6]
# assert sub_ltc.delayed_models[0].lookup_table[0].shape == (4, )
# assert sub_ltc.delayed_models[0].lookup_table[1].shape == (5, )
# assert sub_ltc.delayed_models[0].lookup_table[2].shape == (6, )
def test_2d_skycoord_no_mesh(lut_2d_skycoord_no_mesh):
ltc = lut_2d_skycoord_no_mesh
assert ltc.model.n_inputs == 2
assert ltc.model.n_outputs == 2
def test_1d_time(lut_1d_time):
assert lut_1d_time.model.n_inputs == 1
assert lut_1d_time.model.n_outputs == 1
assert u.allclose(lut_1d_time.model.lookup_table, u.Quantity((0, 10, 20, 30), u.s))
assert lut_1d_time.wcs.pixel_to_world(0) == Time("2011-01-01T00:00:00")
assert lut_1d_time.wcs.world_to_pixel(Time("2011-01-01T00:00:00")) == 0
def test_join(lut_1d_time, lut_1d_wave):
ltc = lut_1d_time & lut_1d_wave
assert ltc.model.n_inputs == 2
assert ltc.model.n_outputs == 2
assert isinstance(ltc.frame, cf.CompositeFrame)
world = ltc.wcs.pixel_to_world(0, 0)
assert world[0] == Time("2011-01-01T00:00:00")
assert u.allclose(world[1], 0 * u.nm)
assert u.allclose(ltc.wcs.world_to_pixel(*world), (0, 0))
def test_join_3d(lut_2d_skycoord_mesh, lut_1d_wave):
ltc = lut_2d_skycoord_mesh & lut_1d_wave
assert ltc.model.n_inputs == 3
assert ltc.model.n_outputs == 3
assert isinstance(ltc.frame, cf.CompositeFrame)
world = ltc.wcs.pixel_to_world(0, 0, 0)
assert isinstance(world[0], SkyCoord)
assert u.allclose(world[1], 0 * u.nm)
# TODO: Investigate this, something about inverse model
# assert u.allclose(ltc.wcs.world_to_pixel(*world), (0, 0, 0))
def test_2d_quantity():
shape = (3, 3)
data = np.arange(np.product(shape)).reshape(shape) * u.m / u.s
ltc = QuantityTableCoordinate(data)
assert u.allclose(ltc.wcs.pixel_to_world(0, 0), 0 * u.m / u.s)
def test_repr_str(lut_1d_time, lut_1d_wave):
assert str(lut_1d_time.table) in str(lut_1d_time)
assert "TimeTableCoordinate" in repr(lut_1d_time)
join = lut_1d_time & lut_1d_wave
assert str(lut_1d_time.table) in str(join)
assert str(lut_1d_wave.table) in str(join)
assert "TimeTableCoordinate" not in repr(join)
assert "MultipleTableCoordinate" in repr(join)
################################################################################
# Slicing Tests
################################################################################
def test_slicing_quantity_table_coordinate():
qtc = QuantityTableCoordinate(range(10)*u.m, mesh=False, names='x', physical_types='pos:x')
assert u.allclose(qtc[2:8].table[0], range(2, 8)*u.m)
assert u.allclose(qtc[2].table[0], 2*u.m)
assert qtc.names == ['x']
assert qtc.physical_types == ['pos:x']
qtc = QuantityTableCoordinate(range(10)*u.m, mesh=True)
assert u.allclose(qtc[2:8].table[0], range(2, 8)*u.m)
assert u.allclose(qtc[2].table[0], 2*u.m)
qtc = QuantityTableCoordinate(*np.mgrid[0:10, 0:10]*u.m, mesh=False,
names=['x', 'y'], physical_types=['pos:x', 'pos:y'])
assert u.allclose(qtc[2:8, 2:8].table[0], (np.mgrid[2:8, 2:8]*u.m)[0])
assert u.allclose(qtc[2:8, 2:8].table[1], (np.mgrid[2:8, 2:8]*u.m)[1])
assert qtc.names == ['x', 'y']
assert qtc.physical_types == ['pos:x', 'pos:y']
assert qtc.frame.axes_names == ('x', 'y')
assert qtc.frame.axis_physical_types == ('custom:pos:x', 'custom:pos:y')
assert u.allclose(qtc[2, 2:8].table[0], 2*u.m)
assert u.allclose(qtc[2, 2:8].table[1], (np.mgrid[2:8, 2:8]*u.m)[1])
qtc = QuantityTableCoordinate(range(10)*u.m, range(10)*u.m, mesh=True,
names=['x', 'y'], physical_types=['pos:x', 'pos:y'])
assert u.allclose(qtc[2:8, 2:8].table[0], range(2, 8)*u.m)
assert u.allclose(qtc[2:8, 2:8].table[1], range(2, 8)*u.m)
# we have dropped one dimension
assert len(qtc[2, 2:8].table) == 1
assert u.allclose(qtc[2, 2:8].table[0], range(2, 8)*u.m)
assert qtc.names == ['x', 'y']
assert qtc.physical_types == ['pos:x', 'pos:y']
assert qtc.frame.axes_names == ('x', 'y')
assert qtc.frame.axis_physical_types == ('custom:pos:x', 'custom:pos:y')
def _assert_skycoord_equal(sc1, sc2):
sc2 = sc2.transform_to(sc1.frame)
assert sc1.shape == sc2.shape
components1 = tuple(getattr(sc1.data, comp) for comp in sc1.data.components)
components2 = tuple(getattr(sc2.data, comp) for comp in sc2.data.components)
for c1, c2 in zip(components1, components2):
assert u.allclose(c1, c2)
def test_slicing_skycoord_table_coordinate():
# 1D, no mesh
sc = SkyCoord(range(10)*u.deg, range(10)*u.deg)
stc = SkyCoordTableCoordinate(sc, mesh=False, names=['lon', 'lat'], physical_types=['pos:x', 'pos:y'])
_assert_skycoord_equal(stc[2:8].table, sc[2:8])
_assert_skycoord_equal(stc[2].table, sc[2])
assert stc.names == ['lon', 'lat']
assert stc.physical_types == ['pos:x', 'pos:y']
assert stc.frame.axes_names == ('lon', 'lat')
assert stc.frame.axis_physical_types == ('custom:pos:x', 'custom:pos:y')
# 2D, no mesh
sc = SkyCoord(*np.mgrid[0:10, 0:10]*u.deg)
stc = SkyCoordTableCoordinate(sc, mesh=False)
_assert_skycoord_equal(stc[2:8, 2:8].table, sc[2:8, 2:8])
_assert_skycoord_equal(stc[2, 2:8].table, sc[2, 2:8])
# 2D with mesh
# When mesh is True the constructor will run meshgrid
sc = SkyCoord(*u.Quantity(np.meshgrid(range(10), range(10)), u.deg))
stc = SkyCoordTableCoordinate(SkyCoord(range(10), range(10), unit=u.deg), mesh=True)
_assert_skycoord_equal(stc.table, sc)
_assert_skycoord_equal(stc[2:8, 2:8].table, sc[2:8, 2:8])
_assert_skycoord_equal(stc[2, 2:8].table, sc[2, 2:8])
def test_slicing_time_table_coordinate():
data = Time(["2011-01-01T00:00:00",
"2011-01-01T00:00:10",
"2011-01-01T00:00:20",
"2011-01-01T00:00:30"], format="isot")
ttc = TimeTableCoordinate(data)
assert (ttc.table == data).all()
assert (ttc[2:8].table == data[2:8]).all()
assert ttc[2].table == data[2]
def test_1d_distance_slice(lut_1d_distance):
sub_ltc = lut_1d_distance[0:5]
assert len(sub_ltc.table[0]) == 5
def test_3d_distance_slice(lut_3d_distance_mesh):
sub_ltc = lut_3d_distance_mesh[0:5, 0:6, 0:7]
assert len(sub_ltc.table[0]) == 5
assert len(sub_ltc.table[1]) == 6
assert len(sub_ltc.table[2]) == 7
def test_2d_nout_1_no_mesh_slice(lut_2d_distance_no_mesh):
ltc = lut_2d_distance_no_mesh
sub_ltc = ltc[0:2, 0:2]
assert sub_ltc.table[0].shape == (2, 2)
assert sub_ltc.table[1].shape == (2, 2)
# sub_ltc = ltc[0]
# assert ltc.wcs.world_n_dim == 2
# assert ltc.wcs.pixel_n_dim == 2
def test_1d_skycoord_no_mesh_slice(lut_1d_skycoord_no_mesh):
sub_ltc = lut_1d_skycoord_no_mesh[0:4]
assert sub_ltc.table.shape == (4, )
def test_2d_skycoord_mesh_slice(lut_2d_skycoord_mesh):
sub_ltc = lut_2d_skycoord_mesh[0:4, 0:5]
assert sub_ltc.table.shape == (4, 5)
def test_2d_skycoord_no_mesh_slice(lut_2d_skycoord_no_mesh):
sub_ltc = lut_2d_skycoord_no_mesh[1:3, 1:2]
assert sub_ltc.table.shape == (2, 1)
def test_1d_time_slice(lut_1d_time):
sub_ltc = lut_1d_time[1:3]
assert sub_ltc.table.shape == (2,)
def test_join_slice(lut_1d_time, lut_1d_wave):
ltc = lut_1d_time & lut_1d_wave
sub_ltc = ltc[2:8, 2:8]
assert len(sub_ltc._table_coords) == 2
assert (sub_ltc._table_coords[0].table == lut_1d_time.table[2:8]).all()
assert u.allclose(sub_ltc._table_coords[1].table[0], lut_1d_wave.table[0][2:8])
def test_slicing_errors(lut_1d_time, lut_1d_wave, lut_1d_distance, lut_2d_skycoord_mesh):
with pytest.raises(ValueError) as ei:
lut_1d_time[1, 2]
assert "slice with incorrect length" in str(ei)
with pytest.raises(ValueError) as ei:
lut_1d_wave[1, 2]
assert "slice with incorrect length" in str(ei)
with pytest.raises(ValueError) as ei:
lut_1d_distance[1, 2]
assert "slice with incorrect length" in str(ei)
with pytest.raises(ValueError) as ei:
lut_2d_skycoord_mesh[1, 2, 3]
assert "slice with incorrect length" in str(ei)
join = lut_1d_time & lut_1d_distance
with pytest.raises(ValueError) as ei:
join[1]
assert "length of the slice" in str(ei)
def test_mtc_dropped_table(lut_1d_time):
mtc = MultipleTableCoordinate(lut_1d_time)
sub = mtc[0]
assert len(sub._table_coords) == 0
assert len(sub._dropped_coords) == 1
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
wao_classes = dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert all(len(value) == 1 for value in dwd.values())
assert dwd["world_axis_names"] == ["time"]
assert dwd["world_axis_units"] == ["s"]
assert dwd["world_axis_physical_types"] == ["time"]
assert dwd["world_axis_object_components"][0][0:2] == ("temporal", 0)
assert wao_classes["temporal"][0] is Time
assert dwd["value"] == [0*u.s]
def test_mtc_dropped_table_join(lut_1d_time, lut_2d_skycoord_mesh):
mtc = MultipleTableCoordinate(lut_1d_time, lut_2d_skycoord_mesh)
sub = mtc[0, :, :]
assert len(sub._table_coords) == 1
assert len(sub._dropped_coords) == 1
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
wao_classes = dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert all(len(value) == 1 for value in dwd.values())
assert dwd["world_axis_names"] == ["time"]
assert all(isinstance(u, str) for u in dwd["world_axis_units"])
assert dwd["world_axis_units"] == ["s"]
assert dwd["world_axis_physical_types"] == ["time"]
assert dwd["world_axis_object_components"][0][0:2] == ("temporal", 0)
assert wao_classes["temporal"][0] is Time
assert dwd["value"] == [0*u.s]
def test_mtc_dropped_table_skycoord_join(lut_1d_time, lut_2d_skycoord_mesh):
mtc = MultipleTableCoordinate(lut_1d_time, lut_2d_skycoord_mesh)
sub = mtc[:, 0, 0]
assert len(sub._table_coords) == 1
assert len(sub._dropped_coords) == 1
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
wao_classes = dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert all(len(value) == 2 for value in dwd.values())
assert dwd["world_axis_names"] == ["lon", "lat"]
assert all(isinstance(u, str) for u in dwd["world_axis_units"])
assert dwd["world_axis_units"] == ["deg", "deg"]
assert dwd["world_axis_physical_types"] == ["pos.eq.ra", "pos.eq.dec"]
assert dwd["world_axis_object_components"] == [("celestial", 0, "spherical.lon"), ("celestial", 1, "spherical.lat")]
assert wao_classes["celestial"][0] is SkyCoord
assert dwd["value"] == [0*u.deg, 0*u.deg]
def test_mtc_dropped_quantity_table(lut_1d_time, lut_2d_distance_no_mesh):
mtc = MultipleTableCoordinate(lut_1d_time, lut_2d_distance_no_mesh)
sub = mtc[:, 0, 0]
assert len(sub._table_coords) == 1
assert len(sub._dropped_coords) == 1
pytest.importorskip("gwcs", minversion="0.16.2a1.dev17")
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
wao_classes = dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert dwd
assert all(len(value) == 2 for value in dwd.values())
assert dwd["world_axis_names"] == [None, None]
assert all(isinstance(u, str) for u in dwd["world_axis_units"])
assert dwd["world_axis_units"] == ["km", "km"]
assert dwd["world_axis_physical_types"] == ["custom:SPATIAL", "custom:SPATIAL"]
assert dwd["world_axis_object_components"] == [("SPATIAL0", 0, "value"), ("SPATIAL1", 0, "value")]
assert wao_classes["SPATIAL0"][0] is u.Quantity
assert wao_classes["SPATIAL1"][0] is u.Quantity
assert dwd["value"] == [0*u.km, 9*u.km]
def test_mtc_dropped_quantity_inside_table(lut_3d_distance_mesh):
sub = lut_3d_distance_mesh[:, 0, :]
assert len(sub.table) == 2
pytest.importorskip("gwcs", minversion="0.16.2a1.dev17")
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert dwd
assert all(len(value) == 1 for value in dwd.values())
sub = lut_3d_distance_mesh[:, 0, 0]
assert len(sub.table) == 1
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert dwd
assert all(len(value) == 2 for value in dwd.values())
def test_mtc_dropped_quantity_inside_table_no_mesh(lut_2d_distance_no_mesh):
"""
When not meshing, we don't drop a coord, as the coordinate for the sliced
out axis can still vary along the remaining coordinate.
"""
sub = lut_2d_distance_no_mesh[:, 0]
assert len(sub.table) == 2
pytest.importorskip("gwcs", minversion="0.16.2a1.dev17")
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
assert not dwd
def test_mtc_dropped_quantity_join_drop_table(lut_1d_time, lut_3d_distance_mesh):
mtc = MultipleTableCoordinate(lut_1d_time, lut_3d_distance_mesh)
sub = mtc[:, 0, :, :]
assert len(sub._table_coords) == 2
assert len(sub._dropped_coords) == 0
pytest.importorskip("gwcs", minversion="0.16.2a1.dev17")
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert all(len(value) == 1 for value in dwd.values())
sub = mtc[0, 0, :, :]
assert len(sub._table_coords) == 1
assert len(sub._dropped_coords) == 1
pytest.importorskip("gwcs", minversion="0.16.2a1.dev17")
dwd = sub.dropped_world_dimensions
assert isinstance(dwd, dict)
dwd.pop("world_axis_object_classes")
assert all(isinstance(value, list) for value in dwd.values())
assert all(len(value) == 2 for value in dwd.values())
################################################################################
# Tests of & operator
################################################################################
def test_and_base_table_coordinate():
data = Time(["2011-01-01T00:00:00",
"2011-01-01T00:00:10",
"2011-01-01T00:00:20",
"2011-01-01T00:00:30"], format="isot")
ttc = TimeTableCoordinate(data)
qtc = QuantityTableCoordinate(range(10)*u.m, mesh=False)
join = ttc & ttc
assert isinstance(join, MultipleTableCoordinate)
join2 = join & qtc
assert isinstance(join2, MultipleTableCoordinate)
assert len(join2._table_coords) == 3
assert join2._table_coords[2] is qtc
join3 = qtc & join
assert isinstance(join3, MultipleTableCoordinate)
assert len(join3._table_coords) == 3
assert join3._table_coords[0] is qtc
join4 = ttc & qtc
assert isinstance(join4, MultipleTableCoordinate)
assert len(join4._table_coords) == 2
assert join4._table_coords[0] is ttc
assert join4._table_coords[1] is qtc
join5 = join & join
assert isinstance(join5, MultipleTableCoordinate)
assert len(join5._table_coords) == 4
def test_and_errors():
data = Time(["2011-01-01T00:00:00",
"2011-01-01T00:00:10",
"2011-01-01T00:00:20",
"2011-01-01T00:00:30"], format="isot")
ttc = TimeTableCoordinate(data)
qtc = QuantityTableCoordinate(range(10)*u.m, mesh=False)
with pytest.raises(TypeError) as ei:
ttc & 5
assert "unsupported operand type(s) for &: 'TimeTableCoordinate' and 'int'" in str(ei)
join = ttc & qtc
with pytest.raises(TypeError) as ei:
join & 5
assert "unsupported operand type(s) for &: 'MultipleTableCoordinate' and 'int'" in str(ei)
with pytest.raises(TypeError) as ei:
5 & join
assert "unsupported operand type(s) for &: 'int' and 'MultipleTableCoordinate'" in str(ei)
|
"""Utilities for mapping between actual and formal arguments (and their types)."""
from typing import TYPE_CHECKING, List, Optional, Sequence, Callable, Set
from mypy.maptype import map_instance_to_supertype
from mypy.types import (
Type, Instance, TupleType, AnyType, TypeOfAny, TypedDictType, ParamSpecType, get_proper_type
)
from mypy import nodes
if TYPE_CHECKING:
from mypy.infer import ArgumentInferContext
def map_actuals_to_formals(actual_kinds: List[nodes.ArgKind],
actual_names: Optional[Sequence[Optional[str]]],
formal_kinds: List[nodes.ArgKind],
formal_names: Sequence[Optional[str]],
actual_arg_type: Callable[[int],
Type]) -> List[List[int]]:
"""Calculate mapping between actual (caller) args and formals.
The result contains a list of caller argument indexes mapping to each
callee argument index, indexed by callee index.
The actual_arg_type argument should evaluate to the type of the actual
argument with the given index.
"""
nformals = len(formal_kinds)
formal_to_actual: List[List[int]] = [[] for i in range(nformals)]
ambiguous_actual_kwargs: List[int] = []
fi = 0
for ai, actual_kind in enumerate(actual_kinds):
if actual_kind == nodes.ARG_POS:
if fi < nformals:
if not formal_kinds[fi].is_star():
formal_to_actual[fi].append(ai)
fi += 1
elif formal_kinds[fi] == nodes.ARG_STAR:
formal_to_actual[fi].append(ai)
elif actual_kind == nodes.ARG_STAR:
# We need to know the actual type to map varargs.
actualt = get_proper_type(actual_arg_type(ai))
if isinstance(actualt, TupleType):
# A tuple actual maps to a fixed number of formals.
for _ in range(len(actualt.items)):
if fi < nformals:
if formal_kinds[fi] != nodes.ARG_STAR2:
formal_to_actual[fi].append(ai)
else:
break
if formal_kinds[fi] != nodes.ARG_STAR:
fi += 1
else:
# Assume that it is an iterable (if it isn't, there will be
# an error later).
while fi < nformals:
if formal_kinds[fi].is_named(star=True):
break
else:
formal_to_actual[fi].append(ai)
if formal_kinds[fi] == nodes.ARG_STAR:
break
fi += 1
elif actual_kind.is_named():
assert actual_names is not None, "Internal error: named kinds without names given"
name = actual_names[ai]
if name in formal_names:
formal_to_actual[formal_names.index(name)].append(ai)
elif nodes.ARG_STAR2 in formal_kinds:
formal_to_actual[formal_kinds.index(nodes.ARG_STAR2)].append(ai)
else:
assert actual_kind == nodes.ARG_STAR2
actualt = get_proper_type(actual_arg_type(ai))
if isinstance(actualt, TypedDictType):
for name in actualt.items:
if name in formal_names:
formal_to_actual[formal_names.index(name)].append(ai)
elif nodes.ARG_STAR2 in formal_kinds:
formal_to_actual[formal_kinds.index(nodes.ARG_STAR2)].append(ai)
else:
# We don't exactly know which **kwargs are provided by the
# caller, so we'll defer until all the other unambiguous
# actuals have been processed
ambiguous_actual_kwargs.append(ai)
if ambiguous_actual_kwargs:
# Assume the ambiguous kwargs will fill the remaining arguments.
#
# TODO: If there are also tuple varargs, we might be missing some potential
# matches if the tuple was short enough to not match everything.
unmatched_formals = [fi for fi in range(nformals)
if (formal_names[fi]
and (not formal_to_actual[fi]
or actual_kinds[formal_to_actual[fi][0]] == nodes.ARG_STAR)
and formal_kinds[fi] != nodes.ARG_STAR)
or formal_kinds[fi] == nodes.ARG_STAR2]
for ai in ambiguous_actual_kwargs:
for fi in unmatched_formals:
formal_to_actual[fi].append(ai)
return formal_to_actual
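# Hedged usage sketch (added for illustration, not part of mypy itself): maps the call
# ``f(1, y=2)`` onto ``def f(x, y): ...``. It reuses ARG_POS/ARG_NAMED and AnyType, which
# are already imported above; the expected result is [[0], [1]] -- formal 'x' receives
# actual 0 and formal 'y' receives actual 1.
if __name__ == "__main__":
    example_mapping = map_actuals_to_formals(
        actual_kinds=[nodes.ARG_POS, nodes.ARG_NAMED],
        actual_names=[None, "y"],
        formal_kinds=[nodes.ARG_POS, nodes.ARG_POS],
        formal_names=["x", "y"],
        actual_arg_type=lambda i: AnyType(TypeOfAny.special_form),
    )
    print(example_mapping)  # -> [[0], [1]]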
def map_formals_to_actuals(actual_kinds: List[nodes.ArgKind],
actual_names: Optional[Sequence[Optional[str]]],
formal_kinds: List[nodes.ArgKind],
formal_names: List[Optional[str]],
actual_arg_type: Callable[[int],
Type]) -> List[List[int]]:
"""Calculate the reverse mapping of map_actuals_to_formals."""
formal_to_actual = map_actuals_to_formals(actual_kinds,
actual_names,
formal_kinds,
formal_names,
actual_arg_type)
# Now reverse the mapping.
actual_to_formal: List[List[int]] = [[] for _ in actual_kinds]
for formal, actuals in enumerate(formal_to_actual):
for actual in actuals:
actual_to_formal[actual].append(formal)
return actual_to_formal
class ArgTypeExpander:
"""Utility class for mapping actual argument types to formal arguments.
One of the main responsibilities is to expand caller tuple *args and TypedDict
**kwargs, and to keep track of which tuple/TypedDict items have already been
consumed.
Example:
def f(x: int, *args: str) -> None: ...
f(*(1, 'x', 1.1))
We'd call expand_actual_type three times:
1. The first call would provide 'int' as the actual type of 'x' (from '1').
2. The second call would provide 'str' as one of the actual types for '*args'.
3. The third call would provide 'float' as one of the actual types for '*args'.
A single instance can process all the arguments for a single call. Each call
needs a separate instance since instances have per-call state.
"""
def __init__(self, context: 'ArgumentInferContext') -> None:
# Next tuple *args index to use.
self.tuple_index = 0
# Keyword arguments in TypedDict **kwargs used.
self.kwargs_used: Set[str] = set()
# Type context for `*` and `**` arg kinds.
self.context = context
def expand_actual_type(self,
actual_type: Type,
actual_kind: nodes.ArgKind,
formal_name: Optional[str],
formal_kind: nodes.ArgKind) -> Type:
"""Return the actual (caller) type(s) of a formal argument with the given kinds.
If the actual argument is a tuple *args, return the next individual tuple item that
maps to the formal arg.
If the actual argument is a TypedDict **kwargs, return the next matching typed dict
value type based on formal argument name and kind.
This is supposed to be called for each formal, in order. Call multiple times per
formal if multiple actuals map to a formal.
"""
actual_type = get_proper_type(actual_type)
if actual_kind == nodes.ARG_STAR:
if isinstance(actual_type, Instance) and actual_type.args:
from mypy.subtypes import is_subtype
if is_subtype(actual_type, self.context.iterable_type):
return map_instance_to_supertype(
actual_type,
self.context.iterable_type.type,
).args[0]
else:
# We cannot properly unpack anything other
# than `Iterable` type with `*`.
# Just return `Any`, other parts of code would raise
# a different error for improper use.
return AnyType(TypeOfAny.from_error)
elif isinstance(actual_type, TupleType):
# Get the next tuple item of a tuple *arg.
if self.tuple_index >= len(actual_type.items):
# Exhausted a tuple -- continue to the next *args.
self.tuple_index = 1
else:
self.tuple_index += 1
return actual_type.items[self.tuple_index - 1]
elif isinstance(actual_type, ParamSpecType):
# ParamSpec is valid in *args but it can't be unpacked.
return actual_type
else:
return AnyType(TypeOfAny.from_error)
elif actual_kind == nodes.ARG_STAR2:
from mypy.subtypes import is_subtype
if isinstance(actual_type, TypedDictType):
if formal_kind != nodes.ARG_STAR2 and formal_name in actual_type.items:
# Lookup type based on keyword argument name.
assert formal_name is not None
else:
# Pick an arbitrary item if no specified keyword is expected.
formal_name = (set(actual_type.items.keys()) - self.kwargs_used).pop()
self.kwargs_used.add(formal_name)
return actual_type.items[formal_name]
elif (
isinstance(actual_type, Instance) and
len(actual_type.args) > 1 and
is_subtype(actual_type, self.context.mapping_type)
):
# Only `Mapping` type can be unpacked with `**`.
# Other types will produce an error somewhere else.
return map_instance_to_supertype(
actual_type,
self.context.mapping_type.type,
).args[1]
elif isinstance(actual_type, ParamSpecType):
# ParamSpec is valid in **kwargs but it can't be unpacked.
return actual_type
else:
return AnyType(TypeOfAny.from_error)
else:
# No translation for other kinds -- 1:1 mapping.
return actual_type
|
from pathlib import Path
import json
import shutil
import os.path
import pyclbr
import copy
renames = {"cc_defect_detection":"cc_defect_detect",
"cc_clone_detection_big_clone_bench":"cc_clone_detect_big",
"cc_code_refinement":"cc_refine",
"cc_code_completion_token":"cc_complete_token",
"cc_code_to_code_trans":"cc_code_to_code",
"cc_code_completion_line":"cc_code_complete_line",
"cc_clone_detection_poj_104":"cc_clone_detect_poj",
"cc_cloze_testing_maxmin":"cc_cloze_test_maxmin",
"cc_cloze_testing_all":"cc_cloze_test_all",
"ct_code_to_text":"ct_code_to_text",
"tt_text_to_text":"tt_text_to_text",
#"tc_nl_code_search_web_query": "tc_search_web_query",
"tc_text_to_code":"tc_text_to_code",
"tc_nl_code_search_adv":"tc_search_adv"}
class Splitter():
UNUSED_CONFIG_KEYS = "data_dir_name", "files"
def __init__(self, src_path, dest_path):
self.src_path = Path(src_path)
self.dest_path = Path(dest_path)
self.datasets = {}
self.class_code = {}
def gather_definition_info(self):
from hf_datasets.generated_definitions import DEFINITIONS
for name, definition in DEFINITIONS.items():
key = definition["dir_name"]
definition["full_name"] = name
if key not in self.datasets:
self.datasets[key] = dict(configurations = [])
self.datasets[key]["configurations"].append(definition)
for dataset in self.datasets.values():
names = []
for config in dataset["configurations"]:
names.append(config["name"])
common_prefix = os.path.commonprefix(names)
dataset["name"] = common_prefix
def gather_class_info(self, module_name):
source_code_data = pyclbr.readmodule_ex(module_name, path=["code_x_glue"])
lines = list(open(f"hf_datasets/{module_name}.py").readlines())
for c, d in source_code_data.items():
code_string = "".join(lines[d.lineno - 1:d.end_lineno])
self.class_code[c] = code_string
def gather_classes_info(self):
kinds = ["code", "text"]
for src in kinds:
for dest in kinds:
module_name = f"code_x_glue_{src}_to_{dest}"
self.gather_class_info(module_name)
def generate_dataset(self, dataset_name, dataset_info, dataset_path):
#shutil.rmtree(dataset_path, ignore_errors=True)
dataset_path.mkdir(exist_ok=True)
for filename in ["common.py"]:
shutil.copy(self.src_path / filename, dataset_path / filename)
definitions = copy.deepcopy(dataset_info["configurations"])
for d in definitions:
for k in self.UNUSED_CONFIG_KEYS:
del d[k]
config_name = d["full_name"][len(dataset_info["name"]):] or "default"
if config_name.startswith("_"):
config_name = config_name[1:]
d["name"] = config_name
del d["full_name"]
d["sizes"] = self.sizes[dataset_name[len("code_x_glue_"):]][config_name]
definitions = {definition["name"]:definition for definition in definitions}
with open(dataset_path / "generated_definitions.py", "w") as f:
f.write("DEFINITIONS=" + json.dumps(definitions, indent=4, sort_keys=True))
BASE_CLASSES = {"CodeXGlueCCCodeCompletionTokenPython":"CodeXGlueCCCodeCompletionToken",
"CodeXGlueCCCodeCompletionTokenJava":"CodeXGlueCCCodeCompletionToken",
"CodeXGlueCCClozeTestingAll":"CodeXGlueCCClozeTesting",
"CodeXGlueCCClozeTestingMaxmin":"CodeXGlueCCClozeTesting",
"CodeXGlueTCNLCodeSearchAdv" : "CodeXGlueCTCodeToTextBase",
"CodeXGlueTCNLCodeSearchWebQuery": "CodeXGlueCTCodeToTextBase",
"CodeXGlueCTCodeToText": "CodeXGlueCTCodeToTextBase",
}
child_class_names = []
class_names = []
for d in definitions.values():
class_name = d["class_name"]
if class_name not in class_names:
if class_name in BASE_CLASSES:
base_class = BASE_CLASSES[class_name]
if base_class not in class_names:
class_names = [base_class] + class_names
class_names.append(class_name)
child_class_names.append(class_name)
if True:
IMPORTS = ["datasets", "json", "os", "os.path"]
configs_source_code = "".join(f"import {imp}\n" for imp in IMPORTS)
for base_class in ["Child", "TrainValidTestChild"]:
configs_source_code += f"from .common import {base_class}\n"
for class_name in class_names:
configs_source_code += self.class_code[class_name]
#with open(dataset_path / "configs.py", "w") as f:
# f.write(configs_source_code)
(dataset_path / "configs.py").unlink(missing_ok=True)
with open(self.src_path / "code_x_glue_template.py") as f_in:
s = f_in.read()
main_class_name = dataset_info["name"].split("_")
main_class_name = "".join([main_class_name[0].upper()] + [a.capitalize() for a in main_class_name[1:]])
main_class_name = "CodeXGlue" + main_class_name + "Main"
s = s.replace("class CodeXGlue(", f"class {main_class_name}(")
if False:
class_import_string = f"from .configs import {','.join(child_class_names)}\n"
s = s.replace("from .configs import *", class_import_string)
class_import_string = "\n\nCLASS_MAPPING={"
for child_class_name in child_class_names:
class_import_string += f"'{child_class_name}':{child_class_name},\n"
class_import_string += "}\n"
configs_source_code += class_import_string
s = s.replace("from .configs import *", configs_source_code)
with open(dataset_path / f"{dataset_name}.py", "w") as f_out:
f_out.write(s)
def generate_datasets(self):
with(open(self.src_path / "sizes.json")) as f:
self.sizes = json.loads(f.read())
for dataset_info in self.datasets.values():
dataset_name = "code_x_glue_" + dataset_info["name"]
dataset_path = self.dest_path / dataset_name
self.generate_dataset(dataset_name, dataset_info, dataset_path)
def run(self):
self.gather_definition_info()
self.gather_classes_info()
self.generate_datasets()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
raise RuntimeError("you should specify a single argument: path to the local version of https://github.com/huggingface/datasets repository")
dataset_path = Path(sys.argv[1])
sys.path.append(".")
s = Splitter("hf_datasets", dataset_path / "datasets")
s.run()
|
# Source: jiahuanluo/label-inference-attacks
"""
thanks: https://github.com/swapniel99/criteo/blob/master/criteo.py
"""
import torch.utils.data as data
from csv import DictReader
import numpy as np
import pandas as pd
import torch
from imblearn.over_sampling import SMOTE
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PowerTransformer, StandardScaler
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets.dataset_setup import DatasetSetup
from my_utils.utils import train_val_split
import itertools
import warnings
warnings.filterwarnings("ignore")
D = 2 ** 13 # number of weights use for learning
BATCH_SIZE = 1000
header = ['Label', 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'i9', 'i10', 'i11', 'i12', 'i13', 'c1', 'c2', 'c3',
'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'c10', 'c11', 'c12', 'c13', 'c14', 'c15', 'c16', 'c17', 'c18', 'c19',
'c20', 'c21', 'c22', 'c23', 'c24', 'c25', 'c26']
def get_csv_row_by_index(reader, index):
row = itertools.islice(reader, index - 1, index).__next__()
return row
class Criteo(data.Dataset):
def __init__(self, processed_csv_file_path, batch_size=BATCH_SIZE, train=True, total_samples_num=1e5, test_size=0.2):
"""
Args:
processed_csv_file_path (string): Path to the criteo.csv file.
"""
self.total_samples_num = total_samples_num
self.test_size = test_size
self.train_samples_num = int(self.total_samples_num * (1 - self.test_size))
self.test_samples_num = int(self.total_samples_num * self.test_size)
self.train_batches_num = int(self.train_samples_num / batch_size)
self.test_batches_num = int(self.test_samples_num / batch_size)
self.train = train
self.processed_csv_file_path = processed_csv_file_path
self.batch_size = batch_size
df_labels = pd.read_csv(processed_csv_file_path, nrows=self.total_samples_num, usecols=['label'])
y_val = df_labels.astype('long')
self.labels = y_val.values.reshape(-1, batch_size)
def __len__(self):
# print(f'The Criteo DATALOADER\'s batch quantity. Batch size is {self.batch_size}:')
if self.train:
return self.train_batches_num
else:
return self.test_batches_num
def __getitem__(self, index):
# index is for batch
if self.train:
index = index
else:
index = index + self.train_batches_num
temp_df = pd.read_csv(self.processed_csv_file_path, skiprows=index * self.batch_size, nrows=self.batch_size)
temp_df = temp_df.drop(temp_df.columns[-1], axis=1)
feature_names = temp_df.columns.tolist()
x_feature = temp_df[feature_names]
feat_ = x_feature.values
label_ = self.labels[index]
feat_ = torch.tensor(feat_)
label_ = torch.tensor(label_)
return feat_, label_
class CriteoSetup(DatasetSetup):
def __init__(self):
super().__init__()
self.num_classes = 2
self.size_bottom_out = 4
def set_datasets_for_ssl(self, file_path, n_labeled, party_num=None):
train_labeled_dataset = CriteoLabeled(file_path, n_labeled, train=True)
train_unlabeled_dataset = CriteoUnlabeled(file_path, n_labeled, train=True)
train_complete_dataset = Criteo(file_path, train=True)
test_dataset = Criteo(file_path, train=False)
print("#Labeled:", len(train_labeled_dataset),
"#Unlabeled:", len(train_unlabeled_dataset))
return train_labeled_dataset, train_unlabeled_dataset, test_dataset, train_complete_dataset
def get_transforms(self):
transforms_ = transforms.Compose([
transforms.ToTensor(),
])
return transforms_
def get_transformed_dataset(self, file_path, party_num=None, train=True):
_dataset = Criteo(file_path, batch_size=BATCH_SIZE, train=train)
return _dataset
def clip_one_party_data(self, x, half):
x = x[:, :half]
return x
class CriteoLabeled(Criteo):
def __init__(self, file_path, n_labeled, train=True):
super(CriteoLabeled, self).__init__(file_path, batch_size=100, train=train, total_samples_num=n_labeled, test_size=0.)
class CriteoUnlabeled(Criteo):
def __init__(self, file_path, n_labeled, train=True):
super(CriteoUnlabeled, self).__init__(file_path, batch_size=100, train=train, total_samples_num=1e6 - n_labeled, test_size=0.)
self.n_labeled = n_labeled
def __getitem__(self, index):
index += self.n_labeled
feat_, label_ = super().__getitem__(index)
return feat_, label_
if __name__ == "__main__":
path = 'D:/Datasets/Criteo/criteo.csv'
dataset = Criteo(path, batch_size=5, train=True)
print('dataset constructed')
print(f'len dataset:{len(dataset)}')
feat, label = dataset[10]
print(f"len feat:{len(feat)}")
print(f"feat:{feat}")
print(f"label:{label}")
# data_loader = DataLoader(dataset, 4)
# print('dataloader constructed')
for feat, label in dataset:
print(f"feat.shape:{feat.shape}")
print(f"label:{label}")
# break
|
# Source: jonathanengelbert/ETLs
# This script copies the basic layers from Transbase needed by TIM, with the exception of injury data.
#Last modified: 11/21/2017 by <NAME>
#
### No Known Issues
### WARNING: #CAUTION: The field "overlap" in dataset "TB_overall_hgh_injry_network" no longer exists
### in newer versions of this dataset. It has been deleted for processing, and might cause problems
### once the data is loaded. See STEP THREE below for old code and notes.
################################################################################################
import arcpy
from arcpy import env
import sys, string, os, time, datetime
# SET TO OVERWRITE
arcpy.env.overwriteOutput = True
# Logging script
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
print theStartTime
#try:
if 1==1:
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
# thisfile = os.path.realpath(__file__)
file = open("C:/ETLs/TIM/TIMUpdates/Logs/" + myStartDate + "Transbase1" + ".txt", "w")
file.write(theStartTime + "\n")
when =datetime.date.today()
theDate = when.strftime("%d")
theDay=when.strftime("%A")
print theDay
################################################################################################
# STEP ONE
# COPYING FROM SDE TO LOCAL STAGING FOLDER: SET NAMES AND PATHS
# NOTE: NO NEED TO REPROJECT TRANSBASE LAYERS
# lists for looping through later
transbaselist = []
locallist = []
# filepath for all copied files:
staging_gdb = "\\\\CP-GIS-SVR1\\arcgisserver\\DataAndMXDs\\TIMReady\\Transbase_1.gdb\\"
# intersection - transpo variables
tb_transpo = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_intrsctn_trnsprtn"
tb_transpo_local = "TB_intersection_transpo"
transbaselist.append(tb_transpo) # add to list created above for looping later
locallist.append(tb_transpo_local)
# pedestrian high injury corridor
tb_hicped = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_st_sgmt_2013_ped_hgh_injry_crrdr"
tb_hicped_local = "TB_ped_hgh_injry_crrdr"
transbaselist.append(tb_hicped)
locallist.append(tb_hicped_local)
# vehicle high injury corridor
tb_hicveh = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_st_sgmt_2014_veh_hgh_injry_crrdr"
tb_hicveh_local = "TB_veh_hgh_injry_crrdr"
transbaselist.append(tb_hicveh)
locallist.append(tb_hicveh_local)
# vision zero capital improvements
#tb_vz = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_intrsctn_sp_vz_capital_improvements_40_projects_aug15"
tb_vz = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_intrsctn_sp_vz_capital_improvements_40_projects_jan16"
tb_vz_local = "TB_VZ_capitalimprovements"
transbaselist.append(tb_vz)
locallist.append(tb_vz_local)
# Bicycle high injury corridors
tb_hiccyc = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_st_sgmt_2014_cyc_hgh_injry_crrdr"
tb_hiccyc_local = "TB_cyc_hgh_injry_crrdr"
transbaselist.append(tb_hiccyc)
locallist.append(tb_hiccyc_local)
# overall high injury network
tb_hic = "C:\\SDE_Connections\\Transbase.sde\\transbase_public.public.vw_geo_st_sgmt_2017_vz_hgh_injry_ntwrk"
tb_hic_local = "TB_overall_hgh_injry_network"
transbaselist.append(tb_hic)
locallist.append(tb_hic_local)
print str(len(transbaselist)) + " layers from Transbase identified"
print str(len(locallist)) + " destination layers set"
file.write(str(time.ctime()) +": " +str(len(transbaselist)) + " layers from Transbase identified"+ "\n")
file.write(str(time.ctime()) +": " +str(len(locallist)) + " destination layers set"+ "\n")
################################################################################################
# STEP TWO
# COPYING FROM SDE TO LOCAL STAGING FOLDER: DO THE ACTUAL COPYING
# loop through layers and copy to staging folder
for i in range(0,len(transbaselist)):
print("\n")
print "Copying files - iteration " + str(i) + ":"
print "New file path: " + staging_gdb + locallist[i]
print "From: " + transbaselist[i]
try:
filename = staging_gdb + locallist[i]
arcpy.CopyFeatures_management(transbaselist[i], filename, "", "0", "0", "0")
except:
print "FAILED TO COPY " + filename
file.write(str(time.ctime()) +": FAILED TO COPY"+ filename+"\n")
file.write(str(time.ctime()) +": copied files"+ "\n")
# STEP THREE
# GEOPROCESSING
# create list for looping later
bufferlist = []
# function to create buffers
def arcpybuffer(buffer_name,original_name,buffer_dist,dissolve_opt,dissolve_fld):
print("\n")
print "Buffering " + buffer_name
bufferlist.append(buffer_name)
staging_name = staging_gdb + original_name
filename_buffer = staging_gdb + buffer_name
arcpy.Buffer_analysis(staging_name, filename_buffer, buffer_dist, "", "", dissolve_opt, dissolve_fld)
# intersection - transpo variables
# 1/4 mile buffer, no dissolve
arcpybuffer("tb_int_transpo_buffer_quartermile",tb_transpo_local,".25 Miles","","")
file.write(str(time.ctime()) +": 0.25 buffer"+ "\n")
# pedestrian high injury corridor
# create 250 ft buffer, dissolve on "street_nam" and "street_typ"
arcpybuffer("TB_ped_hgh_injry_crrdr_buffer",tb_hicped_local,"250 Feet","LIST",["street_nam","street_type"])
file.write(str(time.ctime()) +": 250ft buffer - ped"+ "\n")
# vehicle high injury corridor
# create 250 ft buffer, dissolve on "street_nam" and "street_typ"
arcpybuffer("TB_veh_hgh_injry_crrdr_buffer",tb_hicveh_local,"250 Feet","LIST",["street_nam","street_type"])
file.write(str(time.ctime()) +": 250ft buffer - veh"+ "\n")
# bicycle high injury corridor
# create 250 ft buffer, dissolve on "street_nam" and "street_typ"
arcpybuffer("TB_cyc_hgh_injry_crrdr_buffer",tb_hiccyc_local,"250 Feet","LIST",["street_nam","street_type"])
file.write(str(time.ctime()) +": 250ft buffer - cyc"+ "\n")
# overall high injury network
# create 250 ft buffer, dissolve on "street_nam", "street_typ", and "overlap"
#CAUTION: The field "overlap" no longer exists in newer versions of this dataset. It has been deleted for processing, and might cause problems once the data is loaded. Below is the original line of code for the geoprocess:
#arcpybuffer("TB_overall_hgh_injry_network_buffer",tb_hic_local,"250 Feet","LIST",["street_nam","street_type","overlap"])
arcpybuffer("TB_overall_hgh_injry_network_buffer",tb_hic_local,"250 Feet","LIST",["street_nam","street_type"])
file.write(str(time.ctime()) +": 250ft buffer - overall"+ "\n")
# vision zero capital improvements
# - create 500 ft buffer, no dissolve
arcpybuffer("TB_VZ_capitalimprovements_buffer",tb_vz_local,"500 Feet","","")
file.write(str(time.ctime()) +": 500ft buffer - cap imp"+ "\n")
print str(len(bufferlist)) + " buffer layers created"
file.write(str(time.ctime()) +": FINISHED SUCCESSFULLY"+ "\n")
file.close()
################################################################################################
try:
print "FINISHED SUCCESSFULLY"
except Exception,e:
print "Ended badly"
file.write(str(time.ctime()) +": Ended badly")
file.write(arcpy.GetMessages())
file.write(arcpy.GetMessages(2))
file.write(arcpy.GetMessages(1))
print str(e)
file.write(str(e))
file.close()
print arcpy.GetMessages()
print arcpy.GetMessages(2)
print arcpy.GetMessages(1)
myEndTime = time.clock()
theTime = myEndTime - myStartTime
theEndTime = time.ctime()
theMinutes = theTime / 60
|
from sam import SAM
import copy
import os
import numpy as np
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import cv2
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
import timm
from efficientnet import EfficientNet_Facial
from dataloader import DataLoader  # note: intentionally shadows torch.utils.data.DataLoader imported above; main() uses this custom loader
from utils import set_parameter_required_grad, evaluate
from tqdm import tqdm
# NOTE: despite its name and training-related parameters, this function currently only
# runs an evaluation pass over test_loader; n_epochs, learningrate, train_loader and use_sam are unused here.
def train(model, n_epochs, learningrate, train_loader, test_loader, use_sam=False, train=False):
model.eval()
with torch.no_grad():
epoch_val_accuracy = 0
epoch_val_loss = 0
epoch_Positive = 0
epoch_Negative = 0
epoch_TP = 0
epoch_FP = 0
epoch_TN = 0
epoch_FN = 0
for data, label in tqdm(test_loader):
data = data.to(device)
label = label.to(device)
val_output = model(data)
val_loss = criterion(val_output, label)
acc = (val_output.argmax(dim=1) == label).float().mean()
#print("label: ", label)
epoch_val_accuracy += acc / len(test_loader)
epoch_val_loss += val_loss / len(test_loader)
#print(np.shape(val_output),np.shape(label))
c_True_Positive, c_False_Positive, c_True_Negative, c_False_Negative, c_Positive, c_Negative = evaluate(val_output, label)
epoch_TP += c_True_Positive
epoch_FP += c_False_Positive
epoch_TN += c_True_Negative
epoch_FN += c_False_Negative
epoch_Positive += c_Positive
epoch_Negative += c_Negative
print(f"Postive label: {epoch_Positive}, Negative label: {epoch_Negative}")
Recall = (epoch_TP)/(epoch_TP + epoch_FN)
Precision = (epoch_TP)/(epoch_TP + epoch_FP)
F1 = (2*(Recall * Precision))/(Recall + Precision)
print(
f"val_loss : {epoch_val_loss:.4f} - val_acc: {epoch_val_accuracy:.4f}\n"
)
print(f"Recall: {Recall:.4f}, Precision: {Precision:.4f}, F1 Score: {F1:.4f}")
def main():
n_epochs = 40
lr = 3e-5
gamma = 0.7
global device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# print(device)
#Dataloader
loader = DataLoader()
train_loader, test_loader, train_dataset, test_dataset = loader.get_loader()
class_weights = loader.get_weight_class(train_dataset)
#Initial loss function
# print("CLASS WEIGHT: ",list(class_weights.values()))
class_weight = [1.0,1.5]
weights = torch.FloatTensor(class_weight).cuda()
# weights = torch.FloatTensor(list(class_weights.values()))
global criterion
criterion = nn.CrossEntropyLoss(weights)
#Initial model
num_class = len(train_dataset.classes)
model = EfficientNet_Facial(num_class = num_class)
print(model)
#Checkpoint save
checkpoint_dir = "../Save_checkpoint"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
#Training
#First Step Freeze Backbone, Finetune FC layer
# set_parameter_required_grad(model, required_grad = False)
# set_parameter_required_grad(model.classifier, required_grad = True)
model.freezebackbone()
train(model ,3 ,0.001 ,train_loader ,test_loader ,use_sam=False)
#Fine all layer
model.finetune_alllayer()
train(model ,25 ,3e-5 ,train_loader ,test_loader ,use_sam=False)
PATH= checkpoint_dir + '/Efficientnet_Facial.pt'
model_name='Efficientnet_Facial'
torch.save(model, PATH)
if __name__ == '__main__':
main()
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import wraps
from airflow.exceptions import AirflowException
from kubernetes import client, config
def get_pod_port_ip(*pods, namespace):
def get_k8s_pod_port_ip(func):
@wraps(func)
def k8s_pod_port_ip_get(self, pods_ip_port):
"""This function retrieves Kubernetes Pod Port and IP
information. It can be used to retrieve information of
single pod deployment and/or statefulsets. For instance,
it can be used to retrieve the tiller pod IP and port
information for usage in the Armada Operator.
:param pods_ip_port: IP and port information of the pods
Example::
from get_k8s_pod_port_ip import get_pod_port_ip
@get_pod_port_ip('tiller', namespace='kube-system')
def get_pod_info(self, pods_ip_port={}):
tiller_ip = pods_ip_port['tiller']['ip']
tiller_port = pods_ip_port['tiller']['port']
"""
# Initialize variable
k8s_pods = {}
# The function allows us to query information on multiple
# pods
for pod_name in pods:
# Initialize variables
pod_attr = {}
pod_attr[pod_name] = {}
# Initialize/Reset counter
count = 0
# Make use of kubernetes client to retrieve pod IP
# and port information
# Note that we should use 'in_cluster_config'
# Note that we will only search for pods in the namespace
# that was specified in the request
config.load_incluster_config()
v1 = client.CoreV1Api()
ret = v1.list_namespaced_pod(namespace=namespace,
watch=False)
# Loop through items to extract port and IP information
# of the pod
for i in ret.items:
if pod_name in i.metadata.name:
# Get pod IP
logging.info("Retrieving %s IP", pod_name)
pod_attr[pod_name]['ip'] = i.status.pod_ip
logging.info("%s IP is %s", pod_name,
pod_attr[pod_name]['ip'])
# Get pod port
logging.info("Retrieving %s Port", pod_name)
# It is possible for a pod to have an IP with no
# port. For instance maas-rack takes on genesis
# node IP and has no port associated with it. We
# will assign the value 'None' to the port value
# in such cases.
try:
specs_dict = i.spec.containers[0].__dict__
ports_dict = specs_dict['_ports'][0].__dict__
pod_attr[pod_name]['port'] = (
ports_dict['_container_port'])
logging.info("%s Port is %s", pod_name,
pod_attr[pod_name]['port'])
except:
pod_attr[pod_name]['port'] = 'None'
logging.warning("%s Port is None", pod_name)
# Update k8s_pods with new entry
k8s_pods.update(pod_attr)
# It is possible for different pods to have the same
# partial names. This means that we can end up with
# inconsistent results depending on how the pods were
# ordered in the results for 'list_namespaced_pod'.
# Hence an exception should be raised when the function
# returns results for 2 or more pods.
if count > 0:
raise AirflowException(
"Pod search string is not unique!")
# Step counter
count += 1
# Raise Execptions if the pod does not exits in the
# Kubernetes cluster
if not pod_attr[pod_name]:
raise AirflowException("Unable to locate", pod_name)
return func(self, pods_ip_port=k8s_pods)
return k8s_pod_port_ip_get
return get_k8s_pod_port_ip
|
# GUIDs from https://github.com/snare/ida-efiutils/blob/master/efiguids.py
# pylint: disable=duplicate-key
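# Hedged helper sketch (added for illustration, not part of the original GUID table):
# each value below stores an EFI GUID as an 11-element list
# [data1 (32-bit), data2 (16-bit), data3 (16-bit), byte0, ..., byte7].
# ``guid_list_to_uuid`` is a name invented here; it shows one way to render such a list
# as a canonical UUID, e.g. ACPI_TABLE_GUID -> eb9d2d30-2d88-11d3-9a16-0090273fc14d.
import uuid

def guid_list_to_uuid(parts):
    """Convert an 11-element EDK-style GUID list into a uuid.UUID."""
    data1, data2, data3 = parts[0], parts[1], parts[2]
    clock_hi, clock_low = parts[3], parts[4]
    node = 0
    for byte in parts[5:11]:
        node = (node << 8) | byte  # pack the trailing six bytes into the 48-bit node field
    return uuid.UUID(fields=(data1, data2, data3, clock_hi, clock_low, node))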
edk_guids = {
"ACPI_TABLE_GUID": [
0xEB9D2D30,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"APPLE_REMOVABLE_MEDIA_PROTOCOL_GUID": [
0x2EA9743A,
0x23D9,
0x425E,
0x87,
0x2C,
0xF6,
0x15,
0xAA,
0x19,
0x57,
0x88,
],
"ARM_GLOBAL_VARIABLE_PPI_GUID": [
0xAB1C1816,
0xD542,
0x4E6F,
0x9B,
0x1E,
0x8E,
0xCD,
0x92,
0x53,
0xE2,
0xE7,
],
"ARM_HOB_GLOBAL_VARIABLE_GUID": [
0xC3253C90,
0xA24F,
0x4599,
0xA6,
0x64,
0x1F,
0x88,
0x13,
0x77,
0x8F,
0xC9,
],
"ARM_MP_CORE_INFO_GUID": [
0xA4EE0728,
0xE5D7,
0x4AC5,
0xB2,
0x1E,
0x65,
0x8E,
0xD8,
0x57,
0xE8,
0x34,
],
"ARM_MP_CORE_INFO_PPI_GUID": [
0x6847CC74,
0xE9EC,
0x4F8F,
0xA2,
0x9D,
0xAB,
0x44,
0xE7,
0x54,
0xA8,
0xFC,
],
"BDS_LIB_STRING_PACKAGE_GUID": [
0x3B4D9B23,
0x95AC,
0x44F6,
0x9F,
0xCD,
0xE,
0x95,
0x94,
0x58,
0x6C,
0x72,
],
"BLOCKIO_VENDOR_GUID": [
0xCF31FAC5,
0xC24E,
0x11D2,
0x85,
0xF3,
0x0,
0xA0,
0xC9,
0x3E,
0xC9,
0x3B,
],
"BLOCK_MMIO_PROTOCOL_GUID": [
0x6B558CE3,
0x69E5,
0x4C67,
0xA6,
0x34,
0xF7,
0xFE,
0x72,
0xAD,
0xBE,
0x84,
],
"BOOT_MAINT_FORMSET_GUID": [
0x642237C7,
0x35D4,
0x472D,
0x83,
0x65,
0x12,
0xE0,
0xCC,
0xF2,
0x7A,
0x22,
],
"BOOT_MANAGER_FORMSET_GUID": [
0x847BC3FE,
0xB974,
0x446D,
0x94,
0x49,
0x5A,
0xD5,
0x41,
0x2E,
0x99,
0x3B,
],
"CONNECT_CONIN_EVENT_GUID": [
0xDB4E8151,
0x57ED,
0x4BED,
0x88,
0x33,
0x67,
0x51,
0xB5,
0xD1,
0xA8,
0xD7,
],
"DEVICE_MANAGER_FORMSET_GUID": [
0x3EBFA8E6,
0x511D,
0x4B5B,
0xA9,
0x5F,
0xFB,
0x38,
0x26,
0xF,
0x1C,
0x27,
],
"DP_HII_GUID": [
0xEB832FD9,
0x9089,
0x4898,
0x83,
0xC9,
0x41,
0x61,
0x8F,
0x5C,
0x48,
0xB9,
],
"DRIVER_HEALTH_FORMSET_GUID": [
0xF76E0A70,
0xB5ED,
0x4C38,
0xAC,
0x9A,
0xE5,
0xF5,
0x4B,
0xF1,
0x6E,
0x34,
],
"DRIVER_SAMPLE_FORMSET_GUID": [
0xA04A27F4,
0xDF00,
0x4D42,
0xB5,
0x52,
0x39,
0x51,
0x13,
0x02,
0x11,
0x3D,
],
"DRIVER_SAMPLE_INVENTORY_GUID": [
0xB3F56470,
0x6141,
0x4621,
0x8F,
0x19,
0x70,
0x4E,
0x57,
0x7A,
0xA9,
0xE8,
],
"DUET_CONSOLEOUT_CONFIG_GUID": [
0xED150714,
0xDF30,
0x407D,
0xB2,
0x4A,
0x4B,
0x74,
0x2F,
0xD5,
0xCE,
0xA2,
],
"DXE_CORE_FILE_NAME_GUID": [
0xD6A2CB7F,
0x6A18,
0x4E2F,
0xB4,
0x3B,
0x99,
0x20,
0xA7,
0x33,
0x70,
0x0A,
],
"DXE_SERVICES_TABLE_GUID": [
0x5AD34BA,
0x6F02,
0x4214,
0x95,
0x2E,
0x4D,
0xA0,
0x39,
0x8E,
0x2B,
0xB9,
],
"EBL_ADD_COMMAND_PROTOCOL_GUID": [
0xAEDA2428,
0x9A22,
0x4637,
0x9B,
0x21,
0x54,
0x5E,
0x28,
0xFB,
0xB8,
0x29,
],
"ECP_PEI_PCI_CFG_PPI_GUID": [
0xB0EE53D4,
0xA049,
0x4A79,
0xB2,
0xFF,
0x19,
0xD9,
0xFA,
0xEF,
0xAA,
0x94,
],
"EFI_ABSOLUTE_POINTER_PROTOCOL_GUID": [
0x8D59D32B,
0xC655,
0x4AE9,
0x9B,
0x15,
0xF2,
0x59,
0x04,
0x99,
0x2A,
0x43,
],
"EFI_ACPI_20_TABLE_GUID": [
0x8868E871,
0xE4F1,
0x11D3,
0xBC,
0x22,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_ACPI_S3_CONTEXT_GUID": [
0xEF98D3A,
0x3E33,
0x497A,
0xA4,
0x1,
0x77,
0xBE,
0x3E,
0xB7,
0x4F,
0x38,
],
"EFI_ACPI_S3_SAVE_GUID": [
0x125F2DE1,
0xFB85,
0x440C,
0xA5,
0x4C,
0x4D,
0x99,
0x35,
0x8A,
0x8D,
0x38,
],
"EFI_ACPI_SDT_PROTOCOL_GUID": [
0xEB97088E,
0xCFDF,
0x49C6,
0xBE,
0x4B,
0xD9,
0x6,
0xA5,
0xB2,
0xE,
0x86,
],
"EFI_ACPI_SUPPORT_GUID": [
0xDBFF9D55,
0x89B7,
0x46DA,
0xBD,
0xDF,
0x67,
0x7D,
0x3D,
0xC0,
0x24,
0x1D,
],
"EFI_ACPI_TABLE_GUID": [
0x8868E871,
0xE4F1,
0x11D3,
0xBC,
0x22,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_ACPI_TABLE_GUID": [
0xEB9D2D30,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_ACPI_TABLE_PROTOCOL_GUID": [
0xFFE06BDD,
0x6107,
0x46A6,
0x7B,
0xB2,
0x5A,
0x9C,
0x7E,
0xC5,
0x27,
0x5C,
],
"EFI_ACPI_TABLE_STORAGE_GUID": [
0x7E374E25,
0x8E01,
0x4FEE,
0x87,
0xF2,
0x39,
0xC,
0x23,
0xC6,
0x6,
0xCD,
],
"EFI_ACPI_VARIABLE_COMPATIBILITY_GUID": [
0xC020489E,
0x6DB2,
0x4EF2,
0x9A,
0xA5,
0xCA,
0x6,
0xFC,
0x11,
0xD3,
0x6A,
],
"EFI_ALTERNATE_FV_BLOCK_GUID": [
0xF496922D,
0x172F,
0x4BBC,
0xA1,
0xEB,
0xE,
0xEB,
0x94,
0x9C,
0x34,
0x86,
],
"EFI_APRIORI_GUID": [
0xFC510EE7,
0xFFDC,
0x11D4,
0xBD,
0x41,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_ARP_PROTOCOL_GUID": [
0xF4B427BB,
0xBA21,
0x4F16,
0xBC,
0x4E,
0x43,
0xE4,
0x16,
0xAB,
0x61,
0x9C,
],
"EFI_ARP_SERVICE_BINDING_PROTOCOL_GUID": [
0xF44C00EE,
0x1F2C,
0x4A00,
0xAA,
0x9,
0x1C,
0x9F,
0x3E,
0x8,
0x0,
0xA3,
],
"EFI_ATA_PASS_THRU_PROTOCOL_GUID": [
0x1D3DE7F0,
0x807,
0x424F,
0xAA,
0x69,
0x11,
0xA5,
0x4E,
0x19,
0xA4,
0x6F,
],
"EFI_AUTHENTICATED_VARIABLE_GUID": [
0xAAF32C78,
0x947B,
0x439A,
0xA1,
0x80,
0x2E,
0x14,
0x4E,
0xC3,
0x77,
0x92,
],
"EFI_AUTHENTICATION_CHAP_LOCAL_GUID": [
0xC280C73E,
0x15CA,
0x11DA,
0xB0,
0xCA,
0x00,
0x10,
0x83,
0xFF,
0xCA,
0x4D,
],
"EFI_AUTHENTICATION_CHAP_RADIUS_GUID": [
0xD6062B50,
0x15CA,
0x11DA,
0x92,
0x19,
0x00,
0x10,
0x83,
0xFF,
0xCA,
0x4D,
],
"EFI_AUTHENTICATION_INFO_PROTOCOL_GUID": [
0x7671D9D0,
0x53DB,
0x4173,
0xAA,
0x69,
0x23,
0x27,
0xF2,
0x1F,
0x0B,
0xC7,
],
"EFI_BDS_ARCH_PROTOCOL_GUID": [
0x665E3FF6,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_BIS_PROTOCOL_GUID": [
0x0B64AAB0,
0x5429,
0x11D4,
0x98,
0x16,
0x00,
0xA0,
0xC9,
0x1F,
0xAD,
0xCF,
],
"EFI_BLOCK_IO2_PROTOCOL_GUID": [
0xA77B2472,
0xE282,
0x4E9F,
0xA2,
0x45,
0xC2,
0xC0,
0xE2,
0x7B,
0xBC,
0xC1,
],
"EFI_BLOCK_IO_PROTOCOL_GUID": [
0x964E5B21,
0x6459,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_BOOT_LOGO_PROTOCOL_GUID": [
0xCDEA2BD3,
0xFC25,
0x4C1C,
0xB9,
0x7C,
0xB3,
0x11,
0x86,
0x6,
0x49,
0x90,
],
"EFI_BOOT_SCRIPT_EXECUTOR_CONTEXT_GUID": [
0x79CB58C4,
0xAC51,
0x442F,
0xAF,
0xD7,
0x98,
0xE4,
0x7D,
0x2E,
0x99,
0x8,
],
"EFI_BOOT_SCRIPT_EXECUTOR_VARIABLE_GUID": [
0x3079818C,
0x46D4,
0x4A73,
0xAE,
0xF3,
0xE3,
0xE4,
0x6C,
0xF1,
0xEE,
0xDB,
],
"EFI_BOOT_SCRIPT_SAVE_PROTOCOL_GUID": [
0x470E1529,
0xB79E,
0x4E32,
0xA0,
0xFE,
0x6A,
0x15,
0x6D,
0x29,
0xF9,
0xB2,
],
"EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_GUID": [
0x3BC1B285,
0x8A15,
0x4A82,
0xAA,
0xBF,
0x4D,
0x7D,
0x13,
0xFB,
0x32,
0x65,
],
"EFI_CACHE_SUBCLASS_GUID": [
0x7F0013A7,
0xDC79,
0x4B22,
0x80,
0x99,
0x11,
0xF7,
0x5F,
0xDC,
0x82,
0x9D,
],
"EFI_CAPSULE_ARCH_PROTOCOL_GUID": [
0x5053697E,
0x2CBC,
0x4819,
0x90,
0xD9,
0x05,
0x80,
0xDE,
0xEE,
0x57,
0x54,
],
"EFI_CAPSULE_ARCH_PROTOCOL_GUID": [
0x5053697E,
0x2CBC,
0x4819,
0x90,
0xD9,
0x5,
0x80,
0xDE,
0xEE,
0x57,
0x54,
],
"EFI_CAPSULE_GUID": [
0x3B6686BD,
0x0D76,
0x4030,
0xB7,
0x0E,
0xB5,
0x51,
0x9E,
0x2F,
0xC5,
0xA0,
],
"EFI_CAPSULE_INFO_GUID": [
0x8B34EAC7,
0x2690,
0x460B,
0x8B,
0xA5,
0xD5,
0xCF,
0x32,
0x83,
0x17,
0x35,
],
"EFI_CAPSULE_VENDOR_GUID": [
0x711C703F,
0xC285,
0x4B10,
0xA3,
0xB0,
0x36,
0xEC,
0xBD,
0x3C,
0x8B,
0xE2,
],
"EFI_CERT_RSA2048_GUID": [
0x3C5766E8,
0x269C,
0x4E34,
0xAA,
0x14,
0xED,
0x77,
0x6E,
0x85,
0xB3,
0xB6,
],
"EFI_CERT_RSA2048_SHA1_GUID": [
0x67F8444F,
0x8743,
0x48F1,
0xA3,
0x28,
0x1E,
0xAA,
0xB8,
0x73,
0x60,
0x80,
],
"EFI_CERT_RSA2048_SHA256_GUID": [
0xE2B36190,
0x879B,
0x4A3D,
0xAD,
0x8D,
0xF2,
0xE7,
0xBB,
0xA3,
0x27,
0x84,
],
"EFI_CERT_SHA1_GUID": [
0x826CA512,
0xCF10,
0x4AC9,
0xB1,
0x87,
0xBE,
0x1,
0x49,
0x66,
0x31,
0xBD,
],
"EFI_CERT_SHA224_GUID": [
0xB6E5233,
0xA65C,
0x44C9,
0x94,
0x7,
0xD9,
0xAB,
0x83,
0xBF,
0xC8,
0xBD,
],
"EFI_CERT_SHA256_GUID": [
0xC1C41626,
0x504C,
0x4092,
0xAC,
0xA9,
0x41,
0xF9,
0x36,
0x93,
0x43,
0x28,
],
"EFI_CERT_SHA384_GUID": [
0xFF3E5307,
0x9FD0,
0x48C9,
0x85,
0xF1,
0x8A,
0xD5,
0x6C,
0x70,
0x1E,
0x1,
],
"EFI_CERT_SHA512_GUID": [
0x93E0FAE,
0xA6C4,
0x4F50,
0x9F,
0x1B,
0xD4,
0x1E,
0x2B,
0x89,
0xC1,
0x9A,
],
"EFI_CERT_TYPE_PKCS7_GUID": [
0x4AAFD29D,
0x68DF,
0x49EE,
0x8A,
0xA9,
0x34,
0x7D,
0x37,
0x56,
0x65,
0xA7,
],
"EFI_CERT_TYPE_RSA2048_SHA256_GUID": [
0xA7717414,
0xC616,
0x4977,
0x94,
0x20,
0x84,
0x47,
0x12,
0xA7,
0x35,
0xBF,
],
"EFI_CERT_X509_GUID": [
0xA5C059A1,
0x94E4,
0x4AA7,
0x87,
0xB5,
0xAB,
0x15,
0x5C,
0x2B,
0xF0,
0x72,
],
"EFI_COMPATIBLE_MEMORY_TESTED_PROTOCOL_GUID": [
0x64C475EF,
0x344B,
0x492C,
0x93,
0xAD,
0xAB,
0x9E,
0xB4,
0x39,
0x50,
0x4,
],
"EFI_COMPONENT_NAME2_PROTOCOL_GUID": [
0x6A7A5CFF,
0xE8D9,
0x4F70,
0xBA,
0xDA,
0x75,
0xAB,
0x30,
0x25,
0xCE,
0x14,
],
"EFI_COMPONENT_NAME_PROTOCOL_GUID": [
0x107A772C,
0xD5E1,
0x11D4,
0x9A,
0x46,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_CONFIG_FILE_NAME_GUID": [
0x98B8D59B,
0xE8BA,
0x48EE,
0x98,
0xDD,
0xC2,
0x95,
0x39,
0x2F,
0x1E,
0xDB,
],
"EFI_CONSOLE_CONTROL_PROTOCOL_GUID": [
0xF42F7782,
0x12E,
0x4C12,
0x99,
0x56,
0x49,
0xF9,
0x43,
0x4,
0xF7,
0x21,
],
"EFI_CONSOLE_IN_DEVICE_GUID": [
0xD3B36F2B,
0xD551,
0x11D4,
0x9A,
0x46,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_CONSOLE_OUT_DEVICE_GUID": [
0xD3B36F2C,
0xD551,
0x11D4,
0x9A,
0x46,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_CPU_ARCH_PROTOCOL_GUID": [
0x26BACCB1,
0x6F42,
0x11D4,
0xBC,
0xE7,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_CPU_IO2_PROTOCOL_GUID": [
0xAD61F191,
0xAE5F,
0x4C0E,
0xB9,
0xFA,
0xE8,
0x69,
0xD2,
0x88,
0xC6,
0x4F,
],
"EFI_CPU_IO_PROTOCOL_GUID": [
0xB0732526,
0x38C8,
0x4B40,
0x88,
0x77,
0x61,
0xC7,
0xB0,
0x6A,
0xAC,
0x45,
],
"EFI_CRC32_GUIDED_SECTION_EXTRACTION_GUID": [
0xFC1BCDB0,
0x7D31,
0x49AA,
0x93,
0x6A,
0xA4,
0x60,
0x0D,
0x9D,
0xD0,
0x83,
],
"EFI_CRC32_GUIDED_SECTION_EXTRACTION_PROTOCOL_GUID": [
0xFC1BCDB0,
0x7D31,
0x49AA,
0x93,
0x6A,
0xA4,
0x60,
0x0D,
0x9D,
0xD0,
0x83,
],
"EFI_CUSTOMIZED_DECOMPRESS_PROTOCOL_GUID": [
0x9A44198E,
0xA4A2,
0x44E6,
0x8A,
0x1F,
0x39,
0xBE,
0xFD,
0xAC,
0x89,
0x6F,
],
"EFI_DATA_HUB_PROTOCOL_GUID": [
0xAE80D021,
0x618E,
0x11D4,
0xBC,
0xD7,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_DATA_HUB_STATUS_CODE_RECORD_GUID": [
0xD083E94C,
0x6560,
0x42E4,
0xB6,
0xD4,
0x2D,
0xF7,
0x5A,
0xDF,
0x6A,
0x2A,
],
"EFI_DEBUGPORT_PROTOCOL_GUID": [
0xEBA4E8D2,
0x3858,
0x41EC,
0xA2,
0x81,
0x26,
0x47,
0xBA,
0x96,
0x60,
0xD0,
],
"EFI_DEBUG_AGENT_GUID": [
0x865A5A9B,
0xB85D,
0x474C,
0x84,
0x55,
0x65,
0xD1,
0xBE,
0x84,
0x4B,
0xE2,
],
"EFI_DEBUG_ASSERT_PROTOCOL_GUID": [
0xBE499C92,
0x7D4B,
0x11D4,
0xBC,
0xEE,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_DEBUG_IMAGE_INFO_TABLE_GUID": [
0x49152E77,
0x1ADA,
0x4764,
0xB7,
0xA2,
0x7A,
0xFE,
0xFE,
0xD9,
0x5E,
0x8B,
],
"EFI_DEBUG_MASK_PROTOCOL_GUID": [
0x4C8A2451,
0xC207,
0x405B,
0x96,
0x94,
0x99,
0xEA,
0x13,
0x25,
0x13,
0x41,
],
"EFI_DEBUG_SERIAL_IO_PROTOCOL_GUID": [
0xE683DC4F,
0x9ED,
0x4F22,
0x86,
0x6B,
0x8E,
0x40,
0x46,
0x94,
0x7C,
0x6C,
],
"EFI_DEBUG_SUPPORT_PERIODIC_CALLBACK_PROTOCOL_GUID": [
0x9546E07C,
0x2CBB,
0x4C88,
0x98,
0x6C,
0xCD,
0x34,
0x10,
0x86,
0xF0,
0x44,
],
"EFI_DEBUG_SUPPORT_PROTOCOL_GUID": [
0x2755590C,
0x6F3C,
0x42FA,
0x9E,
0xA4,
0xA3,
0xBA,
0x54,
0x3C,
0xDA,
0x25,
],
"EFI_DECOMPRESS_PROTOCOL_GUID": [
0xD8117CFE,
0x94A6,
0x11D4,
0x9A,
0x3A,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_DEFAULT_BMP_LOGO_GUID": [
0x7BB28B99,
0x61BB,
0x11D5,
0x9A,
0x5D,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_DEFERRED_IMAGE_LOAD_PROTOCOL_GUID": [
0x15853D7C,
0x3DDF,
0x43E0,
0xA1,
0xCB,
0xEB,
0xF8,
0x5B,
0x8F,
0x87,
0x2C,
],
"EFI_DEVICE_IO_PROTOCOL_GUID": [
0xAF6AC311,
0x84C3,
0x11D2,
0x8E,
0x3C,
0x00,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID": [
0x5C99A21,
0xC70F,
0x4AD2,
0x8A,
0x5F,
0x35,
0xDF,
0x33,
0x43,
0xF5,
0x1E,
],
"EFI_DEVICE_PATH_PROTOCOL_GUID": [
0x9576E91,
0x6D3F,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID": [
0x8B843E20,
0x8132,
0x4852,
0x90,
0xCC,
0x55,
0x1A,
0x4E,
0x4A,
0x7F,
0x1C,
],
"EFI_DEVICE_PATH_UTILITIES_PROTOCOL_GUID": [
0x379BE4E,
0xD706,
0x437D,
0xB0,
0x37,
0xED,
0xB8,
0x2F,
0xB7,
0x72,
0xA4,
],
"EFI_DHCP4_PROTOCOL_GUID": [
0x8A219718,
0x4EF5,
0x4761,
0x91,
0xC8,
0xC0,
0xF0,
0x4B,
0xDA,
0x9E,
0x56,
],
"EFI_DHCP4_SERVICE_BINDING_PROTOCOL_GUID": [
0x9D9A39D8,
0xBD42,
0x4A73,
0xA4,
0xD5,
0x8E,
0xE9,
0x4B,
0xE1,
0x13,
0x80,
],
"EFI_DHCP6_PROTOCOL_GUID": [
0x87C8BAD7,
0x595,
0x4053,
0x82,
0x97,
0xDE,
0xDE,
0x39,
0x5F,
0x5D,
0x5B,
],
"EFI_DHCP6_SERVICE_BINDING_PROTOCOL_GUID": [
0x9FB9A8A1,
0x2F4A,
0x43A6,
0x88,
0x9C,
0xD0,
0xF7,
0xB6,
0xC4,
0x7A,
0xD5,
],
"EFI_DISK_INFO_AHCI_INTERFACE_GUID": [
0x9E498932,
0x4ABC,
0x45AF,
0xA3,
0x4D,
0x2,
0x47,
0x78,
0x7B,
0xE7,
0xC6,
],
"EFI_DISK_INFO_IDE_INTERFACE_GUID": [
0x5E948FE3,
0x26D3,
0x42B5,
0xAF,
0x17,
0x61,
0x2,
0x87,
0x18,
0x8D,
0xEC,
],
"EFI_DISK_INFO_PROTOCOL_GUID": [
0xD432A67F,
0x14DC,
0x484B,
0xB3,
0xBB,
0x3F,
0x2,
0x91,
0x84,
0x93,
0x27,
],
"EFI_DISK_INFO_SCSI_INTERFACE_GUID": [
0x8F74BAA,
0xEA36,
0x41D9,
0x95,
0x21,
0x21,
0xA7,
0xF,
0x87,
0x80,
0xBC,
],
"EFI_DISK_INFO_USB_INTERFACE_GUID": [
0xCB871572,
0xC11A,
0x47B5,
0xB4,
0x92,
0x67,
0x5E,
0xAF,
0xA7,
0x77,
0x27,
],
"EFI_DISK_IO_PROTOCOL_GUID": [
0xCE345171,
0xBA0B,
0x11D2,
0x8E,
0x4F,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_DPC_PROTOCOL_GUID": [
0x480F8AE9,
0xC46,
0x4AA9,
0xBC,
0x89,
0xDB,
0x9F,
0xBA,
0x61,
0x98,
0x6,
],
"EFI_DRIVER_BINDING_PROTOCOL_GUID": [
0x18A031AB,
0xB443,
0x4D1A,
0xA5,
0xC0,
0xC,
0x9,
0x26,
0x1E,
0x9F,
0x71,
],
"EFI_DRIVER_CONFIGURATION2_PROTOCOL_GUID": [
0xBFD7DC1D,
0x24F1,
0x40D9,
0x82,
0xE7,
0x2E,
0x09,
0xBB,
0x6B,
0x4E,
0xBE,
],
"EFI_DRIVER_CONFIGURATION_PROTOCOL_GUID": [
0x107A772B,
0xD5E1,
0x11D4,
0x9A,
0x46,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_DRIVER_DIAGNOSTICS2_PROTOCOL_GUID": [
0x4D330321,
0x025F,
0x4AAC,
0x90,
0xD8,
0x5E,
0xD9,
0x0,
0x17,
0x3B,
0x63,
],
"EFI_DRIVER_DIAGNOSTICS2_PROTOCOL_GUID": [
0x4D330321,
0x025F,
0x4AAC,
0x90,
0xD8,
0x5E,
0xD9,
0x00,
0x17,
0x3B,
0x63,
],
"EFI_DRIVER_DIAGNOSTICS_PROTOCOL_GUID": [
0x0784924F,
0xE296,
0x11D4,
0x9A,
0x49,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_DRIVER_FAMILY_OVERRIDE_PROTOCOL_GUID": [
0xB1EE129E,
0xDA36,
0x4181,
0x91,
0xF8,
0x4,
0xA4,
0x92,
0x37,
0x66,
0xA7,
],
"EFI_DRIVER_HEALTH_PROTOCOL_GUID": [
0x2A534210,
0x9280,
0x41D8,
0xAE,
0x79,
0xCA,
0xDA,
0x1,
0xA2,
0xB1,
0x27,
],
"EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL_GUID": [
0x5C198761,
0x16A8,
0x4E69,
0x97,
0x2C,
0x89,
0xD6,
0x79,
0x54,
0xF8,
0x1D,
],
"EFI_DXE_IPL_PPI_GUID": [
0xAE8CE5D,
0xE448,
0x4437,
0xA8,
0xD7,
0xEB,
0xF5,
0xF1,
0x94,
0xF7,
0x31,
],
"EFI_DXE_SERVICES_TABLE_GUID": [
0x5AD34BA,
0x6F02,
0x4214,
0x95,
0x2E,
0x4D,
0xA0,
0x39,
0x8E,
0x2B,
0xB9,
],
"EFI_DXE_SMM_READY_TO_LOCK_PROTOCOL_GUID": [
0x60FF8964,
0xE906,
0x41D0,
0xAF,
0xED,
0xF2,
0x41,
0xE9,
0x74,
0xE0,
0x8E,
],
"EFI_EAP_MANAGEMENT_PROTOCOL_GUID": [
0xBB62E663,
0x625D,
0x40B2,
0xA0,
0x88,
0xBB,
0xE8,
0x36,
0x23,
0xA2,
0x45,
],
"EFI_EAP_PROTOCOL_GUID": [
0x5D9F96DB,
0xE731,
0x4CAA,
0xA0,
0xD,
0x72,
0xE1,
0x87,
0xCD,
0x77,
0x62,
],
"EFI_EBC_INTERPRETER_PROTOCOL_GUID": [
0x13AC6DD1,
0x73D0,
0x11D4,
0xB0,
0x6B,
0x00,
0xAA,
0x00,
0xBD,
0x6D,
0xE7,
],
"EFI_EBC_SIMPLE_DEBUGGER_PROTOCOL_GUID": [
0x2A72D11E,
0x7376,
0x40F6,
0x9C,
0x68,
0x23,
0xFA,
0x2F,
0xE3,
0x63,
0xF1,
],
"EFI_EBC_VM_TEST_PROTOCOL_GUID": [
0xAAEACCFD,
0xF27B,
0x4C17,
0xB6,
0x10,
0x75,
0xCA,
0x1F,
0x2D,
0xFB,
0x52,
],
"EFI_EBC_VM_TEST_PROTOCOL_GUID": [
0xAAEACCFD,
0xF27B,
0x4C17,
0xB6,
0x10,
0x75,
0xCA,
0x1F,
0x2D,
0xFB,
0x52,
],
"EFI_EDID_ACTIVE_PROTOCOL_GUID": [
0xBD8C1056,
0x9F36,
0x44EC,
0x92,
0xA8,
0xA6,
0x33,
0x7F,
0x81,
0x79,
0x86,
],
"EFI_EDID_DISCOVERED_PROTOCOL_GUID": [
0x1C0C34F6,
0xD380,
0x41FA,
0xA0,
0x49,
0x8A,
0xD0,
0x6C,
0x1A,
0x66,
0xAA,
],
"EFI_EDID_DISCOVERED_PROTOCOL_GUID": [
0x1C0C34F6,
0xD380,
0x41FA,
0xA0,
0x49,
0x8A,
0xD0,
0x6C,
0x1A,
0x66,
0xAA,
],
"EFI_EDID_OVERRIDE_PROTOCOL_GUID": [
0x48ECB431,
0xFB72,
0x45C0,
0xA9,
0x22,
0xF4,
0x58,
0xFE,
0x4,
0xB,
0xD5,
],
"EFI_EMU_PHYSICAL_DISK_GUID": [
0xF2BA331A,
0x8985,
0x11DB,
0xA4,
0x06,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_EMU_SYSTEM_CONFIG_GUID": [
0x9C4FB516,
0x3A1E,
0xD847,
0xA1,
0xA1,
0x70,
0x58,
0xB6,
0x98,
0x67,
0x32,
],
"EFI_EMU_VIRTUAL_DISK_GUID": [
0xF2BA331A,
0x8985,
0x11DB,
0xA4,
0x06,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_END_OF_DXE_EVENT_GROUP_GUID": [
0x2CE967A,
0xDD7E,
0x4FFC,
0x9E,
0xE7,
0x81,
0x0C,
0xF0,
0x47,
0x8,
0x80,
],
"EFI_END_OF_DXE_EVENT_GROUP_GUID": [
0x2CE967A,
0xDD7E,
0x4FFC,
0x9E,
0xE7,
0x81,
0xC,
0xF0,
0x47,
0x8,
0x80,
],
"EFI_ERROR_SECTION_DIRECTED_IO_DMAR_GUID": [
0x71761D37,
0x32B2,
0x45CD,
0xA7,
0xD0,
0xB0,
0xFE,
0xDD,
0x93,
0xE8,
0xCF,
],
"EFI_ERROR_SECTION_DMAR_GENERIC_GUID": [
0x5B51FEF7,
0xC79D,
0x4434,
0x8F,
0x1B,
0xAA,
0x62,
0xDE,
0x3E,
0x2C,
0x64,
],
"EFI_ERROR_SECTION_FW_ERROR_RECORD_GUID": [
0x81212A96,
0x09ED,
0x4996,
0x94,
0x71,
0x8D,
0x72,
0x9C,
0x8E,
0x69,
0xED,
],
"EFI_ERROR_SECTION_IOMMU_DMAR_GUID": [
0x036F84E1,
0x7F37,
0x428C,
0xA7,
0x9E,
0x57,
0x5F,
0xDF,
0xAA,
0x84,
0xEC,
],
"EFI_ERROR_SECTION_PCIE_GUID": [
0xD995E954,
0xBBC1,
0x430F,
0xAD,
0x91,
0xB4,
0x4D,
0xCB,
0x3C,
0x6F,
0x35,
],
"EFI_ERROR_SECTION_PCI_DEVICE_GUID": [
0xEB5E4685,
0xCA66,
0x4769,
0xB6,
0xA2,
0x26,
0x06,
0x8B,
0x00,
0x13,
0x26,
],
"EFI_ERROR_SECTION_PCI_PCIX_BUS_GUID": [
0xC5753963,
0x3B84,
0x4095,
0xBF,
0x78,
0xED,
0xDA,
0xD3,
0xF9,
0xC9,
0xDD,
],
"EFI_ERROR_SECTION_PLATFORM_MEMORY_GUID": [
0xA5BC1114,
0x6F64,
0x4EDE,
0xB8,
0x63,
0x3E,
0x83,
0xED,
0x7C,
0x83,
0xB1,
],
"EFI_ERROR_SECTION_PROCESSOR_GENERIC_GUID": [
0x9876CCAD,
0x47B4,
0x4BDB,
0xB6,
0x5E,
0x16,
0xF1,
0x93,
0xC4,
0xF3,
0xDB,
],
"EFI_ERROR_SECTION_PROCESSOR_SPECIFIC_GUID": [
0xDC3EA0B0,
0xA144,
0x4797,
0xB9,
0x5B,
0x53,
0xFA,
0x24,
0x2B,
0x6E,
0x1D,
],
"EFI_EVENT_GROUP_DXE_DISPATCH_GUID": [
0x7081E22F,
0xCAC6,
0x4053,
0x94,
0x68,
0x67,
0x57,
0x82,
0xCF,
0x88,
0xE5,
],
"EFI_EVENT_LEGACY_BOOT_GUID": [
0x2A571201,
0x4966,
0x47F6,
0x8B,
0x86,
0xF3,
0x1E,
0x41,
0xF3,
0x2F,
0x10,
],
"EFI_EVENT_NOTIFICATION_TYEP_BOOT_GUID": [
0x3D61A466,
0xAB40,
0x409A,
0xA6,
0x98,
0xF3,
0x62,
0xD4,
0x64,
0xB3,
0x8F,
],
"EFI_EVENT_NOTIFICATION_TYEP_CMC_GUID": [
0x2DCE8BB1,
0xBDD7,
0x450E,
0xB9,
0xAD,
0x9C,
0xF4,
0xEB,
0xD4,
0xF8,
0x90,
],
"EFI_EVENT_NOTIFICATION_TYEP_CPE_GUID": [
0x4E292F96,
0xD843,
0x4A55,
0xA8,
0xC2,
0xD4,
0x81,
0xF2,
0x7E,
0xBE,
0xEE,
],
"EFI_EVENT_NOTIFICATION_TYEP_DMAR_GUID": [
0x667DD791,
0xC6B3,
0x4C27,
0x8A,
0x6B,
0x0F,
0x8E,
0x72,
0x2D,
0xEB,
0x41,
],
"EFI_EVENT_NOTIFICATION_TYEP_INIT_GUID": [
0xCC5263E8,
0x9308,
0x454A,
0x89,
0xD0,
0x34,
0x0B,
0xD3,
0x9B,
0xC9,
0x8E,
],
"EFI_EVENT_NOTIFICATION_TYEP_MCE_GUID": [
0xE8F56FFE,
0x919C,
0x4CC5,
0xBA,
0x88,
0x65,
0xAB,
0xE1,
0x49,
0x13,
0xBB,
],
"EFI_EVENT_NOTIFICATION_TYEP_NMI_GUID": [
0x5BAD89FF,
0xB7E6,
0x42C9,
0x81,
0x4A,
0xCF,
0x24,
0x85,
0xD6,
0xE9,
0x8A,
],
"EFI_EVENT_NOTIFICATION_TYEP_PCIE_GUID": [
0xCF93C01F,
0x1A16,
0x4DFC,
0xB8,
0xBC,
0x9C,
0x4D,
0xAF,
0x67,
0xC1,
0x04,
],
"EFI_EXTENDED_SAL_BASE_IO_SERVICES_PROTOCOL_GUID": [
0x5AEA42B5,
0x31E1,
0x4515,
0xBC,
0x31,
0xB8,
0xD5,
0x25,
0x75,
0x65,
0xA6,
],
"EFI_EXTENDED_SAL_BASE_SERVICES_PROTOCOL_GUID": [
0xD9E9FA06,
0x0FE0,
0x41C3,
0x96,
0xFB,
0x83,
0x42,
0x5A,
0x33,
0x94,
0xF8,
],
"EFI_EXTENDED_SAL_CACHE_SERVICES_PROTOCOL_GUID": [
0xEDC9494,
0x2743,
0x4BA5,
0x88,
0x18,
0x0A,
0xEF,
0x52,
0x13,
0xF1,
0x88,
],
"EFI_EXTENDED_SAL_ELOG_SERVICES_PROTOCOL_GUID": [
0xD5E4EE5F,
0x3E0A,
0x453C,
0xA7,
0x25,
0xB6,
0x92,
0xBB,
0x6,
0x36,
0x5A,
],
"EFI_EXTENDED_SAL_FV_BLOCK_SERVICES_PROTOCOL_GUID": [
0xA2271DF1,
0xBCBB,
0x4F1D,
0x98,
0xA9,
0x06,
0xBC,
0x17,
0x2F,
0x07,
0x1A,
],
"EFI_EXTENDED_SAL_LOCK_SERVICES_PROTOCOL_GUID": [
0x76B75C23,
0xFE4F,
0x4E17,
0xA2,
0xAD,
0x1A,
0x65,
0x3D,
0xBB,
0x49,
0x4A,
],
"EFI_EXTENDED_SAL_MCA_LOG_SERVICES_PROTOCOL_GUID": [
0xCB3FD86E,
0x38A3,
0x4C03,
0x9A,
0x5C,
0x90,
0xCF,
0xA3,
0xA2,
0xAB,
0x7A,
],
"EFI_EXTENDED_SAL_MCA_SERVICES_PROTOCOL_GUID": [
0x2A591128,
0x6CC7,
0x42B1,
0x8A,
0xF0,
0x58,
0x93,
0x3B,
0x68,
0x2D,
0xBB,
],
"EFI_EXTENDED_SAL_MP_SERVICES_PROTOCOL_GUID": [
0x697D81A2,
0xCF18,
0x4DC0,
0x9E,
0x0D,
0x06,
0x11,
0x3B,
0x61,
0x8A,
0x3F,
],
"EFI_EXTENDED_SAL_MTC_SERVICES_PROTOCOL_GUID": [
0x899AFD18,
0x75E8,
0x408B,
0xA4,
0x1A,
0x6E,
0x2E,
0x7E,
0xCD,
0xF4,
0x54,
],
"EFI_EXTENDED_SAL_PAL_SERVICES_PROTOCOL_GUID": [
0xE1CD9D21,
0x0FC2,
0x438D,
0x97,
0x03,
0x04,
0xE6,
0x6D,
0x96,
0x1E,
0x57,
],
"EFI_EXTENDED_SAL_PCI_SERVICES_PROTOCOL_GUID": [
0xA46B1A31,
0xAD66,
0x4905,
0x92,
0xF6,
0x2B,
0x46,
0x59,
0xDC,
0x30,
0x63,
],
"EFI_EXTENDED_SAL_RESET_SERVICES_PROTOCOL_GUID": [
0x7D019990,
0x8CE1,
0x46F5,
0xA7,
0x76,
0x3C,
0x51,
0x98,
0x67,
0x6A,
0xA0,
],
"EFI_EXTENDED_SAL_RTC_SERVICES_PROTOCOL_GUID": [
0x7E97A470,
0xEFDB,
0x4D02,
0x8F,
0xCE,
0x61,
0x90,
0xD2,
0x7B,
0xA2,
0x96,
],
"EFI_EXTENDED_SAL_SENSOR_SERVICES_PROTOCOL_GUID": [
0x4A153B6E,
0x85A1,
0x4982,
0x98,
0xF4,
0x6A,
0x8C,
0xFC,
0xA4,
0xAB,
0xA1,
],
"EFI_EXTENDED_SAL_SM_COM_LAYER_SERVICES_PROTOCOL_GUID": [
0x4356799,
0x81B7,
0x4E08,
0xA3,
0x8D,
0xD9,
0x78,
0xFA,
0x47,
0xBA,
0x42,
],
"EFI_EXTENDED_SAL_SST_GUID": [
0x38802700,
0x868A,
0x4B4E,
0x81,
0xD4,
0x4F,
0x1B,
0xDC,
0xCF,
0xB4,
0x6F,
],
"EFI_EXTENDED_SAL_STALL_SERVICES_PROTOCOL_GUID": [
0x53A58D06,
0xAC27,
0x4D8C,
0xB5,
0xE9,
0xF0,
0x8A,
0x80,
0x65,
0x41,
0x70,
],
"EFI_EXTENDED_SAL_STATUS_CODE_SERVICES_PROTOCOL_GUID": [
0xDBD91D,
0x55E9,
0x420F,
0x96,
0x39,
0x5E,
0x9F,
0x84,
0x37,
0xB4,
0x4F,
],
"EFI_EXTENDED_SAL_VARIABLE_SERVICES_PROTOCOL_GUID": [
0x4ECB6C53,
0xC641,
0x4370,
0x8C,
0xB2,
0x3B,
0x0E,
0x49,
0x6E,
0x83,
0x78,
],
"EFI_EXTENDED_SAL_VIRTUAL_SERVICES_PROTOCOL_GUID": [
0xC1A74056,
0x260E,
0x4871,
0xA0,
0x31,
0xE6,
0x45,
0xA6,
0x5B,
0x6E,
0x11,
],
"EFI_EXT_SCSI_PASS_THRU_PROTOCOL_GUID": [
0x143B7632,
0xB81B,
0x4CB7,
0xAB,
0xD3,
0xB6,
0x25,
0xA5,
0xB9,
0xBF,
0xFE,
],
"EFI_FAULT_TOLERANT_WRITE_PROTOCOL_GUID": [
0x3EBD9E82,
0x2C78,
0x4DE6,
0x97,
0x86,
0x8D,
0x4B,
0xFC,
0xB7,
0xC8,
0x81,
],
"EFI_FFS_VOLUME_TOP_FILE_GUID": [
0x1BA0062E,
0xC779,
0x4582,
0x85,
0x66,
0x33,
0x6A,
0xE8,
0xF7,
0x8F,
0x09,
],
"EFI_FILE_SYSTEM_INFO_ID_GUID": [
0x9576E93,
0x6D3F,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_FILE_SYSTEM_VOLUME_LABEL_INFO_ID_GUID": [
0xDB47D7D3,
0xFE81,
0x11D3,
0x9A,
0x35,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_FIND_FV_PPI_GUID": [
0x36164812,
0xA023,
0x44E5,
0xBD,
0x85,
0x5,
0xBF,
0x3C,
0x77,
0x0,
0xAA,
],
"EFI_FIRMWARE_CONTENTS_SIGNED_GUID": [
0xF9D89E8,
0x9259,
0x4F76,
0xA5,
0xAF,
0xC,
0x89,
0xE3,
0x40,
0x23,
0xDF,
],
"EFI_FIRMWARE_FILE_SYSTEM2_GUID": [
0x8C8CE578,
0x8A3D,
0x4F1C,
0x99,
0x35,
0x89,
0x61,
0x85,
0xC3,
0x2D,
0xD3,
],
"EFI_FIRMWARE_FILE_SYSTEM3_GUID": [
0x5473C07A,
0x3DCB,
0x4DCA,
0xBD,
0x6F,
0x1E,
0x96,
0x89,
0xE7,
0x34,
0x9A,
],
"EFI_FIRMWARE_FILE_SYSTEM_GUID": [
0x7A9354D9,
0x0468,
0x444A,
0x81,
0xCE,
0x0B,
0xF6,
0x17,
0xD8,
0x90,
0xDF,
],
"EFI_FIRMWARE_MANAGEMENT_PROTOCOL_GUID": [
0x86C77A67,
0xB97,
0x4633,
0xA1,
0x87,
0x49,
0x10,
0x4D,
0x6,
0x85,
0xC7,
],
"EFI_FIRMWARE_PERFORMANCE_GUID": [
0xC095791A,
0x3001,
0x47B2,
0x80,
0xC9,
0xEA,
0xC7,
0x31,
0x9F,
0x2F,
0xA4,
],
"EFI_FIRMWARE_VOLUME2_PROTOCOL_GUID": [
0x220E73B6,
0x6BDB,
0x4413,
0x84,
0x5,
0xB9,
0x74,
0xB1,
0x8,
0x61,
0x9A,
],
"EFI_FIRMWARE_VOLUME_BLOCK2_PROTOCOL_GUID": [
0x8F644FA9,
0xE850,
0x4DB1,
0x9C,
0xE2,
0xB,
0x44,
0x69,
0x8E,
0x8D,
0xA4,
],
"EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID": [
0x8F644FA9,
0xE850,
0x4DB1,
0x9C,
0xE2,
0xB,
0x44,
0x69,
0x8E,
0x8D,
0xA4,
],
"EFI_FIRMWARE_VOLUME_DISPATCH_PROTOCOL_GUID": [
0x7AA35A69,
0x506C,
0x444F,
0xA7,
0xAF,
0x69,
0x4B,
0xF5,
0x6F,
0x71,
0xC8,
],
"EFI_FIRMWARE_VOLUME_PROTOCOL_GUID": [
0x389F751F,
0x1838,
0x4388,
0x83,
0x90,
0xCD,
0x81,
0x54,
0xBD,
0x27,
0xF8,
],
"EFI_FORM_BROWSER2_PROTOCOL_GUID": [
0xB9D4C360,
0xBCFB,
0x4F9B,
0x92,
0x98,
0x53,
0xC1,
0x36,
0x98,
0x22,
0x58,
],
"EFI_FORM_BROWSER_COMPATIBILITY_PROTOCOL_GUID": [
0xFB7C852,
0xADCA,
0x4853,
0x8D,
0xF,
0xFB,
0xA7,
0x1B,
0x1C,
0xE1,
0x1A,
],
"EFI_FORM_BROWSER_PROTOCOL_GUID": [
0xE5A1333E,
0xE1B4,
0x4D55,
0xCE,
0xEB,
0x35,
0xC3,
0xEF,
0x13,
0x34,
0x43,
],
"EFI_FORM_BROWSER_PROTOCOL_GUID": [
0xFB7C852,
0xADCA,
0x4853,
0x8D,
0xF,
0xFB,
0xA7,
0x1B,
0x1C,
0xE1,
0x1A,
],
"EFI_FORM_CALLBACK_PROTOCOL_GUID": [
0xF3E4543D,
0xCF35,
0x6CEF,
0x35,
0xC4,
0x4F,
0xE6,
0x34,
0x4D,
0xFC,
0x54,
],
"EFI_FRAMEWORK_DEVICE_PATH_GUID": [
0xB7084E63,
0x46B7,
0x4D1A,
0x86,
0x77,
0xE3,
0x0B,
0x53,
0xDB,
0xF0,
0x50,
],
"EFI_FTP4_PROTOCOL_GUID": [
0xEB338826,
0x681B,
0x4295,
0xB3,
0x56,
0x2B,
0x36,
0x4C,
0x75,
0x7B,
0x9,
],
"EFI_FTP4_SERVICE_BINDING_PROTOCOL_GUID": [
0xFAAECB1,
0x226E,
0x4782,
0xAA,
0xCE,
0x7D,
0xB9,
0xBC,
0xBF,
0x4D,
0xAF,
],
"EFI_FTW_LITE_PROTOCOL_GUID": [
0x3F557189,
0x8DAE,
0x45AE,
0xA0,
0xB3,
0x2B,
0x99,
0xCA,
0x7A,
0xA7,
0xA0,
],
"EFI_FVB_EXTENSION_PROTOCOL_GUID": [
0x53A4C71B,
0xB581,
0x4170,
0x91,
0xB3,
0x8D,
0xB8,
0x7A,
0x4B,
0x5C,
0x46,
],
"EFI_GENERIC_MEMORY_TEST_PROTOCOL_GUID": [
0x309DE7F1,
0x7F5E,
0x4ACE,
0xB4,
0x9C,
0x53,
0x1B,
0xE5,
0xAA,
0x95,
0xEF,
],
"EFI_GENERIC_VARIABLE_GUID": [
0x59D1C24F,
0x50F1,
0x401A,
0xB1,
0x01,
0xF3,
0x3E,
0x0D,
0xAE,
0xD4,
0x43,
],
"EFI_GLOBAL_VARIABLE_GUID": [
0x8BE4DF61,
0x93CA,
0x11D2,
0xAA,
0x0D,
0x00,
0xE0,
0x98,
0x03,
0x2B,
0x8C,
],
"EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID": [
0x9042A9DE,
0x23DC,
0x4A38,
0x96,
0xFB,
0x7A,
0xDE,
0xD0,
0x80,
0x51,
0x6A,
],
"EFI_HARDWARE_ERROR_VARIABLE_GUID": [
0x414E6BDD,
0xE47B,
0x47CC,
0xB2,
0x44,
0xBB,
0x61,
0x02,
0x0C,
0xF5,
0x16,
],
"EFI_HASH_ALGORITHM_SHA1_GUID": [
0x2AE9D80F,
0x3FB2,
0x4095,
0xB7,
0xB1,
0xE9,
0x31,
0x57,
0xB9,
0x46,
0xB6,
],
"EFI_HASH_ALGORITHM_SHA1_NOPAD_GUID": [
0x24C5DC2F,
0x53E2,
0x40CA,
0x9E,
0xD6,
0xA5,
0xD9,
0xA4,
0x9F,
0x46,
0x3B,
],
"EFI_HASH_ALGORITHM_SHA224_GUID": [
0x8DF01A06,
0x9BD5,
0x4BF7,
0xB0,
0x21,
0xDB,
0x4F,
0xD9,
0xCC,
0xF4,
0x5B,
],
"EFI_HASH_ALGORITHM_SHA256_GUID": [
0x51AA59DE,
0xFDF2,
0x4EA3,
0xBC,
0x63,
0x87,
0x5F,
0xB7,
0x84,
0x2E,
0xE9,
],
"EFI_HASH_ALGORITHM_SHA256_NOPAD_GUID": [
0x8628752A,
0x6CB7,
0x4814,
0x96,
0xFC,
0x24,
0xA8,
0x15,
0xAC,
0x22,
0x26,
],
"EFI_HASH_ALGORITHM_SHA384_GUID": [
0xEFA96432,
0xDE33,
0x4DD2,
0xAE,
0xE6,
0x32,
0x8C,
0x33,
0xDF,
0x77,
0x7A,
],
"EFI_HASH_ALGORITHM_SHA512_GUID": [
0xCAA4381E,
0x750C,
0x4770,
0xB8,
0x70,
0x7A,
0x23,
0xB4,
0xE4,
0x21,
0x30,
],
"EFI_HASH_ALGORTIHM_MD5_GUID": [
0xAF7C79C,
0x65B5,
0x4319,
0xB0,
0xAE,
0x44,
0xEC,
0x48,
0x4E,
0x4A,
0xD7,
],
"EFI_HASH_PROTOCOL_GUID": [
0xC5184932,
0xDBA5,
0x46DB,
0xA5,
0xBA,
0xCC,
0x0B,
0xDA,
0x9C,
0x14,
0x35,
],
"EFI_HASH_SERVICE_BINDING_PROTOCOL_GUID": [
0x42881C98,
0xA4F3,
0x44B0,
0xA3,
0x9D,
0xDF,
0xA1,
0x86,
0x67,
0xD8,
0xCD,
],
"EFI_HII_COMPATIBILITY_PROTOCOL_GUID": [
0x5542CCE1,
0xDF5C,
0x4D1B,
0xAB,
0xCA,
0x36,
0x4F,
0x77,
0xD3,
0x99,
0xFB,
],
"EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID": [
0x330D4706,
0xF2A0,
0x4E4F,
0xA3,
0x69,
0xB6,
0x6F,
0xA8,
0xD5,
0x43,
0x85,
],
"EFI_HII_CONFIG_ROUTING_PROTOCOL_GUID": [
0x587E72D7,
0xCC50,
0x4F79,
0x82,
0x09,
0xCA,
0x29,
0x1F,
0xC1,
0xA1,
0x0F,
],
"EFI_HII_DATABASE_PROTOCOL_GUID": [
0xEF9FC172,
0xA1B2,
0x4693,
0xB3,
0x27,
0x6D,
0x32,
0xFC,
0x41,
0x60,
0x42,
],
"EFI_HII_DRIVER_HEALTH_FORMSET_GUID": [
0xF22FC20C,
0x8CF4,
0x45EB,
0x8E,
0x6,
0xAD,
0x4E,
0x50,
0xB9,
0x5D,
0xD3,
],
"EFI_HII_FONT_PROTOCOL_GUID": [
0xE9CA4775,
0x8657,
0x47FC,
0x97,
0xE7,
0x7E,
0xD6,
0x5A,
0x8,
0x43,
0x24,
],
"EFI_HII_FRONT_PAGE_CLASS_GUID": [
0x94D411B7,
0x7669,
0x45C3,
0xBA,
0x3B,
0xF3,
0xA5,
0x8A,
0x71,
0x56,
0x81,
],
"EFI_HII_IMAGE_PROTOCOL_GUID": [
0x31A6406A,
0x6BDF,
0x4E46,
0xB2,
0xA2,
0xEB,
0xAA,
0x89,
0xC4,
0x9,
0x20,
],
"EFI_HII_PACKAGE_LIST_PROTOCOL_GUID": [
0x6A1EE763,
0xD47A,
0x43B4,
0xAA,
0xBE,
0xEF,
0x1D,
0xE2,
0xAB,
0x56,
0xFC,
],
"EFI_HII_PLATFORM_SETUP_FORMSET_GUID": [
0x93039971,
0x8545,
0x4B04,
0xB4,
0x5E,
0x32,
0xEB,
0x83,
0x26,
0x4,
0xE,
],
"EFI_HII_PROTOCOL_GUID": [
0x5542CCE1,
0xDF5C,
0x4D1B,
0xAB,
0xCA,
0x36,
0x4F,
0x77,
0xD3,
0x99,
0xFB,
],
"EFI_HII_PROTOCOL_GUID": [
0xD7AD636E,
0xB997,
0x459B,
0xBF,
0x3F,
0x88,
0x46,
0x89,
0x79,
0x80,
0xE1,
],
"EFI_HII_SET_KEYBOARD_LAYOUT_EVENT_GUID": [
0x14982A4F,
0xB0ED,
0x45B8,
0xA8,
0x11,
0x5A,
0x7A,
0x9B,
0xC2,
0x32,
0xDF,
],
"EFI_HII_STANDARD_FORM_GUID": [
0x3BD2F4EC,
0xE524,
0x46E4,
0xA9,
0xD8,
0x51,
0x1,
0x17,
0x42,
0x55,
0x62,
],
"EFI_HII_STRING_PROTOCOL_GUID": [
0xFD96974,
0x23AA,
0x4CDC,
0xB9,
0xCB,
0x98,
0xD1,
0x77,
0x50,
0x32,
0x2A,
],
"EFI_HII_USER_CREDENTIAL_FORMSET_GUID": [
0x337F4407,
0x5AEE,
0x4B83,
0xB2,
0xA7,
0x4E,
0xAD,
0xCA,
0x30,
0x88,
0xCD,
],
"EFI_HOB_LIST_GUID": [
0x7739F24C,
0x93D7,
0x11D4,
0x9A,
0x3A,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_HOB_MEMORY_ALLOC_BSP_STORE_GUID": [
0x564B33CD,
0xC92A,
0x4593,
0x90,
0xBF,
0x24,
0x73,
0xE4,
0x3C,
0x63,
0x22,
],
"EFI_HOB_MEMORY_ALLOC_STACK_GUID": [
0x4ED4BF27,
0x4092,
0x42E9,
0x80,
0x7D,
0x52,
0x7B,
0x1D,
0x0,
0xC9,
0xBD,
],
"EFI_IA32_X64_ERROR_TYPE_BUS_CHECK_GUID": [
0x1CF3F8B3,
0xC5B1,
0x49A2,
0xAA,
0x59,
0x5E,
0xEF,
0x92,
0xFF,
0xA6,
0x3C,
],
"EFI_IA32_X64_ERROR_TYPE_CACHE_CHECK_GUID": [
0xA55701F5,
0xE3EF,
0x43DE,
0xAC,
0x72,
0x24,
0x9B,
0x57,
0x3F,
0xAD,
0x2C,
],
"EFI_IA32_X64_ERROR_TYPE_MS_CHECK_GUID": [
0x48AB7F57,
0xDC34,
0x4F6C,
0xA7,
0xD3,
0xB0,
0xB5,
0xB0,
0xA7,
0x43,
0x14,
],
"EFI_IA32_X64_ERROR_TYPE_TLB_CHECK_GUID": [
0xFC06B535,
0x5E1F,
0x4562,
0x9F,
0x25,
0x0A,
0x3B,
0x9A,
0xDB,
0x63,
0xC3,
],
"EFI_IDE_CONTROLLER_INIT_PROTOCOL_GUID": [
0xA1E37052,
0x80D9,
0x4E65,
0xA3,
0x17,
0x3E,
0x9A,
0x55,
0xC4,
0x3E,
0xC9,
],
"EFI_IFR_FRAMEWORK_GUID": [
0x31CA5D1A,
0xD511,
0x4931,
0xB7,
0x82,
0xAE,
0x6B,
0x2B,
0x17,
0x8C,
0xD7,
],
"EFI_IFR_REFRESH_ID_OP_GUID": [
0xF5E655D9,
0x02A6,
0x46F2,
0x9E,
0x76,
0xB8,
0xBE,
0x8E,
0x60,
0xAB,
0x22,
],
"EFI_IFR_TIANO_GUID": [
0xF0B1735,
0x87A0,
0x4193,
0xB2,
0x66,
0x53,
0x8C,
0x38,
0xAF,
0x48,
0xCE,
],
"EFI_IMAGE_SECURITY_DATABASE_GUID": [
0xD719B2CB,
0x3D3A,
0x4596,
0xA3,
0xBC,
0xDA,
0xD0,
0xE,
0x67,
0x65,
0x6F,
],
"EFI_INCOMPATIBLE_PCI_DEVICE_SUPPORT_PROTOCOL_GUID": [
0xEB23F55A,
0x7863,
0x4AC2,
0x8D,
0x3D,
0x95,
0x65,
0x35,
0xDE,
0x03,
0x75,
],
"EFI_IOBASE_HOB_GUID": [
0xD4A28A3E,
0xDCF2,
0x43CF,
0xA2,
0xB7,
0xF3,
0x57,
0x2A,
0x7C,
0xAB,
0x9,
],
"EFI_IP4_CONFIG_PROTOCOL_GUID": [
0x3B95AA31,
0x3793,
0x434B,
0x86,
0x67,
0xC8,
0x07,
0x08,
0x92,
0xE0,
0x5E,
],
"EFI_IP4_PROTOCOL_GUID": [
0x41D94CD2,
0x35B6,
0x455A,
0x82,
0x58,
0xD4,
0xE5,
0x13,
0x34,
0xAA,
0xDD,
],
"EFI_IP4_SERVICE_BINDING_PROTOCOL_GUID": [
0xC51711E7,
0xB4BF,
0x404A,
0xBF,
0xB8,
0x0A,
0x04,
0x8E,
0xF1,
0xFF,
0xE4,
],
"EFI_IP6_CONFIG_PROTOCOL_GUID": [
0x937FE521,
0x95AE,
0x4D1A,
0x89,
0x29,
0x48,
0xBC,
0xD9,
0x0A,
0xD3,
0x1A,
],
"EFI_IP6_PROTOCOL_GUID": [
0x2C8759D5,
0x5C2D,
0x66EF,
0x92,
0x5F,
0xB6,
0x6C,
0x10,
0x19,
0x57,
0xE2,
],
"EFI_IP6_SERVICE_BINDING_PROTOCOL_GUID": [
0xEC835DD3,
0xFE0F,
0x617B,
0xA6,
0x21,
0xB3,
0x50,
0xC3,
0xE1,
0x33,
0x88,
],
"EFI_IPSEC2_PROTOCOL_GUID": [
0xA3979E64,
0xACE8,
0x4DDC,
0xBC,
0x7,
0x4D,
0x66,
0xB8,
0xFD,
0x9,
0x77,
],
"EFI_IPSEC_CONFIG_PROTOCOL_GUID": [
0xCE5E5929,
0xC7A3,
0x4602,
0xAD,
0x9E,
0xC9,
0xDA,
0xF9,
0x4E,
0xBF,
0xCF,
],
"EFI_IPSEC_PROTOCOL_GUID": [
0xDFB386F7,
0xE100,
0x43AD,
0x9C,
0x9A,
0xED,
0x90,
0xD0,
0x8A,
0x5E,
0x12,
],
"EFI_ISA_ACPI_PROTOCOL_GUID": [
0x64A892DC,
0x5561,
0x4536,
0x92,
0xC7,
0x79,
0x9B,
0xFC,
0x18,
0x33,
0x55,
],
"EFI_ISA_IO_PROTOCOL_GUID": [
0x7EE2BD44,
0x3DA0,
0x11D4,
0x9A,
0x38,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_ISCSI_INITIATOR_NAME_PROTOCOL_GUID": [
0x59324945,
0xEC44,
0x4C0D,
0xB1,
0xCD,
0x9D,
0xB1,
0x39,
0xDF,
0x7,
0xC,
],
"EFI_KMS_FORMAT_AESCBC_128_GUID": [
0xA0E8EE6A,
0x0E92,
0x44D4,
0x86,
0x1B,
0x0E,
0xAA,
0x4A,
0xCA,
0x44,
0xA2,
],
"EFI_KMS_FORMAT_AESCBC_256_GUID": [
0xD7E69789,
0x1F68,
0x45E8,
0x96,
0xEF,
0x3B,
0x64,
0x07,
0xA5,
0xB2,
0xDC,
],
"EFI_KMS_FORMAT_AESXTS_128_GUID": [
0x4776E33F,
0xDB47,
0x479A,
0xA2,
0x5F,
0xA1,
0xCD,
0x0A,
0xFA,
0xB3,
0x8B,
],
"EFI_KMS_FORMAT_AESXTS_256_GUID": [
0xDC7E8613,
0xC4BB,
0x4DB0,
0x84,
0x62,
0x13,
0x51,
0x13,
0x57,
0xAB,
0xE2,
],
"EFI_KMS_FORMAT_GENERIC_1024_GUID": [
0x43BE0B44,
0x874B,
0x4EAD,
0xB0,
0x9C,
0x24,
0x1A,
0x4F,
0xBD,
0x7E,
0xB3,
],
"EFI_KMS_FORMAT_GENERIC_128_GUID": [
0xEC8A3D69,
0x6DDF,
0x4108,
0x94,
0x76,
0x73,
0x37,
0xFC,
0x52,
0x21,
0x36,
],
"EFI_KMS_FORMAT_GENERIC_160_GUID": [
0xA3B3E6F8,
0xEFCA,
0x4BC1,
0x88,
0xFB,
0xCB,
0x87,
0x33,
0x9B,
0x25,
0x79,
],
"EFI_KMS_FORMAT_GENERIC_2048_GUID": [
0x40093F23,
0x630C,
0x4626,
0x9C,
0x48,
0x40,
0x37,
0x3B,
0x19,
0xCB,
0xBE,
],
"EFI_KMS_FORMAT_GENERIC_256_GUID": [
0x70F64793,
0xC323,
0x4261,
0xAC,
0x2C,
0xD8,
0x76,
0xF2,
0x7C,
0x53,
0x45,
],
"EFI_KMS_FORMAT_GENERIC_3072_GUID": [
0xB9237513,
0x6C44,
0x4411,
0xA9,
0x90,
0x21,
0xE5,
0x56,
0xE0,
0x5A,
0xDE,
],
"EFI_KMS_FORMAT_GENERIC_512_GUID": [
0x978FE043,
0xD7AF,
0x422E,
0x8A,
0x92,
0x2B,
0x48,
0xE4,
0x63,
0xBD,
0xE6,
],
"EFI_KMS_FORMAT_MD2_128_GUID": [
0x78BE11C4,
0xEE44,
0x4A22,
0x9F,
0x05,
0x03,
0x85,
0x2E,
0xC5,
0xC9,
0x78,
],
"EFI_KMS_FORMAT_MD4_128_GUID": [
0xD1C17AA1,
0xCAC5,
0x400F,
0xBE,
0x17,
0xE2,
0xA2,
0xAE,
0x06,
0x67,
0x7C,
],
"EFI_KMS_FORMAT_MD5SHA_128_GUID": [
0x1C178237,
0x6897,
0x459E,
0x9D,
0x36,
0x67,
0xCE,
0x8E,
0xF9,
0x4F,
0x76,
],
"EFI_KMS_FORMAT_MD5_128_GUID": [
0xDCBC3662,
0x9CDA,
0x4B52,
0xA0,
0x4C,
0x82,
0xEB,
0x1D,
0x23,
0x48,
0xC7,
],
"EFI_KMS_FORMAT_MDC2_128_GUID": [
0xF7AD60F8,
0xEFA8,
0x44A3,
0x91,
0x13,
0x23,
0x1F,
0x39,
0x9E,
0xB4,
0xC7,
],
"EFI_KMS_FORMAT_MDC4_128_GUID": [
0x3FA4F847,
0xD8EB,
0x4DF4,
0xBD,
0x49,
0x10,
0x3A,
0x0A,
0x84,
0x7B,
0xBC,
],
"EFI_KMS_FORMAT_RSASHA1_1024_GUID": [
0x56417BED,
0x6BBE,
0x4882,
0x86,
0xA0,
0x3A,
0xE8,
0xBB,
0x17,
0xF8,
0xF9,
],
"EFI_KMS_FORMAT_RSASHA1_2048_GUID": [
0xF66447D4,
0x75A6,
0x463E,
0xA8,
0x19,
0x07,
0x7F,
0x2D,
0xDA,
0x05,
0xE9,
],
"EFI_KMS_FORMAT_RSASHA256_2048_GUID": [
0xA477AF13,
0x877D,
0x4060,
0xBA,
0xA1,
0x25,
0xD1,
0xBE,
0xA0,
0x8A,
0xD3,
],
"EFI_KMS_FORMAT_SHA1_160_GUID": [
0x453C5E5A,
0x482D,
0x43F0,
0x87,
0xC9,
0x59,
0x41,
0xF3,
0xA3,
0x8A,
0xC2,
],
"EFI_KMS_FORMAT_SHA256_256_GUID": [
0x6BB4F5CD,
0x8022,
0x448D,
0xBC,
0x6D,
0x77,
0x1B,
0xAE,
0x93,
0x5F,
0xC6,
],
"EFI_KMS_FORMAT_SHA512_512_GUID": [
0x2F240E12,
0xE14D,
0x475C,
0x83,
0xB0,
0xEF,
0xFF,
0x22,
0xD7,
0x7B,
0xE7,
],
"EFI_KMS_PROTOCOL_GUID": [
0xEC3A978D,
0x7C4E,
0x48FA,
0x9A,
0xBE,
0x6A,
0xD9,
0x1C,
0xC8,
0xF8,
0x11,
],
"EFI_LEGACY_8259_PROTOCOL_GUID": [
0x38321DBA,
0x4FE0,
0x4E17,
0x8A,
0xEC,
0x41,
0x30,
0x55,
0xEA,
0xED,
0xC1,
],
"EFI_LEGACY_BIOS_GUID": [
0x2E3044AC,
0x879F,
0x490F,
0x97,
0x60,
0xBB,
0xDF,
0xAF,
0x69,
0x5F,
0x50,
],
"EFI_LEGACY_BIOS_PLATFORM_PROTOCOL_GUID": [
0x783658A3,
0x4172,
0x4421,
0xA2,
0x99,
0xE0,
0x9,
0x7,
0x9C,
0xC,
0xB4,
],
"EFI_LEGACY_BIOS_PROTOCOL_GUID": [
0xDB9A1E3D,
0x45CB,
0x4ABB,
0x85,
0x3B,
0xE5,
0x38,
0x7F,
0xDB,
0x2E,
0x2D,
],
"EFI_LEGACY_BIOS_THUNK_PROTOCOL_GUID": [
0x4C51A7BA,
0x7195,
0x442D,
0x87,
0x92,
0xBE,
0xEA,
0x6E,
0x2F,
0xF6,
0xEC,
],
"EFI_LEGACY_DEV_ORDER_VARIABLE_GUID": [
0xA56074DB,
0x65FE,
0x45F7,
0xBD,
0x21,
0x2D,
0x2B,
0xDD,
0x8E,
0x96,
0x52,
],
"EFI_LEGACY_INTERRUPT_PROTOCOL_GUID": [
0x31CE593D,
0x108A,
0x485D,
0xAD,
0xB2,
0x78,
0xF2,
0x1F,
0x29,
0x66,
0xBE,
],
"EFI_LEGACY_REGION2_PROTOCOL_GUID": [
0x70101EAF,
0x85,
0x440C,
0xB3,
0x56,
0x8E,
0xE3,
0x6F,
0xEF,
0x24,
0xF0,
],
"EFI_LEGACY_REGION_PROTOCOL_GUID": [
0xFC9013A,
0x568,
0x4BA9,
0x9B,
0x7E,
0xC9,
0xC3,
0x90,
0xA6,
0x60,
0x9B,
],
"EFI_LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID": [
0xBC62157E,
0x3E33,
0x4FEC,
0x99,
0x20,
0x2D,
0x3B,
0x36,
0xD7,
0x50,
0xDF,
],
"EFI_LOADED_IMAGE_PROTOCOL_GUID": [
0x5B1B31A1,
0x9562,
0x11D2,
0x8E,
0x3F,
0x00,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_LOAD_FILE2_PROTOCOL_GUID": [
0x4006C0C1,
0xFCB3,
0x403E,
0x99,
0x6D,
0x4A,
0x6C,
0x87,
0x24,
0xE0,
0x6D,
],
"EFI_LOAD_FILE_PROTOCOL_GUID": [
0x56EC3091,
0x954C,
0x11D2,
0x8E,
0x3F,
0x00,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_LOAD_FIXED_ADDRESS_CONFIGURATION_TABLE_GUID": [
0x2CA88B53,
0xD296,
0x4080,
0xA4,
0xA5,
0xCA,
0xD9,
0xBA,
0xE2,
0x4B,
0x9,
],
"EFI_LOCK_BOX_PROTOCOL_GUID": [
0xBD445D79,
0xB7AD,
0x4F04,
0x9A,
0xD8,
0x29,
0xBD,
0x20,
0x40,
0xEB,
0x3C,
],
"EFI_MANAGED_NETWORK_PROTOCOL_GUID": [
0x7AB33A91,
0xACE5,
0x4326,
0xB5,
0x72,
0xE7,
0xEE,
0x33,
0xD3,
0x9F,
0x16,
],
"EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL_GUID": [
0xF36FF770,
0xA7E1,
0x42CF,
0x9E,
0xD2,
0x56,
0xF0,
0xF2,
0x71,
0xF4,
0x4C,
],
"EFI_MEASURED_FV_HOB_GUID": [
0xB2360B42,
0x7173,
0x420A,
0x86,
0x96,
0x46,
0xCA,
0x6B,
0xAB,
0x10,
0x60,
],
"EFI_MEMORY_PRODUCER_GUID": [
0x1D7ADD6E,
0xB2DA,
0x4B0B,
0xB2,
0x9F,
0x49,
0xCB,
0x42,
0xF4,
0x63,
0x56,
],
"EFI_MEMORY_SUBCLASS_GUID": [
0x4E8F4EBB,
0x64B9,
0x4E05,
0x9B,
0x18,
0x4C,
0xFE,
0x49,
0x23,
0x50,
0x97,
],
"EFI_MEMORY_TYPE_INFORMATION_GUID": [
0x4C19049F,
0x4137,
0x4DD3,
0x9C,
0x10,
0x8B,
0x97,
0xA8,
0x3F,
0xFD,
0xFA,
],
"EFI_METRONOME_ARCH_PROTOCOL_GUID": [
0x26BACCB2,
0x6F42,
0x11D4,
0xBC,
0xE7,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_MINI_SHELL_FILE_GUID": [
0x86AD232B,
0xD33A,
0x465C,
0xBF,
0x5F,
0x41,
0x37,
0xB,
0xA9,
0x2F,
0xE2,
],
"EFI_MISC_PRODUCER_GUID": [
0x62512C92,
0x63C4,
0x4D80,
0x82,
0xB1,
0xC1,
0xA4,
0xDC,
0x44,
0x80,
0xE5,
],
"EFI_MISC_SUBCLASS_GUID": [
0x772484B2,
0x7482,
0x4B91,
0x9F,
0x9A,
0xAD,
0x43,
0xF8,
0x1C,
0x58,
0x81,
],
"EFI_MMC_HOST_PROTOCOL_GUID": [
0x3E591C00,
0x9E4A,
0x11DF,
0x92,
0x44,
0x00,
0x02,
0xA5,
0xD5,
0xC5,
0x1B,
],
"EFI_MONOTONIC_COUNTER_ARCH_PROTOCOL_GUID": [
0x1DA97072,
0xBDDC,
0x4B30,
0x99,
0xF1,
0x72,
0xA0,
0xB5,
0x6F,
0xFF,
0x2A,
],
"EFI_MONTONIC_COUNTER_ARCH_PROTOCOL_GUID": [
0x1DA97072,
0xBDDC,
0x4B30,
0x99,
0xF1,
0x72,
0xA0,
0xB5,
0x6F,
0xFF,
0x2A,
],
"EFI_MPS_TABLE_GUID": [
0xEB9D2D2F,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_MP_SERVICES_PROTOCOL_GUID": [
0x3FDDA605,
0xA76E,
0x4F46,
0xAD,
0x29,
0x12,
0xF4,
0x53,
0x1B,
0x3D,
0x08,
],
"EFI_MTFTP4_PROTOCOL_GUID": [
0x78247C57,
0x63DB,
0x4708,
0x99,
0xC2,
0xA8,
0xB4,
0xA9,
0xA6,
0x1F,
0x6B,
],
"EFI_MTFTP4_SERVICE_BINDING_PROTOCOL_GUID": [
0x2FE800BE,
0x8F01,
0x4AA6,
0x94,
0x6B,
0xD7,
0x13,
0x88,
0xE1,
0x83,
0x3F,
],
"EFI_MTFTP6_PROTOCOL_GUID": [
0xBF0A78BA,
0xEC29,
0x49CF,
0xA1,
0xC9,
0x7A,
0xE5,
0x4E,
0xAB,
0x6A,
0x51,
],
"EFI_MTFTP6_SERVICE_BINDING_PROTOCOL_GUID": [
0xD9760FF3,
0x3CCA,
0x4267,
0x80,
0xF9,
0x75,
0x27,
0xFA,
0xFA,
0x42,
0x23,
],
"EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID": [
0x1ACED566,
0x76ED,
0x4218,
0xBC,
0x81,
0x76,
0x7F,
0x1F,
0x97,
0x7A,
0x89,
],
"EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID": [
0xE18541CD,
0xF755,
0x4F73,
0x92,
0x8D,
0x64,
0x3C,
0x8A,
0x79,
0xB2,
0x29,
],
"EFI_NIC_IP4_CONFIG_NVDATA_GUID": [
0x9D5B53F,
0xF4B0,
0x4F59,
0xA0,
0xB1,
0x7B,
0x57,
0xD3,
0x5C,
0xE,
0x5,
],
"EFI_NIC_IP4_CONFIG_PROTOCOL_GUID": [
0xDCA3D4D,
0x12DA,
0x4728,
0xBF,
0x7E,
0x86,
0xCE,
0xB9,
0x28,
0xD0,
0x67,
],
"EFI_NIC_IP4_CONFIG_VARIABLE_GUID": [
0xD8944553,
0xC4DD,
0x41F4,
0x9B,
0x30,
0xE1,
0x39,
0x7C,
0xFB,
0x26,
0x7B,
],
"EFI_NT_LOAD_AS_DLL_PPI_GUID": [
0xCCC53F6B,
0xA03A,
0x4ED8,
0x83,
0x9A,
0x3,
0xD9,
0x9C,
0x2,
0xB4,
0xE3,
],
"EFI_OEM_BADGING_PROTOCOL_GUID": [
0x170E13C0,
0xBF1B,
0x4218,
0x87,
0x1D,
0x2A,
0xBD,
0xC6,
0xF8,
0x87,
0xBC,
],
"EFI_PART_TYPE_EFI_SYSTEM_PART_GUID": [
0xC12A7328,
0xF81F,
0x11D2,
0xBA,
0x4B,
0x00,
0xA0,
0xC9,
0x3E,
0xC9,
0x3B,
],
"EFI_PART_TYPE_LEGACY_MBR_GUID": [
0x024DEE41,
0x33E7,
0x11D3,
0x9D,
0x69,
0x00,
0x08,
0xC7,
0x81,
0xF3,
0x9F,
],
"EFI_PATH_FILE_NAME_GUID": [
0x7644C181,
0xFA6E,
0x46DA,
0x80,
0xCB,
0x04,
0xB9,
0x90,
0x40,
0x62,
0xE8,
],
"EFI_PCD_PROTOCOL_GUID": [
0x13A3F0F6,
0x264A,
0x3EF0,
0xF2,
0xE0,
0xDE,
0xC5,
0x12,
0x34,
0x2F,
0x34,
],
"EFI_PCI_ENUMERATION_COMPLETE_GUID": [
0x30CFE3E7,
0x3DE1,
0x4586,
0xBE,
0x20,
0xDE,
0xAB,
0xA1,
0xB3,
0xB7,
0x93,
],
"EFI_PCI_EXPRESS_BASE_ADDRESS_GUID": [
0x3677D529,
0x326F,
0x4603,
0xA9,
0x26,
0xEA,
0xAC,
0xE0,
0x1D,
0xCB,
0xB0,
],
"EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GUID": [
0xCF8034BE,
0x6768,
0x4D8B,
0xB7,
0x39,
0x7C,
0xCE,
0x68,
0x3A,
0x9F,
0xBE,
],
"EFI_PCI_HOTPLUG_DEVICE_GUID": [
0x0B280816,
0x52E7,
0x4E51,
0xAA,
0x57,
0x11,
0xBD,
0x41,
0xCB,
0xEF,
0xC3,
],
"EFI_PCI_HOTPLUG_REQUEST_PROTOCOL_GUID": [
0x19CB87AB,
0x2CB9,
0x4665,
0x83,
0x60,
0xDD,
0xCF,
0x60,
0x54,
0xF7,
0x9D,
],
"EFI_PCI_HOT_PLUG_INIT_PROTOCOL_GUID": [
0xAA0E8BC1,
0xDABC,
0x46B0,
0xA8,
0x44,
0x37,
0xB8,
0x16,
0x9B,
0x2B,
0xEA,
],
"EFI_PCI_IO_PROTOCOL_GUID": [
0x4CF5B200,
0x68B8,
0x4CA5,
0x9E,
0xEC,
0xB2,
0x3E,
0x3F,
0x50,
0x2,
0x9A,
],
"EFI_PCI_OPTION_ROM_TABLE_GUID": [
0x7462660F,
0x1CBD,
0x48DA,
0xAD,
0x11,
0x91,
0x71,
0x79,
0x13,
0x83,
0x1C,
],
"EFI_PCI_OVERRIDE_GUID": [
0xB5B35764,
0x460C,
0x4A06,
0x99,
0xFC,
0x77,
0xA1,
0x7C,
0x1B,
0x5C,
0xEB,
],
"EFI_PCI_PLATFORM_PROTOCOL_GUID": [
0x7D75280,
0x27D4,
0x4D69,
0x90,
0xD0,
0x56,
0x43,
0xE2,
0x38,
0xB3,
0x41,
],
"EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID": [
0x2F707EBB,
0x4A1A,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PC_ANSI_GUID": [
0xE0C14753,
0xF9BE,
0x11D2,
0x9A,
0x0C,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PEI_APRIORI_FILE_NAME_GUID": [
0x1B45CC0A,
0x156A,
0x428A,
0xAF,
0x62,
0x49,
0x86,
0x4D,
0xA0,
0xE6,
0xE6,
],
"EFI_PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID": [
0xABD42895,
0x78CF,
0x4872,
0x84,
0x44,
0x1B,
0x5C,
0x18,
0x0B,
0xFB,
0xFF,
],
"EFI_PEI_CPU_IO_PPI_INSTALLED_GUID": [
0xE6AF1F7B,
0xFC3F,
0x46DA,
0xA8,
0x28,
0xA3,
0xB4,
0x57,
0xA4,
0x42,
0x82,
],
"EFI_PEI_DECOMPRESS_PPI_GUID": [
0x1A36E4E7,
0xFAB6,
0x476A,
0x8E,
0x75,
0x69,
0x5A,
0x5,
0x76,
0xFD,
0xD7,
],
"EFI_PEI_DEVICE_RECOVERY_MODULE_PPI_GUID": [
0x0DE2CE25,
0x446A,
0x45A7,
0xBF,
0xC9,
0x37,
0xDA,
0x26,
0x34,
0x4B,
0x37,
],
"EFI_PEI_END_OF_PEI_PHASE_PPI_GUID": [
0x605EA650,
0xC65C,
0x42E1,
0xBA,
0x80,
0x91,
0xA5,
0x2A,
0xB6,
0x18,
0xC6,
],
"EFI_PEI_FIND_FV_PPI_GUID": [
0x36164812,
0xA023,
0x44E5,
0xBD,
0x85,
0x5,
0xBF,
0x3C,
0x77,
0x0,
0xAA,
],
"EFI_PEI_FIRMWARE_VOLUME_INFO_PPI_GUID": [
0x49EDB1C1,
0xBF21,
0x4761,
0xBB,
0x12,
0xEB,
0x0,
0x31,
0xAA,
0xBB,
0x39,
],
"EFI_PEI_FLUSH_INSTRUCTION_CACHE_GUID": [
0xD8117CFC,
0x94A6,
0x11D4,
0x9A,
0x3A,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PEI_LOADED_IMAGE_PPI_GUID": [
0xC1FCD448,
0x6300,
0x4458,
0xB8,
0x64,
0x28,
0xDF,
0x01,
0x53,
0x64,
0xBC,
],
"EFI_PEI_LOAD_FILE_GUID": [
0xB9E0ABFE,
0x5979,
0x4914,
0x97,
0x7F,
0x6D,
0xEE,
0x78,
0xC2,
0x78,
0xA6,
],
"EFI_PEI_LOAD_FILE_PPI_GUID": [
0xB9E0ABFE,
0x5979,
0x4914,
0x97,
0x7F,
0x6D,
0xEE,
0x78,
0xC2,
0x78,
0xA6,
],
"EFI_PEI_PCD_PPI_GUID": [
0x1F34D25,
0x4DE2,
0x23AD,
0x3F,
0xF3,
0x36,
0x35,
0x3F,
0xF3,
0x23,
0xF1,
],
"EFI_PEI_PCI_CFG2_PPI_GUID": [
0x57A449A,
0x1FDC,
0x4C06,
0xBF,
0xC9,
0xF5,
0x3F,
0x6A,
0x99,
0xBB,
0x92,
],
"EFI_PEI_PCI_CFG_PPI_INSTALLED_GUID": [
0xE1F2EBA0,
0xF7B9,
0x4A26,
0x86,
0x20,
0x13,
0x12,
0x21,
0x64,
0x2A,
0x90,
],
"EFI_PEI_PERFORMANCE_HOB_GUID": [
0x10F432DE,
0xDEEC,
0x4631,
0x80,
0xCD,
0x47,
0xF6,
0x5D,
0x8F,
0x80,
0xBB,
],
"EFI_PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID": [
0xF894643D,
0xC449,
0x42D1,
0x8E,
0xA8,
0x85,
0xBD,
0xD8,
0xC6,
0x5B,
0xDE,
],
"EFI_PEI_PE_COFF_LOADER_GUID": [
0xD8117CFF,
0x94A6,
0x11D4,
0x9A,
0x3A,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PEI_READ_ONLY_VARIABLE2_PPI_GUID": [
0x2AB86EF5,
0xECB5,
0x4134,
0xB5,
0x56,
0x38,
0x54,
0xCA,
0x1F,
0xE1,
0xB4,
],
"EFI_PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID": [
0x3CDC90C6,
0x13FB,
0x4A75,
0x9E,
0x79,
0x59,
0xE9,
0xDD,
0x78,
0xB9,
0xFA,
],
"EFI_PEI_RECOVERY_BLOCK_IO_PPI_GUID": [
0x695D8AA1,
0x42EE,
0x4C46,
0x80,
0x5C,
0x6E,
0xA6,
0xBC,
0xE7,
0x99,
0xE3,
],
"EFI_PEI_RECOVERY_MODULE_PPI_GUID": [
0xFB6D9542,
0x612D,
0x4F45,
0x87,
0x2F,
0x5C,
0xFF,
0x52,
0xE9,
0x3D,
0xCF,
],
"EFI_PEI_REPORT_PROGRESS_CODE_PPI_GUID": [
0x229832D3,
0x7A30,
0x4B36,
0xB8,
0x27,
0xF4,
0xC,
0xB7,
0xD4,
0x54,
0x36,
],
"EFI_PEI_RESET_PPI_GUID": [
0xEF398D58,
0x9DFD,
0x4103,
0xBF,
0x94,
0x78,
0xC6,
0xF4,
0xFE,
0x71,
0x2F,
],
"EFI_PEI_RSC_HANDLER_PPI_GUID": [
0x65D394,
0x9951,
0x4144,
0x82,
0xA3,
0xA,
0xFC,
0x85,
0x79,
0xC2,
0x51,
],
"EFI_PEI_S3_RESUME2_PPI_GUID": [
0x6D582DBC,
0xDB85,
0x4514,
0x8F,
0xCC,
0x5A,
0xDF,
0x62,
0x27,
0xB1,
0x47,
],
"EFI_PEI_S3_RESUME_PPI_GUID": [
0x4426CCB2,
0xE684,
0x4A8A,
0xAE,
0x40,
0x20,
0xD4,
0xB0,
0x25,
0xB7,
0x10,
],
"EFI_PEI_SECTION_EXTRACTION_PPI_GUID": [
0x4F89E208,
0xE144,
0x4804,
0x9E,
0xC8,
0x0F,
0x89,
0x4F,
0x7E,
0x36,
0xD7,
],
"EFI_PEI_SECURITY2_PPI_GUID": [
0xDCD0BE23,
0x9586,
0x40F4,
0xB6,
0x43,
0x06,
0x52,
0x2C,
0xED,
0x4E,
0xDE,
],
"EFI_PEI_SECURITY_PPI_GUID": [
0x1388066E,
0x3A57,
0x4EFA,
0x98,
0xF3,
0xC1,
0x2F,
0x3A,
0x95,
0x8A,
0x29,
],
"EFI_PEI_SMBUS2_PPI_GUID": [
0x9CA93627,
0xB65B,
0x4324,
0xA2,
0x2,
0xC0,
0xB4,
0x61,
0x76,
0x45,
0x43,
],
"EFI_PEI_SMBUS_PPI_GUID": [
0xABD42895,
0x78CF,
0x4872,
0x84,
0x44,
0x1B,
0x5C,
0x18,
0xB,
0xFB,
0xDA,
],
"EFI_PEI_SMM_COMMUNICATION_PPI_GUID": [
0xAE933E1C,
0xCC47,
0x4E38,
0x8F,
0xE,
0xE2,
0xF6,
0x1D,
0x26,
0x5,
0xDF,
],
"EFI_PEI_STALL_PPI_GUID": [
0x1F4C6F90,
0xB06B,
0x48D8,
0xA2,
0x01,
0xBA,
0xE5,
0xF1,
0xCD,
0x7D,
0x56,
],
"EFI_PEI_TEMPORARY_RAM_DONE_PPI_GUID": [
0xCEAB683C,
0xEC56,
0x4A2D,
0xA9,
0x06,
0x40,
0x53,
0xFA,
0x4E,
0x9C,
0x16,
],
"EFI_PEI_TEMPORARY_RAM_SUPPORT_PPI_GUID": [
0xDBE23AA9,
0xA345,
0x4B97,
0x85,
0xB6,
0xB2,
0x26,
0xF1,
0x61,
0x73,
0x89,
],
"EFI_PEI_TRANSFER_CONTROL_GUID": [
0xD8117D02,
0x94A6,
0x11D4,
0x9A,
0x3A,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PEI_VECTOR_HANDOFF_INFO_PPI_GUID": [
0x3CD652B4,
0x6D33,
0x4DCE,
0x89,
0xDB,
0x83,
0xDF,
0x97,
0x66,
0xFC,
0xCA,
],
"EFI_PERFORMANCE_PROTOCOL_GUID": [
0xFFECFFFF,
0x923C,
0x14D2,
0x9E,
0x3F,
0x22,
0xA0,
0xC9,
0x69,
0x56,
0x3B,
],
"EFI_PHYSICAL_PRESENCE_DATA_GUID": [
0xF6499B1,
0xE9AD,
0x493D,
0xB9,
0xC2,
0x2F,
0x90,
0x81,
0x5C,
0x6C,
0xBC,
],
"EFI_PLATFORM_DRIVER_OVERRIDE_PROTOCOL_GUID": [
0x6B30C738,
0xA391,
0x11D4,
0x9A,
0x3B,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PLATFORM_MEMTEST_PROTOCOL_GUID": [
0x859BA18,
0x7DD7,
0x4ED7,
0xA8,
0x8E,
0x10,
0x9C,
0x63,
0x91,
0x7B,
0xDD,
],
"EFI_PLATFORM_TO_DRIVER_CONFIGURATION_CLP_GUID": [
0x345ECC0E,
0x0CB6,
0x4B75,
0xBB,
0x57,
0x1B,
0x12,
0x9C,
0x47,
0x33,
0x3E,
],
"EFI_PLATFORM_TO_DRIVER_CONFIGURATION_PROTOCOL_GUID": [
0x642CD590,
0x8059,
0x4C0A,
0xA9,
0x58,
0xC5,
0xEC,
0x07,
0xD2,
0x3C,
0x4B,
],
"EFI_PRIMARY_CONSOLE_IN_DEVICE_GUID": [
0xE451DCBE,
0x96A1,
0x4729,
0xA5,
0xCF,
0x6B,
0x9C,
0x2C,
0xFF,
0x47,
0xFD,
],
"EFI_PRIMARY_CONSOLE_OUT_DEVICE_GUID": [
0x62BDF38A,
0xE3D5,
0x492C,
0x95,
0xC,
0x23,
0xA7,
0xF6,
0x6E,
0x67,
0x2E,
],
"EFI_PRIMARY_STANDARD_ERROR_DEVICE_GUID": [
0x5A68191B,
0x9B97,
0x4752,
0x99,
0x46,
0xE3,
0x6A,
0x5D,
0xA9,
0x42,
0xB1,
],
"EFI_PRINT2_PROTOCOL_GUID": [
0xF05976EF,
0x83F1,
0x4F3D,
0x86,
0x19,
0xF7,
0x59,
0x5D,
0x41,
0xE5,
0x38,
],
"EFI_PRINT_PROTOCOL_GUID": [
0xDF2D868E,
0x32FC,
0x4CF0,
0x8E,
0x6B,
0xFF,
0xD9,
0x5D,
0x13,
0x43,
0xD0,
],
"EFI_PROCESSOR_PRODUCER_GUID": [
0x1BF06AEA,
0x5BEC,
0x4A8D,
0x95,
0x76,
0x74,
0x9B,
0x09,
0x56,
0x2D,
0x30,
],
"EFI_PROCESSOR_SUBCLASS_GUID": [
0x26FDEB7E,
0xB8AF,
0x4CCF,
0xAA,
0x97,
0x02,
0x63,
0x3C,
0xE4,
0x8C,
0xA7,
],
"EFI_PS2_POLICY_PROTOCOL_GUID": [
0x4DF19259,
0xDC71,
0x4D46,
0xBE,
0xF1,
0x35,
0x7B,
0xB5,
0x78,
0xC4,
0x18,
],
"EFI_PXE_BASE_CODE_CALLBACK_PROTOCOL_GUID": [
0x245DCA21,
0xFB7B,
0x11D3,
0x8F,
0x01,
0x00,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_PXE_BASE_CODE_PROTOCOL_GUID": [
0x03C4E603,
0xAC28,
0x11D3,
0x9A,
0x2D,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_PXE_DHCP4_CALLBACK_PROTOCOL_GUID": [
0xC1544C01,
0x92A4,
0x4198,
0x8A,
0x84,
0x77,
0x85,
0x83,
0xC2,
0x36,
0x21,
],
"EFI_PXE_DHCP4_PROTOCOL_GUID": [
0x03C4E624,
0xAC28,
0x11D3,
0x9A,
0x2D,
0x00,
0x90,
0x29,
0x3F,
0xC1,
0x4D,
],
"EFI_REAL_TIME_CLOCK_ARCH_PROTOCOL_GUID": [
0x27CFAC87,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_RESET_ARCH_PROTOCOL_GUID": [
0x27CFAC88,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_RSC_HANDLER_PROTOCOL_GUID": [
0x86212936,
0xE76,
0x41C8,
0xA0,
0x3A,
0x2A,
0xF2,
0xFC,
0x1C,
0x39,
0xE2,
],
"EFI_RUNTIME_ARCH_PROTOCOL_GUID": [
0xB7DFB4E1,
0x52F,
0x449F,
0x87,
0xBE,
0x98,
0x18,
0xFC,
0x91,
0xB7,
0x33,
],
"EFI_RUNTIME_CRYPT_PROTOCOL_GUID": [
0xE1475E0C,
0x1746,
0x4802,
0x86,
0x2E,
0x1,
0x1C,
0x2C,
0x2D,
0x9D,
0x86,
],
"EFI_S3_SAVE_STATE_PROTOCOL_GUID": [
0xE857CAF6,
0xC046,
0x45DC,
0xBE,
0x3F,
0xEE,
0x7,
0x65,
0xFB,
0xA8,
0x87,
],
"EFI_S3_SMM_SAVE_STATE_PROTOCOL_GUID": [
0x320AFE62,
0xE593,
0x49CB,
0xA9,
0xF1,
0xD4,
0xC2,
0xF4,
0xAF,
0x1,
0x4C,
],
"EFI_SAL_MCA_INIT_PMI_PROTOCOL_GUID": [
0xB60DC6E8,
0x3B6F,
0x11D5,
0xAF,
0x9,
0x0,
0xA0,
0xC9,
0x44,
0xA0,
0x5B,
],
"EFI_SAL_SYSTEM_TABLE_GUID": [
0xEB9D2D32,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_SAS_DEVICE_PATH_GUID": [
0xD487DDB4,
0x008B,
0x11D9,
0xAF,
0xDC,
0x00,
0x10,
0x83,
0xFF,
0xCA,
0x4D,
],
"EFI_SCSI_BUS_PROTOCOL_GUID": [
0x5261213D,
0x3A3D,
0x441E,
0xB3,
0xAF,
0x21,
0xD3,
0xF7,
0xA4,
0xCA,
0x17,
],
"EFI_SCSI_IO_PROTOCOL_GUID": [
0x932F47E6,
0x2362,
0x4002,
0x80,
0x3E,
0x3C,
0xD5,
0x4B,
0x13,
0x8F,
0x85,
],
"EFI_SCSI_PASS_THRU_PROTOCOL_GUID": [
0xA59E8FCF,
0xBDA0,
0x43BB,
0x90,
0xB1,
0xD3,
0x73,
0x2E,
0xCA,
0xA8,
0x77,
],
"EFI_SECTION_EXTRACTION_PROTOCOL_GUID": [
0x448F5DA4,
0x6DD7,
0x4FE1,
0x93,
0x07,
0x69,
0x22,
0x41,
0x92,
0x21,
0x5D,
],
"EFI_SECURITY2_ARCH_PROTOCOL_GUID": [
0x94AB2F58,
0x1438,
0x4EF1,
0x91,
0x52,
0x18,
0x94,
0x1A,
0x3A,
0x0E,
0x68,
],
"EFI_SECURITY_ARCH_PROTOCOL_GUID": [
0xA46423E3,
0x4617,
0x49F1,
0xB9,
0xFF,
0xD1,
0xBF,
0xA9,
0x11,
0x58,
0x39,
],
"EFI_SECURITY_POLICY_PROTOCOL_GUID": [
0x78E4D245,
0xCD4D,
0x4A05,
0xA2,
0xBA,
0x47,
0x43,
0xE8,
0x6C,
0xFC,
0xAB,
],
"EFI_SEC_PLATFORM_INFORMATION_GUID": [
0x6F8C2B35,
0xFEF4,
0x448D,
0x82,
0x56,
0xE1,
0x1B,
0x19,
0xD6,
0x10,
0x77,
],
"EFI_SERIAL_IO_PROTOCOL_GUID": [
0xBB25CF6F,
0xF1D4,
0x11D2,
0x9A,
0x0C,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0xFD,
],
"EFI_SE_EXT_SIGNATURE_GUID": [
0xD2C18636,
0x40E5,
0x4EB5,
0xA3,
0x1B,
0x36,
0x69,
0x5F,
0xD4,
0x2C,
0x87,
],
"EFI_SHELLPKG_TOKEN_SPACE_GUID": [
0x171E9188,
0x31D3,
0x40F5,
0xB1,
0xC,
0x53,
0x9B,
0x2D,
0xB9,
0x40,
0xCD,
],
"EFI_SHELL_FILE_GUID": [
0xC57AD6B7,
0x0515,
0x40A8,
0x9D,
0x21,
0x55,
0x16,
0x52,
0x85,
0x4E,
0x37,
],
"EFI_SHELL_PARAMETERS_PROTOCOL_GUID": [
0x752F3136,
0x4E16,
0x4FDC,
0xA2,
0x2A,
0xE5,
0xF4,
0x68,
0x12,
0xF4,
0xCA,
],
"EFI_SHELL_PROTOCOL_GUID": [
0x6302D008,
0x7F9B,
0x4F30,
0x87,
0xAC,
0x60,
0xC9,
0xFE,
0xF5,
0xDA,
0x4E,
],
"EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID": [
0x964E5B22,
0x6459,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_SIMPLE_NETWORK_PROTOCOL_GUID": [
0xA19832B9,
0xAC25,
0x11D3,
0x9A,
0x2D,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_SIMPLE_POINTER_PROTOCOL_GUID": [
0x31878C87,
0xB75,
0x11D5,
0x9A,
0x4F,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL_GUID": [
0xDD9E7534,
0x7762,
0x4698,
0x8C,
0x14,
0xF5,
0x85,
0x17,
0xA6,
0x25,
0xAA,
],
"EFI_SIMPLE_TEXT_INPUT_PROTOCOL_GUID": [
0x387477C1,
0x69C7,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_SIMPLE_TEXT_IN_PROTOCOL_GUID": [
0x387477C1,
0x69C7,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL_GUID": [
0x387477C2,
0x69C7,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_SIMPLE_TEXT_OUT_PROTOCOL_GUID": [
0x387477C2,
0x69C7,
0x11D2,
0x8E,
0x39,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_SIO_PROTOCOL_GUID": [
0x215FDD18,
0xBD50,
0x4FEB,
0x89,
0xB,
0x58,
0xCA,
0xB,
0x47,
0x39,
0xE9,
],
"EFI_SMBIOS_PROTOCOL_GUID": [
0x3583FF6,
0xCB36,
0x4940,
0x94,
0x7E,
0xB9,
0xB3,
0x9F,
0x4A,
0xFA,
0xF7,
],
"EFI_SMBIOS_TABLE_GUID": [
0xEB9D2D31,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_SMBUS_HC_PROTOCOL_GUID": [
0xE49D33ED,
0x513D,
0x4634,
0xB6,
0x98,
0x6F,
0x55,
0xAA,
0x75,
0x1C,
0x1B,
],
"EFI_SMM_ACCESS2_PROTOCOL_GUID": [
0xC2702B74,
0x800C,
0x4131,
0x87,
0x46,
0x8F,
0xB5,
0xB8,
0x9C,
0xE4,
0xAC,
],
"EFI_SMM_ACCESS_PROTOCOL_GUID": [
0x3792095A,
0xE309,
0x4C1E,
0xAA,
0x01,
0x85,
0xF5,
0x65,
0x5A,
0x17,
0xF1,
],
"EFI_SMM_BASE2_PROTOCOL_GUID": [
0xF4CCBFB7,
0xF6E0,
0x47FD,
0x9D,
0xD4,
0x10,
0xA8,
0xF1,
0x50,
0xC1,
0x91,
],
"EFI_SMM_BASE_HELPER_READY_PROTOCOL_GUID": [
0x910DCA07,
0x1F94,
0x4EE7,
0xAF,
0x2F,
0xFF,
0x72,
0xF3,
0x15,
0x43,
0x53,
],
"EFI_SMM_BASE_PROTOCOL_GUID": [
0x1390954D,
0xDA95,
0x4227,
0x93,
0x28,
0x72,
0x82,
0xC2,
0x17,
0xDA,
0xA8,
],
"EFI_SMM_COMMUNICATION_PROTOCOL_GUID": [
0xC68ED8E2,
0x9DC6,
0x4CBD,
0x9D,
0x94,
0xDB,
0x65,
0xAC,
0xC5,
0xC3,
0x32,
],
"EFI_SMM_CONFIGURATION_PROTOCOL_GUID": [
0x26EEB3DE,
0xB689,
0x492E,
0x80,
0xF0,
0xBE,
0x8B,
0xD7,
0xDA,
0x4B,
0xA7,
],
"EFI_SMM_CONTROL2_PROTOCOL_GUID": [
0x843DC720,
0xAB1E,
0x42CB,
0x93,
0x57,
0x8A,
0x0,
0x78,
0xF3,
0x56,
0x1B,
],
"EFI_SMM_CONTROL_PROTOCOL_GUID": [
0x8D12E231,
0xC667,
0x4FD1,
0x98,
0xF2,
0x24,
0x49,
0xA7,
0xE7,
0xB2,
0xE5,
],
"EFI_SMM_CPU_IO2_PROTOCOL_GUID": [
0x3242A9D8,
0xCE70,
0x4AA0,
0x95,
0x5D,
0x5E,
0x7B,
0x14,
0x0D,
0xE4,
0xD2,
],
"EFI_SMM_CPU_IO_GUID": [
0x5F439A0B,
0x45D8,
0x4682,
0xA4,
0xF4,
0xF0,
0x57,
0x6B,
0x51,
0x34,
0x41,
],
"EFI_SMM_CPU_PROTOCOL_GUID": [
0xEB346B97,
0x975F,
0x4A9F,
0x8B,
0x22,
0xF8,
0xE9,
0x2B,
0xB3,
0xD5,
0x69,
],
"EFI_SMM_CPU_SAVE_STATE_PROTOCOL_GUID": [
0x21F302AD,
0x6E94,
0x471B,
0x84,
0xBC,
0xB1,
0x48,
0x0,
0x40,
0x3A,
0x1D,
],
"EFI_SMM_END_OF_DXE_PROTOCOL_GUID": [
0x24E70042,
0xD5C5,
0x4260,
0x8C,
0x39,
0xA,
0xD3,
0xAA,
0x32,
0xE9,
0x3D,
],
"EFI_SMM_FAULT_TOLERANT_WRITE_PROTOCOL_GUID": [
0x3868FC3B,
0x7E45,
0x43A7,
0x90,
0x6C,
0x4B,
0xA4,
0x7D,
0xE1,
0x75,
0x4D,
],
"EFI_SMM_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID": [
0xD326D041,
0xBD31,
0x4C01,
0xB5,
0xA8,
0x62,
0x8B,
0xE8,
0x7F,
0x6,
0x53,
],
"EFI_SMM_GPI_DISPATCH2_PROTOCOL_GUID": [
0x25566B03,
0xB577,
0x4CBF,
0x95,
0x8C,
0xED,
0x66,
0x3E,
0xA2,
0x43,
0x80,
],
"EFI_SMM_GPI_DISPATCH_PROTOCOL_GUID": [
0xE0744B81,
0x9513,
0x49CD,
0x8C,
0xEA,
0xE9,
0x24,
0x5E,
0x70,
0x39,
0xDA,
],
"EFI_SMM_ICHN_DISPATCH_PROTOCOL_GUID": [
0xC50B323E,
0x9075,
0x4F2A,
0xAC,
0x8E,
0xD2,
0x59,
0x6A,
0x10,
0x85,
0xCC,
],
"EFI_SMM_IO_TRAP_DISPATCH2_PROTOCOL_GUID": [
0x58DC368D,
0x7BFA,
0x4E77,
0xAB,
0xBC,
0xE,
0x29,
0x41,
0x8D,
0xF9,
0x30,
],
"EFI_SMM_LOCK_BOX_COMMUNICATION_GUID": [
0x2A3CFEBD,
0x27E8,
0x4D0A,
0x8B,
0x79,
0xD6,
0x88,
0xC2,
0xA3,
0xE1,
0xC0,
],
"EFI_SMM_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID": [
0x8BC1714D,
0xFFCB,
0x41C3,
0x89,
0xDC,
0x6C,
0x74,
0xD0,
0x6D,
0x98,
0xEA,
],
"EFI_SMM_PERIODIC_TIMER_DISPATCH2_PROTOCOL_GUID": [
0x4CEC368E,
0x8E8E,
0x4D71,
0x8B,
0xE1,
0x95,
0x8C,
0x45,
0xFC,
0x8A,
0x53,
],
"EFI_SMM_PERIODIC_TIMER_DISPATCH_PROTOCOL_GUID": [
0x9CCA03FC,
0x4C9E,
0x4A19,
0x9B,
0x6,
0xED,
0x7B,
0x47,
0x9B,
0xDE,
0x55,
],
"EFI_SMM_POWER_BUTTON_DISPATCH2_PROTOCOL_GUID": [
0x1B1183FA,
0x1823,
0x46A7,
0x88,
0x72,
0x9C,
0x57,
0x87,
0x55,
0x40,
0x9D,
],
"EFI_SMM_POWER_BUTTON_DISPATCH_PROTOCOL_GUID": [
0xB709EFA0,
0x47A6,
0x4B41,
0xB9,
0x31,
0x12,
0xEC,
0xE7,
0xA8,
0xEE,
0x56,
],
"EFI_SMM_READY_TO_LOCK_PROTOCOL_GUID": [
0x47B7FA8C,
0xF4BD,
0x4AF6,
0x82,
0x00,
0x33,
0x30,
0x86,
0xF0,
0xD2,
0xC8,
],
"EFI_SMM_RSC_HANDLER_PROTOCOL_GUID": [
0x2FF29FA7,
0x5E80,
0x4ED9,
0xB3,
0x80,
0x1,
0x7D,
0x3C,
0x55,
0x4F,
0xF4,
],
"EFI_SMM_STANDBY_BUTTON_DISPATCH2_PROTOCOL_GUID": [
0x7300C4A1,
0x43F2,
0x4017,
0xA5,
0x1B,
0xC8,
0x1A,
0x7F,
0x40,
0x58,
0x5B,
],
"EFI_SMM_STANDBY_BUTTON_DISPATCH_PROTOCOL_GUID": [
0x78965B98,
0xB0BF,
0x449E,
0x8B,
0x22,
0xD2,
0x91,
0x4E,
0x49,
0x8A,
0x98,
],
"EFI_SMM_STATUS_CODE_PROTOCOL_GUID": [
0x6AFD2B77,
0x98C1,
0x4ACD,
0xA6,
0xF9,
0x8A,
0x94,
0x39,
0xDE,
0xF,
0xB1,
],
"EFI_SMM_SWAP_ADDRESS_RANGE_PROTOCOL_GUID": [
0x67C4F112,
0x3385,
0x4E55,
0x9C,
0x5B,
0xC0,
0x5B,
0x71,
0x7C,
0x42,
0x28,
],
"EFI_SMM_SW_DISPATCH2_PROTOCOL_GUID": [
0x18A3C6DC,
0x5EEA,
0x48C8,
0xA1,
0xC1,
0xB5,
0x33,
0x89,
0xF9,
0x89,
0x99,
],
"EFI_SMM_SW_DISPATCH_PROTOCOL_GUID": [
0xE541B773,
0xDD11,
0x420C,
0xB0,
0x26,
0xDF,
0x99,
0x36,
0x53,
0xF8,
0xBF,
],
"EFI_SMM_SX_DISPATCH2_PROTOCOL_GUID": [
0x456D2859,
0xA84B,
0x4E47,
0xA2,
0xEE,
0x32,
0x76,
0xD8,
0x86,
0x99,
0x7D,
],
"EFI_SMM_SX_DISPATCH_PROTOCOL_GUID": [
0x14FC52BE,
0x1DC,
0x426C,
0x91,
0xAE,
0xA2,
0x3C,
0x3E,
0x22,
0xA,
0xE8,
],
"EFI_SMM_USB_DISPATCH2_PROTOCOL_GUID": [
0xEE9B8D90,
0xC5A6,
0x40A2,
0xBD,
0xE2,
0x52,
0x55,
0x8D,
0x33,
0xCC,
0xA1,
],
"EFI_SMM_USB_DISPATCH_PROTOCOL_GUID": [
0xA05B6FFD,
0x87AF,
0x4E42,
0x95,
0xC9,
0x62,
0x28,
0xB6,
0x3C,
0xF3,
0xF3,
],
"EFI_SMM_VARIABLE_PROTOCOL_GUID": [
0xED32D533,
0x99E6,
0x4209,
0x9C,
0xC0,
0x2D,
0x72,
0xCD,
0xD9,
0x98,
0xA7,
],
"EFI_SMM_VARIABLE_WRITE_GUID": [
0x93BA1826,
0xDFFB,
0x45DD,
0x82,
0xA7,
0xE7,
0xDC,
0xAA,
0x3B,
0xBD,
0xF3,
],
"EFI_STANDARD_CALLER_ID_GUID": [
0xC9DCF469,
0xA7C4,
0x11D5,
0x87,
0xDA,
0x00,
0x06,
0x29,
0x45,
0xC3,
0xB9,
],
"EFI_STANDARD_ERROR_DEVICE_GUID": [
0xD3B36F2D,
0xD551,
0x11D4,
0x9A,
0x46,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_STATUS_CODE_DATA_TYPE_ASSERT_GUID": [
0xDA571595,
0x4D99,
0x487C,
0x82,
0x7C,
0x26,
0x22,
0x67,
0x7D,
0x33,
0x07,
],
"EFI_STATUS_CODE_DATA_TYPE_DEBUG_GUID": [
0x9A4E9246,
0xD553,
0x11D5,
0x87,
0xE2,
0x00,
0x06,
0x29,
0x45,
0xC3,
0xB9,
],
"EFI_STATUS_CODE_DATA_TYPE_ERROR_GUID": [
0xAB359CE3,
0x99B3,
0xAE18,
0xC8,
0x9D,
0x95,
0xD3,
0xB0,
0x72,
0xE1,
0x9B,
],
"EFI_STATUS_CODE_DATA_TYPE_EXCEPTION_HANDLER_GUID": [
0x3BC2BD12,
0xAD2E,
0x11D5,
0x87,
0xDD,
0x00,
0x06,
0x29,
0x45,
0xC3,
0xB9,
],
"EFI_STATUS_CODE_DATA_TYPE_PROGRESS_CODE_GUID": [
0xA356AB39,
0x35C4,
0x35DA,
0xB3,
0x7A,
0xF8,
0xEA,
0x9E,
0x8B,
0x36,
0xA3,
],
"EFI_STATUS_CODE_DATA_TYPE_STRING_GUID": [
0x92D11080,
0x496F,
0x4D95,
0xBE,
0x7E,
0x03,
0x74,
0x88,
0x38,
0x2B,
0x0A,
],
"EFI_STATUS_CODE_GUID": [
0xD083E94C,
0x6560,
0x42E4,
0xB6,
0xD4,
0x2D,
0xF7,
0x5A,
0xDF,
0x6A,
0x2A,
],
"EFI_STATUS_CODE_RUNTIME_PROTOCOL_GUID": [
0xD2B2B828,
0x826,
0x48A7,
0xB3,
0xDF,
0x98,
0x3C,
0x0,
0x60,
0x24,
0xF0,
],
"EFI_STATUS_CODE_SPECIFIC_DATA_GUID": [
0x335984BD,
0xE805,
0x409A,
0xB8,
0xF8,
0xD2,
0x7E,
0xCE,
0x5F,
0xF7,
0xA6,
],
"EFI_STORAGE_SECURITY_COMMAND_PROTOCOL_GUID": [
0xC88B0B6D,
0x0DFC,
0x49A7,
0x9C,
0xB4,
0x49,
0x07,
0x4B,
0x4C,
0x3A,
0x78,
],
"EFI_SWAP_ADDRESS_RANGE_PROTOCOL_GUID": [
0x1259F60D,
0xB754,
0x468E,
0xA7,
0x89,
0x4D,
0xB8,
0x5D,
0x55,
0xE8,
0x7E,
],
"EFI_SYSTEM_NV_DATA_FV_GUID": [
0xFFF12B8D,
0x7696,
0x4C8B,
0xA9,
0x85,
0x27,
0x47,
0x7,
0x5B,
0x4F,
0x50,
],
"EFI_SYSTEM_NV_DATA_HOB_GUID": [
0xD6E5092D,
0xC7B2,
0x4872,
0xAF,
0x66,
0xFD,
0xC0,
0xE6,
0xF9,
0x5E,
0x78,
],
"EFI_TAPE_IO_PROTOCOL_GUID": [
0x1E93E633,
0xD65A,
0x459E,
0xAB,
0x84,
0x93,
0xD9,
0xEC,
0x26,
0x6D,
0x18,
],
"EFI_TCG_EVENT_HOB_GUID": [
0x2E3044AC,
0x879F,
0x490F,
0x97,
0x60,
0xBB,
0xDF,
0xAF,
0x69,
0x5F,
0x50,
],
"EFI_TCG_PLATFORM_PROTOCOL_GUID": [
0x8C4C9A41,
0xBF56,
0x4627,
0x9E,
0xA,
0xC8,
0x38,
0x6D,
0x66,
0x11,
0x5C,
],
"EFI_TCG_PROTOCOL_GUID": [
0xF541796D,
0xA62E,
0x4954,
0xA7,
0x75,
0x95,
0x84,
0xF6,
0x1B,
0x9C,
0xDD,
],
"EFI_TCP4_PROTOCOL_GUID": [
0x65530BC7,
0xA359,
0x410F,
0xB0,
0x10,
0x5A,
0xAD,
0xC7,
0xEC,
0x2B,
0x62,
],
"EFI_TCP4_SERVICE_BINDING_PROTOCOL_GUID": [
0x00720665,
0x67EB,
0x4A99,
0xBA,
0xF7,
0xD3,
0xC3,
0x3A,
0x1C,
0x7C,
0xC9,
],
"EFI_TCP6_PROTOCOL_GUID": [
0x46E44855,
0xBD60,
0x4AB7,
0xAB,
0x0D,
0xA6,
0x79,
0xB9,
0x44,
0x7D,
0x77,
],
"EFI_TCP6_SERVICE_BINDING_PROTOCOL_GUID": [
0xEC20EB79,
0x6C1A,
0x4664,
0x9A,
0x0D,
0xD2,
0xE4,
0xCC,
0x16,
0xD6,
0x64,
],
"EFI_TCP_PROTOCOL_GUID": [
0x02B3D5F2,
0xAC28,
0x11D3,
0x9A,
0x2D,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_TIANO_DECOMPRESS_PROTOCOL_GUID": [
0xE84CF29C,
0x191F,
0x4EAE,
0x96,
0xE1,
0xF4,
0x6A,
0xEC,
0xEA,
0xEA,
0x0B,
],
"EFI_TIMER_ARCH_PROTOCOL_GUID": [
0x26BACCB3,
0x6F42,
0x11D4,
0xBC,
0xE7,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_TSC_FREQUENCY_GUID": [
0xDBA6A7E3,
0xBB57,
0x4BE7,
0x8A,
0xF8,
0xD5,
0x78,
0xDB,
0x7E,
0x56,
0x87,
],
"EFI_UART_DEVICE_PATH_GUID": [
0x37499A9D,
0x542F,
0x4C89,
0xA0,
0x26,
0x35,
0xDA,
0x14,
0x20,
0x94,
0xE4,
],
"EFI_UDP4_PROTOCOL_GUID": [
0x3AD9DF29,
0x4501,
0x478D,
0xB1,
0xF8,
0x7F,
0x7F,
0xE7,
0x0E,
0x50,
0xF3,
],
"EFI_UDP4_SERVICE_BINDING_PROTOCOL_GUID": [
0x83F01464,
0x99BD,
0x45E5,
0xB3,
0x83,
0xAF,
0x63,
0x05,
0xD8,
0xE9,
0xE6,
],
"EFI_UDP6_PROTOCOL_GUID": [
0x4F948815,
0xB4B9,
0x43CB,
0x8A,
0x33,
0x90,
0xE0,
0x60,
0xB3,
0x49,
0x55,
],
"EFI_UDP6_SERVICE_BINDING_PROTOCOL_GUID": [
0x66ED4721,
0x3C98,
0x4D3E,
0x81,
0xE3,
0xD0,
0x3D,
0xD3,
0x9A,
0x72,
0x54,
],
"EFI_UGA_DRAW_PROTOCOL_GUID": [
0x982C298B,
0xF4FA,
0x41CB,
0xB8,
0x38,
0x77,
0xAA,
0x68,
0x8F,
0xB8,
0x39,
],
"EFI_UGA_IO_PROTOCOL_GUID": [
0x61A4D49E,
0x6F68,
0x4F1B,
0xB9,
0x22,
0xA8,
0x6E,
0xED,
0xB,
0x7,
0xA2,
],
"EFI_UGA_SPLASH_PROTOCOL_GUID": [
0xA45B3A0D,
0x2E55,
0x4C03,
0xAD,
0x9C,
0x27,
0xD4,
0x82,
0xB,
0x50,
0x7E,
],
"EFI_UNICODE_COLLATION2_PROTOCOL_GUID": [
0xA4C751FC,
0x23AE,
0x4C3E,
0x92,
0xE9,
0x49,
0x64,
0xCF,
0x63,
0xF3,
0x49,
],
"EFI_UNICODE_COLLATION_PROTOCOL2_GUID": [
0xA4C751FC,
0x23AE,
0x4C3E,
0x92,
0xE9,
0x49,
0x64,
0xCF,
0x63,
0xF3,
0x49,
],
"EFI_UNICODE_COLLATION_PROTOCOL_GUID": [
0x1D85CD7F,
0xF43D,
0x11D2,
0x9A,
0xC,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_UNIX_CONSOLE_GUID": [
0xF2CC5D06,
0x8985,
0x11DB,
0xBB,
0x19,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_CPU_MODEL_GUID": [
0xF2D3B330,
0x8985,
0x11DB,
0x8A,
0xA3,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_CPU_SPEED_GUID": [
0xF2D74E5A,
0x8985,
0x11DB,
0x97,
0x05,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_FILE_SYSTEM_GUID": [
0xF2C16B9E,
0x8985,
0x11DB,
0x92,
0xC8,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_GOP_GUID": [
0xBACE07C2,
0x8987,
0x11DB,
0xA5,
0x9A,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_IO_PROTOCOL_GUID": [
0xF2E23F54,
0x8985,
0x11DB,
0xAC,
0x79,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_MEMORY_GUID": [
0xF2D006CC,
0x8985,
0x11DB,
0xA4,
0x72,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_NETWORK_GUID": [
0x081603B4,
0x0F1D,
0x4022,
0xB6,
0xFD,
0x4C,
0xE3,
0x5E,
0x09,
0xA1,
0xA6,
],
"EFI_UNIX_PHYSICAL_DISKS_GUID": [
0xF2BDCC96,
0x8985,
0x11DB,
0x87,
0x19,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_SERIAL_PORT_GUID": [
0x6D3A727D,
0x66C8,
0x4D19,
0x87,
0xE6,
0x2,
0x15,
0x86,
0x14,
0x90,
0xF3,
],
"EFI_UNIX_THUNK_PROTOCOL_GUID": [
0xF2E98868,
0x8985,
0x11DB,
0x9A,
0x59,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_UGA_GUID": [
0xF2C8B80E,
0x8985,
0x11DB,
0x93,
0xF1,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_UGA_IO_PROTOCOL_GUID": [
0xF2E5E2C6,
0x8985,
0x11DB,
0xA1,
0x91,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UNIX_VIRTUAL_DISKS_GUID": [
0xF2BA331A,
0x8985,
0x11DB,
0xA4,
0x06,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"EFI_UPDATE_DATA_FILE_GUID": [
0x283FA2EE,
0x532C,
0x484D,
0x93,
0x83,
0x9F,
0x93,
0xB3,
0x6F,
0xB,
0x7E,
],
"EFI_USB2_HC_PROTOCOL_GUID": [
0x3E745226,
0x9818,
0x45B6,
0xA2,
0xAC,
0xD7,
0xCD,
0xE,
0x8B,
0xA2,
0xBC,
],
"EFI_USB_ATAPI_PROTOCOL_GUID": [
0x2B2F68DA,
0x0CD2,
0x44CF,
0x8E,
0x8B,
0xBB,
0xA2,
0x0B,
0x1B,
0x5B,
0x75,
],
"EFI_USB_BUS_PROTOCOL_GUID": [
0x2B2F68CC,
0x0CD2,
0x44CF,
0x8E,
0x8B,
0xBB,
0xA2,
0x0B,
0x1B,
0x5B,
0x75,
],
"EFI_USB_HC_PROTOCOL_GUID": [
0xF5089266,
0x1AA0,
0x4953,
0x97,
0xD8,
0x56,
0x2F,
0x8A,
0x73,
0xB5,
0x19,
],
"EFI_USB_IO_PROTOCOL_GUID": [
0x2B2F68D6,
0x0CD2,
0x44CF,
0x8E,
0x8B,
0xBB,
0xA2,
0x0B,
0x1B,
0x5B,
0x75,
],
"EFI_USER_CREDENTIAL2_PROTOCOL_GUID": [
0xE98ADB03,
0xB8B9,
0x4AF8,
0xBA,
0x20,
0x26,
0xE9,
0x11,
0x4C,
0xBC,
0xE5,
],
"EFI_USER_CREDENTIAL_PROTOCOL_GUID": [
0x71EE5E94,
0x65B9,
0x45D5,
0x82,
0x1A,
0x3A,
0x4D,
0x86,
0xCF,
0xE6,
0xBE,
],
"EFI_USER_INFO_ACCESS_SETUP_ADMIN_GUID": [
0x85B75607,
0xF7CE,
0x471E,
0xB7,
0xE4,
0x2A,
0xEA,
0x5F,
0x72,
0x32,
0xEE,
],
"EFI_USER_INFO_ACCESS_SETUP_NORMAL_GUID": [
0x1DB29AE0,
0x9DCB,
0x43BC,
0x8D,
0x87,
0x5D,
0xA1,
0x49,
0x64,
0xDD,
0xE2,
],
"EFI_USER_INFO_ACCESS_SETUP_RESTRICTED_GUID": [
0xBDB38125,
0x4D63,
0x49F4,
0x82,
0x12,
0x61,
0xCF,
0x5A,
0x19,
0xA,
0xF8,
],
"EFI_USER_MANAGER_PROTOCOL_GUID": [
0x6FD5B00C,
0xD426,
0x4283,
0x98,
0x87,
0x6C,
0xF5,
0xCF,
0x1C,
0xB1,
0xFE,
],
"EFI_UXIX_SYSTEM_CONFIG_GUID": [
0x375EA976,
0x3CCD,
0x4E74,
0xA8,
0x45,
0x26,
0xB9,
0xB3,
0x24,
0xB1,
0x3C,
],
"EFI_VARIABLE_ARCH_PROTOCOL_GUID": [
0x1E5668E2,
0x8481,
0x11D4,
0xBC,
0xF1,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_VARIABLE_GUID": [
0xDDCF3616,
0x3275,
0x4164,
0x98,
0xB6,
0xFE,
0x85,
0x70,
0x7F,
0xFE,
0x7D,
],
"EFI_VARIABLE_INDEX_TABLE_GUID": [
0x8CFDB8C8,
0xD6B2,
0x40F3,
0x8E,
0x97,
0x02,
0x30,
0x7C,
0xC9,
0x8B,
0x7C,
],
"EFI_VARIABLE_STORE_PROTOCOL_GUID": [
0xF088CD91,
0xA046,
0x11D2,
0x8E,
0x42,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"EFI_VARIABLE_WRITE_ARCH_PROTOCOL_GUID": [
0x6441F818,
0x6362,
0x4E44,
0xB5,
0x70,
0x7D,
0xBA,
0x31,
0xDD,
0x24,
0x53,
],
"EFI_VGA_MINI_PORT_PROTOCOL_GUID": [
0xC7735A2F,
0x88F5,
0x4882,
0xAE,
0x63,
0xFA,
0xAC,
0x8C,
0x8B,
0x86,
0xB3,
],
"EFI_VIRTUAL_MEMORY_ACCESS_PROTOCOL_GUID": [
0x745D377A,
0xB988,
0x47B2,
0xB1,
0x8F,
0xBB,
0xC8,
0xD,
0xC5,
0x66,
0x98,
],
"EFI_VLAN_CONFIG_PROTOCOL_GUID": [
0x9E23D768,
0xD2F3,
0x4366,
0x9F,
0xC3,
0x3A,
0x7A,
0xBA,
0x86,
0x43,
0x74,
],
"EFI_VT_100_GUID": [
0xDFA66065,
0xB419,
0x11D3,
0x9A,
0x2D,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_VT_100_PLUS_GUID": [
0x7BAEC70B,
0x57E0,
0x4C76,
0x8E,
0x87,
0x2F,
0x9E,
0x28,
0x08,
0x83,
0x43,
],
"EFI_VT_UTF8_GUID": [
0xAD15A0D6,
0x8BEC,
0x4ACF,
0xA0,
0x73,
0xD0,
0x1D,
0xE7,
0x7E,
0x2D,
0x88,
],
"EFI_WATCHDOG_TIMER_ARCH_PROTOCOL_GUID": [
0x665E3FF5,
0x46CC,
0x11D4,
0x9A,
0x38,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"EFI_WIN_NT_CONSOLE_GUID": [
0xBA73672C,
0xA5D3,
0x11D4,
0xBD,
0x0,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_CPU_MODEL_GUID": [
0xBEE9B6CE,
0x2F8A,
0x11D4,
0xBD,
0xD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_CPU_SPEED_GUID": [
0xD4F29055,
0xE1FB,
0x11D4,
0xBD,
0xD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_FILE_SYSTEM_GUID": [
0xC95A935,
0xA006,
0x11D4,
0xBC,
0xFA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_GOP_GUID": [
0x4E11E955,
0xCCCA,
0x11D4,
0xBD,
0xD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_IO_PROTOCOL_GUID": [
0x96EB4AD6,
0xA32A,
0x11D4,
0xBC,
0xFD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_MEMORY_GUID": [
0x99042912,
0x122A,
0x11D4,
0xBD,
0xD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_PASS_THROUGH_GUID": [
0xCC664EB8,
0x3C24,
0x4086,
0xB6,
0xF6,
0x34,
0xE8,
0x56,
0xBC,
0xE3,
0x6E,
],
"EFI_WIN_NT_PHYSICAL_DISKS_GUID": [
0xC95A92F,
0xA006,
0x11D4,
0xBC,
0xFA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_SERIAL_PORT_GUID": [
0xC95A93D,
0xA006,
0x11D4,
0xBC,
0xFA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_SYSTEM_CONFIG_GUID": [
0xB347F047,
0xAF8C,
0x490E,
0xAC,
0x07,
0x0A,
0xA9,
0xB7,
0xE5,
0x38,
0x58,
],
"EFI_WIN_NT_THUNK_PROTOCOL_GUID": [
0x58C518B1,
0x76F3,
0x11D4,
0xBC,
0xEA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_UGA_GUID": [
0xAB248E99,
0xABE1,
0x11D4,
0xBD,
0xD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_WIN_NT_VIRTUAL_DISKS_GUID": [
0xC95A928,
0xA006,
0x11D4,
0xBC,
0xFA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"EFI_XEN_INFO_GUID": [
0xD3B46F3B,
0xD441,
0x1244,
0x9A,
0x12,
0x0,
0x12,
0x27,
0x3F,
0xC1,
0x4D,
],
"EMBEDDED_DEVICE_PROTOCOL_GUID": [
0xBF4B9D10,
0x13EC,
0x43DD,
0x88,
0x80,
0xE9,
0xB,
0x71,
0x8F,
0x27,
0xDE,
],
"EMBEDDED_EXTERNAL_DEVICE_PROTOCOL_GUID": [
0x735F8C64,
0xD696,
0x44D0,
0xBD,
0xF2,
0x44,
0x7F,
0xD0,
0x5A,
0x54,
0x06,
],
"EMU_BLOCK_IO_PROTOCOL_GUID": [
0x6888A4AE,
0xAFCE,
0xE84B,
0x91,
0x02,
0xF7,
0xB9,
0xDA,
0xE6,
0xA0,
0x30,
],
"EMU_GRAPHICS_WINDOW_PROTOCOL_GUID": [
0x30FD316A,
0x6728,
0x2E41,
0xA6,
0x90,
0x0D,
0x13,
0x33,
0xD8,
0xCA,
0xC1,
],
"EMU_IO_THUNK_PROTOCO_GUID": [
0x453368F6,
0x7C85,
0x434A,
0xA9,
0x8A,
0x72,
0xD1,
0xB7,
0xFF,
0xA9,
0x26,
],
"EMU_SNP_PROTOCOL_GUID": [
0xFD5FBE54,
0x8C35,
0xB345,
0x8A,
0x0F,
0x7A,
0xC8,
0xA5,
0xFD,
0x05,
0x21,
],
"EMU_THUNK_PPI_GUID": [
0xB958B78C,
0x1D3E,
0xEE40,
0x8B,
0xF4,
0xF0,
0x63,
0x2D,
0x06,
0x39,
0x16,
],
"EMU_THUNK_PROTOCOL_GUID": [
0x5CF32E0B,
0x8EDF,
0x2E44,
0x9C,
0xDA,
0x93,
0x20,
0x5E,
0x99,
0xEC,
0x1C,
],
"EXTENDED_SAL_BOOT_SERVICE_PROTOCOL_GUID": [
0xDE0EE9A4,
0x3C7A,
0x44F2,
0xB7,
0x8B,
0xE3,
0xCC,
0xD6,
0x9C,
0x3A,
0xF7,
],
"EXTENDED_SAL_BOOT_SERVICE_PROTOCOL_GUID": [
0xDE0EE9A4,
0x3C7A,
0x44F2,
0xB7,
0x8B,
0xE3,
0xCC,
0xD6,
0x9C,
0x3A,
0xF7,
],
"FFS_GUID": [
0xAC05BF33,
0x995A,
0x4ED4,
0xAA,
0xB8,
0xEF,
0x7A,
0xE8,
0xF,
0x5C,
0xB0,
],
"FILE_EXPLORE_FORMSET_GUID": [
0x1F2D63E1,
0xFEBD,
0x4DC7,
0x9C,
0xC5,
0xBA,
0x2B,
0x1C,
0xEF,
0x9C,
0x5B,
],
"FILE_GUID": [
0xCBD2E4D5,
0x7068,
0x4FF5,
0xB4,
0x62,
0x98,
0x22,
0xB4,
0xAD,
0x8D,
0x60,
],
"FORM_BROWSER_EXTENSION_PROTOCOL_GUID": [
0x1F73B18D,
0x4630,
0x43C1,
0xA1,
0xDE,
0x6F,
0x80,
0x85,
0x5D,
0x7D,
0xA4,
],
"FRAMEWORK_BDS_FRONTPAGE_FORMSET_GUID": [
0x9E0C30BC,
0x3F06,
0x4BA6,
0x82,
0x88,
0x9,
0x17,
0x9B,
0x85,
0x5D,
0xBE,
],
"FRAMEWORK_EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID": [
0xDE28BC59,
0x6228,
0x41BD,
0xBD,
0xF6,
0xA3,
0xB9,
0xAD,
0xB5,
0x8D,
0xA1,
],
"FRAMEWORK_EFI_MP_SERVICES_PROTOCOL_GUID": [
0xF33261E7,
0x23CB,
0x11D5,
0xBD,
0x5C,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"FRONT_PAGE_FORMSET_GUID": [
0x9E0C30BC,
0x3F06,
0x4BA6,
0x82,
0x88,
0x9,
0x17,
0x9B,
0x85,
0x5D,
0xBE,
],
"HANDLE_PARSING_HII_GUID": [
0xB8969637,
0x81DE,
0x43AF,
0xBC,
0x9A,
0x24,
0xD9,
0x89,
0x13,
0xF2,
0xF6,
],
"HD_BOOT_DEVICE_PATH_VARIABLE_GUID": [
0xFAB7E9E1,
0x39DD,
0x4F2B,
0x84,
0x8,
0xE2,
0xE,
0x90,
0x6C,
0xB6,
0xDE,
],
"HII_RESOURCE_SAMPLE_FORM_SET_GUID": [
0x4F4EF7F0,
0xAA29,
0x4CE9,
0xBA,
0x41,
0x64,
0x3E,
0x1,
0x23,
0xA9,
0x9F,
],
"HOB_LIST_GUID": [
0x7739F24C,
0x93D7,
0x11D4,
0x9A,
0x3A,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"HOT_PLUG_DEVICE_GUID": [
0x220AC432,
0x1D43,
0x49E5,
0xA7,
0x4F,
0x4C,
0x9D,
0xA6,
0x7A,
0xD2,
0x3B,
],
"IDLE_LOOP_EVENT_GUID": [
0x3C8D294C,
0x5FC3,
0x4451,
0xBB,
0x31,
0xC4,
0xC0,
0x32,
0x29,
0x5E,
0x6C,
],
"INTEL_FRAMEWORK_MODULEPKG_TOKEN_SPACE_GUID": [
0xD3705011,
0xBC19,
0x4AF7,
0xBE,
0x16,
0xF6,
0x80,
0x30,
0x37,
0x8C,
0x15,
],
"IP4_ISCSI_CONFIG_GUID": [
0x6456ED61,
0x3579,
0x41C9,
0x8A,
0x26,
0x0A,
0x0B,
0xD6,
0x2B,
0x78,
0xFC,
],
"IP6_CONFIG_NVDATA_GUID": [
0x2EEA107,
0x98DB,
0x400E,
0x98,
0x30,
0x46,
0xA,
0x15,
0x42,
0xD7,
0x99,
],
"ISCSI_CHAP_AUTH_INFO_GUID": [
0x786EC0AC,
0x65AE,
0x4D1B,
0xB1,
0x37,
0xD,
0x11,
0xA,
0x48,
0x37,
0x97,
],
"ISCSI_CONFIG_GUID": [
0x4B47D616,
0xA8D6,
0x4552,
0x9D,
0x44,
0xCC,
0xAD,
0x2E,
0xF,
0x4C,
0xF9,
],
"ISCSI_V4_PRIVATE_GUID": [
0xFA3CDE4C,
0x87C2,
0x427D,
0xAE,
0xDE,
0x7D,
0xD0,
0x96,
0xC8,
0x8C,
0x58,
],
"ISCSI_V6_PRIVATE_GUID": [
0x28BE27E5,
0x66CC,
0x4A31,
0xA3,
0x15,
0xDB,
0x14,
0xC3,
0x74,
0x4D,
0x85,
],
"LAST_ENUM_LANGUAGE_GUID": [
0xE8C545B,
0xA2EE,
0x470D,
0x8E,
0x26,
0xBD,
0xA1,
0xA1,
0x3C,
0xA,
0xA3,
],
"LDR_MEMORY_DESCRIPTOR_GUID": [
0x7701D7E5,
0x7D1D,
0x4432,
0xA4,
0x68,
0x67,
0x3D,
0xAB,
0x8A,
0xDE,
0x60,
],
"LOAD_FILE_PROTOCOL_GUID": [
0x56EC3091,
0x954C,
0x11D2,
0x8E,
0x3F,
0x00,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"LOCAL_EFI_WIN_NT_BUS_DRIVER_IO_PROTOCOL_GUID": [
0x96EB4AD6,
0xA32A,
0x11D4,
0xBC,
0xFD,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"LOCAL_EFI_WIN_NT_SERIAL_PORT_GUID": [
0xC95A93D,
0xA006,
0x11D4,
0xBC,
0xFA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"LOCAL_EFI_WIN_NT_THUNK_PROTOCOL_GUID": [
0x58C518B1,
0x76F3,
0x11D4,
0xBC,
0xEA,
0x0,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"LZMAF86_CUSTOM_DECOMPRESS_GUID": [
0xD42AE6BD,
0x1352,
0x4BFB,
0x90,
0x9A,
0xCA,
0x72,
0xA6,
0xEA,
0xE8,
0x89,
],
"LZMA_CUSTOM_DECOMPRESS_GUID": [
0xEE4E5898,
0x3914,
0x4259,
0x9D,
0x6E,
0xDC,
0x7B,
0xD7,
0x94,
0x03,
0xCF,
],
"MDEMODULEPKG_TOKEN_SPACE_GUID": [
0xA1AFF049,
0xFDEB,
0x442A,
0xB3,
0x20,
0x13,
0xAB,
0x4C,
0xB7,
0x2B,
0xBC,
],
"MDEPKG_TOKEN_SPACE_GUID": [
0x914AEBE7,
0x4635,
0x459B,
0xAA,
0x1C,
0x11,
0xE2,
0x19,
0xB0,
0x3A,
0x10,
],
"MEMORY_ONLY_RESET_CONTROL_GUID": [
0xE20939BE,
0x32D4,
0x41BE,
0xA1,
0x50,
0x89,
0x7F,
0x85,
0xD4,
0x98,
0x29,
],
"MEMORY_STATUS_CODE_RECORD_GUID": [
0x60CC026,
0x4C0D,
0x4DDA,
0x8F,
0x41,
0x59,
0x5F,
0xEF,
0x0,
0xA5,
0x2,
],
"MTC_VENDOR_GUID": [
0xEB704011,
0x1402,
0x11D3,
0x8E,
0x77,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"MY_GUID": [
0x12345678,
0xAABB,
0xCCDD,
0xEE,
0xFF,
0x11,
0x22,
0x33,
0x44,
0x55,
0x66,
],
"NT_FWH_PPI_GUID": [
0x4E76928F,
0x50AD,
0x4334,
0xB0,
0x6B,
0xA8,
0x42,
0x13,
0x10,
0x8A,
0x57,
],
"PCATCHIPSET_TOKEN_SPACE_GUID": [
0x326AE723,
0xAE32,
0x4589,
0x98,
0xB8,
0xCA,
0xC2,
0x3C,
0xDC,
0xC1,
0xB1,
],
"PCD_DATABASE_HOB_GUID": [
0xEA296D92,
0x0B69,
0x423C,
0x8C,
0x28,
0x33,
0xB4,
0xE0,
0xA9,
0x12,
0x68,
],
"PCD_PPI_GUID": [
0x6E81C58,
0x4AD7,
0x44BC,
0x83,
0x90,
0xF1,
0x2,
0x65,
0xF7,
0x24,
0x80,
],
"PCD_PROTOCOL_GUID": [
0x11B34006,
0xD85B,
0x4D0A,
0xA2,
0x90,
0xD5,
0xA5,
0x71,
0x31,
0xE,
0xF7,
],
"PE32_IMAGE_PROTOCOL_GUID": [
0x5CB5C776,
0x60D5,
0x45EE,
0x88,
0x3C,
0x45,
0x27,
0x8,
0xCD,
0x74,
0x3F,
],
"PEI_ATA_CONTROLLER_PPI_GUID": [
0xA45E60D1,
0xC719,
0x44AA,
0xB0,
0x7A,
0xAA,
0x77,
0x7F,
0x85,
0x90,
0x6D,
],
"PEI_BASE_MEMORY_TEST_GUID": [
0xB6EC423C,
0x21D2,
0x490D,
0x85,
0xC6,
0xDD,
0x58,
0x64,
0xEA,
0xA6,
0x74,
],
"PEI_BLOCK_IO_PPI_GUID": [
0x695D8AA1,
0x42EE,
0x4C46,
0x80,
0x5C,
0x6E,
0xA6,
0xBC,
0xE7,
0x99,
0xE3,
],
"PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID": [
0xABD42895,
0x78CF,
0x4872,
0x84,
0x44,
0x1B,
0x5C,
0x18,
0x0B,
0xFB,
0xFF,
],
"PEI_CAPSULE_PPI_GUID": [
0x3ACF33EE,
0xD892,
0x40F4,
0xA2,
0xFC,
0x38,
0x54,
0xD2,
0xE1,
0x32,
0x3D,
],
"PEI_CPU_IO_PPI_GUID": [
0xE6AF1F7B,
0xFC3F,
0x46DA,
0xA8,
0x28,
0xA3,
0xB4,
0x57,
0xA4,
0x42,
0x82,
],
"PEI_END_OF_PEI_PHASE_PPI_GUID": [
0x605EA650,
0xC65C,
0x42E1,
0xBA,
0x80,
0x91,
0xA5,
0x2A,
0xB6,
0x18,
0xC6,
],
"PEI_FLASH_MAP_PPI_GUID": [
0xF34C2FA0,
0xDE88,
0x4270,
0x84,
0x14,
0x96,
0x12,
0x22,
0xF4,
0x52,
0x1C,
],
"PEI_IN_MEMORY_GUID": [
0x643B8786,
0xB417,
0x48D2,
0x8F,
0x5E,
0x78,
0x19,
0x93,
0x1C,
0xAE,
0xD8,
],
"PEI_LOCK_PHYSICAL_PRESENCE_PPI_GUID": [
0xEF9AEFE5,
0x2BD3,
0x4031,
0xAF,
0x7D,
0x5E,
0xFE,
0x5A,
0xBB,
0x9A,
0xD,
],
"PEI_NT_THUNK_GUID": [
0x98C281E5,
0xF906,
0x43DD,
0xA9,
0x2B,
0xB0,
0x3,
0xBF,
0x27,
0x65,
0xDA,
],
"PEI_NT_THUNK_PPI_GUID": [
0x98C281E5,
0xF906,
0x43DD,
0xA9,
0x2B,
0xB0,
0x3,
0xBF,
0x27,
0x65,
0xDA,
],
"PEI_OPERATOR_PRESENCE_PPI_GUID": [
0x20A7378C,
0xAA83,
0x4CE1,
0x82,
0x1F,
0x47,
0x40,
0xEE,
0x1B,
0x3F,
0x9F,
],
"PEI_PCI_CFG_PPI_GUID": [
0xE1F2EBA0,
0xF7B9,
0x4A26,
0x86,
0x20,
0x13,
0x12,
0x21,
0x64,
0x2A,
0x90,
],
"PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID": [
0xF894643D,
0xC449,
0x42D1,
0x8E,
0xA8,
0x85,
0xBD,
0xD8,
0xC6,
0x5B,
0xDE,
],
"PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID": [
0x3CDC90C6,
0x13FB,
0x4A75,
0x9E,
0x79,
0x59,
0xE9,
0xDD,
0x78,
0xB9,
0xFA,
],
"PEI_RESET_PPI_GUID": [
0xEF398D58,
0x9DFD,
0x4103,
0xBF,
0x94,
0x78,
0xC6,
0xF4,
0xFE,
0x71,
0x2F,
],
"PEI_S3_RESUME_PPI_GUID": [
0x4426CCB2,
0xE684,
0x4A8A,
0xAE,
0x40,
0x20,
0xD4,
0xB0,
0x25,
0xB7,
0x10,
],
"PEI_SECURITY_PPI_GUID": [
0x1388066E,
0x3A57,
0x4EFA,
0x98,
0xF3,
0xC1,
0x2F,
0x3A,
0x95,
0x8A,
0x29,
],
"PEI_SEC_PERFORMANCE_PPI_GUID": [
0x0ECC666B,
0x4662,
0x47F9,
0x9D,
0xD5,
0xD0,
0x96,
0xFF,
0x7D,
0xA4,
0x9E,
],
"PEI_SMBUS2_PPI_GUID": [
0x9CA93627,
0xB65B,
0x4324,
0xA2,
0x2,
0xC0,
0xB4,
0x61,
0x76,
0x45,
0x43,
],
"PEI_SMBUS_PPI_GUID": [
0xABD42895,
0x78CF,
0x4872,
0x84,
0x44,
0x1B,
0x5C,
0x18,
0xB,
0xFB,
0xDA,
],
"PEI_SMM_ACCESS_PPI_GUID": [
0x268F33A9,
0xCCCD,
0x48BE,
0x88,
0x17,
0x86,
0x5,
0x3A,
0xC3,
0x2E,
0xD6,
],
"PEI_SMM_CONTROL_PPI_GUID": [
0x61C68702,
0x4D7E,
0x4F43,
0x8D,
0xEF,
0xA7,
0x43,
0x5,
0xCE,
0x74,
0xC5,
],
"PEI_STALL_PPI_GUID": [
0x1F4C6F90,
0xB06B,
0x48D8,
0xA2,
0x01,
0xBA,
0xE5,
0xF1,
0xCD,
0x7D,
0x56,
],
"PEI_STATUS_CODE_MEMORY_PPI_GUID": [
0x26F8AB01,
0xD3CD,
0x489C,
0x98,
0x4F,
0xDF,
0xDE,
0xF7,
0x68,
0x39,
0x5B,
],
"PEI_STATUS_CODE_PPI_GUID": [
0x229832D3,
0x7A30,
0x4B36,
0xB8,
0x27,
0xF4,
0xC,
0xB7,
0xD4,
0x54,
0x36,
],
"PEI_TPM_INITIALIZED_PPI_GUID": [
0xE9DB0D58,
0xD48D,
0x47F6,
0x9C,
0x6E,
0x6F,
0x40,
0xE8,
0x6C,
0x7B,
0x41,
],
"PEI_UNIX_AUTOSCAN_PPI_GUID": [
0xF2ED3D14,
0x8985,
0x11DB,
0xB0,
0x57,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"PEI_UNIX_THUNK_PPI_GUID": [
0xF2F830F2,
0x8985,
0x11DB,
0x80,
0x6B,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"PEI_USB2_HOST_CONTROLLER_PPI_GUID": [
0xA7D09FE1,
0x74D4,
0x4BA5,
0x84,
0x7C,
0x12,
0xED,
0x5B,
0x19,
0xAD,
0xE4,
],
"PEI_USB_CONTROLLER_PPI_GUID": [
0x3BC1F6DE,
0x693E,
0x4547,
0xA3,
0x0,
0x21,
0x82,
0x3C,
0xA4,
0x20,
0xB2,
],
"PEI_USB_HOST_CONTROLLER_PPI_GUID": [
0x652B38A9,
0x77F4,
0x453F,
0x89,
0xD5,
0xE7,
0xBD,
0xC3,
0x52,
0xFC,
0x53,
],
"PEI_USB_IO_PPI_GUID": [
0x7C29785C,
0x66B9,
0x49FC,
0xB7,
0x97,
0x1C,
0xA5,
0x55,
0xE,
0xF2,
0x83,
],
"PERFORMANCEPKG_TOKEN_SPACE_GUID": [
0x669346EF,
0xFDAD,
0x4AEB,
0x08,
0xA6,
0x21,
0x46,
0x2D,
0x3F,
0xEF,
0x7D,
],
"PERFORMANCE_EX_PROTOCOL_GUID": [
0x1EA81BEC,
0xF01A,
0x4D98,
0xA2,
0x1,
0x4A,
0x61,
0xCE,
0x2F,
0xC0,
0x22,
],
"PERFORMANCE_PROTOCOL_GUID": [
0x76B6BDFA,
0x2ACD,
0x4462,
0x9E,
0x3F,
0xCB,
0x58,
0xC9,
0x69,
0xD9,
0x37,
],
"PE_COFF_LOADER_PROTOCOL_GUID": [
0xB323179B,
0x97FB,
0x477E,
0xB0,
0xFE,
0xD8,
0x85,
0x91,
0xFA,
0x11,
0xAB,
],
"PLAT_OVER_MNGR_GUID": [
0x8614567D,
0x35BE,
0x4415,
0x8D,
0x88,
0xBD,
0x7D,
0xC,
0x9C,
0x70,
0xC0,
],
"PRE_PI_EXTRACT_GUIDED_SECTION_DATA_GUID": [
0x385A982C,
0x2F49,
0x4043,
0xA5,
0x1E,
0x49,
0x01,
0x02,
0x5C,
0x8B,
0x6B,
],
"PWD_CREDENTIAL_PROVIDER_GUID": [
0x78B9EC8B,
0xC000,
0x46C5,
0xAC,
0x93,
0x24,
0xA0,
0xC1,
0xBB,
0x0,
0xCE,
],
"RECOVERY_ON_DATA_CD_GUID": [
0x5CAC0099,
0x0DC9,
0x48E5,
0x80,
0x68,
0xBB,
0x95,
0xF5,
0x40,
0x0A,
0x9F,
],
"RECOVERY_ON_FAT_FLOPPY_DISK_GUID": [
0x2E3D2E75,
0x9B2E,
0x412D,
0xB4,
0xB1,
0x70,
0x41,
0x6B,
0x87,
0x0,
0xFF,
],
"RECOVERY_ON_FAT_IDE_DISK_GUID": [
0xB38573B6,
0x6200,
0x4AC5,
0xB5,
0x1D,
0x82,
0xE6,
0x59,
0x38,
0xD7,
0x83,
],
"RECOVERY_ON_FAT_USB_DISK_GUID": [
0x0FFBCE19,
0x324C,
0x4690,
0xA0,
0x09,
0x98,
0xC6,
0xAE,
0x2E,
0xB1,
0x86,
],
"SAL_SYSTEM_TABLE_GUID": [
0xEB9D2D32,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"SECUREBOOT_CONFIG_FORM_SET_GUID": [
0x5DAF50A5,
0xEA81,
0x4DE2,
0x8F,
0x9B,
0xCA,
0xBD,
0xA9,
0xCF,
0x5C,
0x14,
],
"SECURITYPKG_TOKEN_SPACE_GUID": [
0xD3FB176,
0x9569,
0x4D51,
0xA3,
0xEF,
0x7D,
0x61,
0xC6,
0x4F,
0xEA,
0xBA,
],
"SHELLPKG_SHELL_ENV2_EXT_GUID": [
0xD2C18636,
0x40E5,
0x4EB5,
0xA3,
0x1B,
0x36,
0x69,
0x5F,
0xD4,
0x2C,
0x87,
],
"SHELL_ALIAS_VARIABLE_GUID": [
0x0053D9D6,
0x2659,
0x4599,
0xA2,
0x6B,
0xEF,
0x45,
0x36,
0xE6,
0x31,
0xA9,
],
"SHELL_DEBUG1_HII_GUID": [
0x25F200AA,
0xD3CB,
0x470A,
0xBF,
0x51,
0xE7,
0xD1,
0x62,
0xD2,
0x2E,
0x6F,
],
"SHELL_DRIVER1_HII_GUID": [
0xAF0B742,
0x63EC,
0x45BD,
0x8D,
0xB6,
0x71,
0xAD,
0x7F,
0x2F,
0xE8,
0xE8,
],
"SHELL_ENVIRONMENT_PROTOCOL_GUID": [
0x47C7B221,
0xC42A,
0x11D2,
0x8E,
0x57,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"SHELL_INSTALL1_HII_GUID": [
0x7D574D54,
0xD364,
0x4D4A,
0x95,
0xE3,
0x49,
0x45,
0xDB,
0x7A,
0xD3,
0xEE,
],
"SHELL_INTERFACE_PROTOCOL_GUID": [
0x47C7B223,
0xC42A,
0x11D2,
0x8E,
0x57,
0x0,
0xA0,
0xC9,
0x69,
0x72,
0x3B,
],
"SHELL_LEVEL1_HII_GUID": [
0xDEC5DAA4,
0x6781,
0x4820,
0x9C,
0x63,
0xA7,
0xB0,
0xE4,
0xF1,
0xDB,
0x31,
],
"SHELL_LEVEL2_HII_GUID": [
0xF95A7CCC,
0x4C55,
0x4426,
0xA7,
0xB4,
0xDC,
0x89,
0x61,
0x95,
0xB,
0xAE,
],
"SHELL_LEVEL3_HII_GUID": [
0x4344558D,
0x4EF9,
0x4725,
0xB1,
0xE4,
0x33,
0x76,
0xE8,
0xD6,
0x97,
0x4F,
],
"SHELL_MAP_GUID": [
0x51271E13,
0x7DE3,
0x43AF,
0x8B,
0xC2,
0x71,
0xAD,
0x3B,
0x82,
0x43,
0x25,
],
"SHELL_NETWORK1_HII_GUID": [
0xF3D301BB,
0xF4A5,
0x45A8,
0xB0,
0xB7,
0xFA,
0x99,
0x9C,
0x62,
0x37,
0xAE,
],
"SHELL_VARIABLE_GUID": [
0x158DEF5A,
0xF656,
0x419C,
0xB0,
0x27,
0x7A,
0x31,
0x92,
0xC0,
0x79,
0xD2,
],
"SMBIOS_TABLE_GUID": [
0xEB9D2D31,
0x2D88,
0x11D3,
0x9A,
0x16,
0x0,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"SMM_COMMUNICATE_HEADER_GUID": [
0xF328E36C,
0x23B6,
0x4A95,
0x85,
0x4B,
0x32,
0xE1,
0x95,
0x34,
0xCD,
0x75,
],
"SMM_PERFORMANCE_EX_PROTOCOL_GUID": [
0x931FC048,
0xC71D,
0x4455,
0x89,
0x30,
0x47,
0x6,
0x30,
0xE3,
0xE,
0xE5,
],
"SMM_PERFORMANCE_PROTOCOL_GUID": [
0xF866226A,
0xEAA5,
0x4F5A,
0xA9,
0xA,
0x6C,
0xFB,
0xA5,
0x7C,
0x58,
0x8E,
],
"STATUS_CODE_CALLBACK_GUID": [
0xE701458C,
0x4900,
0x4CA5,
0xB7,
0x72,
0x3D,
0x37,
0x94,
0x9F,
0x79,
0x27,
],
"SYSTEM_ROM_FILE_GUID": [
0x1547B4F3,
0x3E8A,
0x4FEF,
0x81,
0xC8,
0x32,
0x8E,
0xD6,
0x47,
0xAB,
0x1A,
],
"TCG_CONFIG_FORM_SET_GUID": [
0xB0F901E4,
0xC424,
0x45DE,
0x90,
0x81,
0x95,
0xE2,
0xB,
0xDE,
0x6F,
0xB5,
],
"TEMPORARY_RAM_SUPPORT_PPI_GUID": [
0xDBE23AA9,
0xA345,
0x4B97,
0x85,
0xB6,
0xB2,
0x26,
0xF1,
0x61,
0x73,
0x89,
],
"TIANO_CUSTOM_DECOMPRESS_GUID": [
0xA31280AD,
0x481E,
0x41B6,
0x95,
0xE8,
0x12,
0x7F,
0x4C,
0x98,
0x47,
0x79,
],
"UNIX_FWH_PPI_GUID": [
0xF2F0DC30,
0x8985,
0x11DB,
0xA1,
0x5B,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"UNIX_PEI_LOAD_FILE_GUID": [
0xF2F48768,
0x8985,
0x11DB,
0xB8,
0xDA,
0x00,
0x40,
0xD0,
0x2B,
0x18,
0x35,
],
"UNKNOWN_DEVICE_GUID": [
0xCF31FAC5,
0xC24E,
0x11D2,
0x85,
0xF3,
0x0,
0xA0,
0xC9,
0x3E,
0xC9,
0x3B,
],
"USB_CREDENTIAL_PROVIDER_GUID": [
0xD0849ED1,
0xA88C,
0x4BA6,
0xB1,
0xD6,
0xAB,
0x50,
0xE2,
0x80,
0xB7,
0xA9,
],
"USB_KEYBOARD_LAYOUT_PACKAGE_GUID": [
0xC0F3B43,
0x44DE,
0x4907,
0xB4,
0x78,
0x22,
0x5F,
0x6F,
0x62,
0x89,
0xDC,
],
"USER_IDENTIFY_MANAGER_GUID": [
0x3CCD3DD8,
0x8D45,
0x4FED,
0x96,
0x2D,
0x2B,
0x38,
0xCD,
0x82,
0xB3,
0xC4,
],
"USER_PROFILE_MANAGER_GUID": [
0xC35F272C,
0x97C2,
0x465A,
0xA2,
0x16,
0x69,
0x6B,
0x66,
0x8A,
0x8C,
0xFE,
],
"VIRTUAL_UNCACHED_PAGES_PROTOCOL_GUID": [
0xAD651C7D,
0x3C22,
0x4DBF,
0x92,
0xE8,
0x38,
0xA7,
0xCD,
0xAE,
0x87,
0xB2,
],
"VLAN_CONFIG_FORM_SET_GUID": [
0xD79DF6B0,
0xEF44,
0x43BD,
0x97,
0x97,
0x43,
0xE9,
0x3B,
0xCF,
0x5F,
0xA8,
],
}
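# Editorial note (not part of the original data): each value above is an EFI_GUID split
# into its standard fields -- a 32-bit Data1, two 16-bit fields (Data2, Data3) and eight
# single bytes (Data4). Assuming the dict is bound to a name such as `guids` (the actual
# variable name is defined where the dict is opened, earlier in this file), the canonical
# registry-format string can be reconstructed like this:
#
#     def guid_to_str(parts):
#         d1, d2, d3, *d4 = parts
#         return "{:08X}-{:04X}-{:04X}-{:02X}{:02X}-{}".format(
#             d1, d2, d3, d4[0], d4[1], "".join("{:02X}".format(b) for b in d4[2:])
#         )
#
#     guid_to_str(guids["EFI_VARIABLE_GUID"])  # -> "DDCF3616-3275-4164-98B6-FE85707FFE7D"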
|
<filename>docusign_esign/models/usage_history.py
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UsageHistory(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, last_sent_date_time=None, last_signed_date_time=None, sent_count=None, signed_count=None):
"""
UsageHistory - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'last_sent_date_time': 'str',
'last_signed_date_time': 'str',
'sent_count': 'str',
'signed_count': 'str'
}
self.attribute_map = {
'last_sent_date_time': 'lastSentDateTime',
'last_signed_date_time': 'lastSignedDateTime',
'sent_count': 'sentCount',
'signed_count': 'signedCount'
}
self._last_sent_date_time = last_sent_date_time
self._last_signed_date_time = last_signed_date_time
self._sent_count = sent_count
self._signed_count = signed_count
@property
def last_sent_date_time(self):
"""
Gets the last_sent_date_time of this UsageHistory.
The date and time the user last sent an envelope.
:return: The last_sent_date_time of this UsageHistory.
:rtype: str
"""
return self._last_sent_date_time
@last_sent_date_time.setter
def last_sent_date_time(self, last_sent_date_time):
"""
Sets the last_sent_date_time of this UsageHistory.
The date and time the user last sent an envelope.
:param last_sent_date_time: The last_sent_date_time of this UsageHistory.
:type: str
"""
self._last_sent_date_time = last_sent_date_time
@property
def last_signed_date_time(self):
"""
Gets the last_signed_date_time of this UsageHistory.
The date and time the user last signed an envelope.
:return: The last_signed_date_time of this UsageHistory.
:rtype: str
"""
return self._last_signed_date_time
@last_signed_date_time.setter
def last_signed_date_time(self, last_signed_date_time):
"""
Sets the last_signed_date_time of this UsageHistory.
The date and time the user last signed an envelope.
:param last_signed_date_time: The last_signed_date_time of this UsageHistory.
:type: str
"""
self._last_signed_date_time = last_signed_date_time
@property
def sent_count(self):
"""
Gets the sent_count of this UsageHistory.
The number of envelopes the user has sent.
:return: The sent_count of this UsageHistory.
:rtype: str
"""
return self._sent_count
@sent_count.setter
def sent_count(self, sent_count):
"""
Sets the sent_count of this UsageHistory.
The number of envelopes the user has sent.
:param sent_count: The sent_count of this UsageHistory.
:type: str
"""
self._sent_count = sent_count
@property
def signed_count(self):
"""
Gets the signed_count of this UsageHistory.
The number of envelopes the user has signed.
:return: The signed_count of this UsageHistory.
:rtype: str
"""
return self._signed_count
@signed_count.setter
def signed_count(self, signed_count):
"""
Sets the signed_count of this UsageHistory.
The number of envelopes the user has signed.
:param signed_count: The signed_count of this UsageHistory.
:type: str
"""
self._signed_count = signed_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
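# Illustrative usage (editorial sketch, not part of the generated model): all four
# counters are plain strings mirroring the JSON returned by the API, and to_dict()
# returns the snake_case attribute names (attribute_map holds the camelCase keys the
# API itself uses).
#
#     history = UsageHistory(sent_count="12", signed_count="7")
#     history.last_sent_date_time = "2021-01-02T00:00:00Z"  # example value
#     history.to_dict()
#     # {'last_sent_date_time': '2021-01-02T00:00:00Z', 'last_signed_date_time': None,
#     #  'sent_count': '12', 'signed_count': '7'}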
|
<reponame>resistics/resistics
"""Testing of sampling code"""
import pytest
from typing import Tuple, Union
from datetime import datetime, timedelta
import pandas as pd
from resistics.sampling import RSDateTime, RSTimeDelta, to_datetime, to_timedelta
@pytest.mark.parametrize(
"time, expected",
[
("2021-01-02 00:00:00", RSDateTime(2021, 1, 2)),
(pd.Timestamp("2021-01-02 00:00:00"), RSDateTime(2021, 1, 2)),
(datetime(2021, 1, 2), RSDateTime(2021, 1, 2)),
],
)
def test_to_datetime(time: Union[str, pd.Timestamp, datetime], expected):
"""Test converting to datetime"""
assert to_datetime(time) == expected
@pytest.mark.parametrize(
"delta, expected",
[
(1 / 4096, RSTimeDelta(seconds=1 / 4096)),
(pd.Timedelta(0.1, "s"), RSTimeDelta(microseconds=100_000)),
(timedelta(milliseconds=100), RSTimeDelta(microseconds=100_000)),
],
)
def test_to_timedelta(
delta: Union[float, timedelta, pd.Timedelta], expected: RSTimeDelta
):
"""Test converting to timedelta"""
assert to_timedelta(delta) == expected
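# Editorial note: a sample period such as 1 / 4096 s equals 244.140625 microseconds,
# which a plain datetime.timedelta (microsecond resolution) cannot represent exactly;
# RSTimeDelta keeps the sub-microsecond remainder, which is presumably why the
# 16_384 Hz expectations further down carry fractional-microsecond components.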
@pytest.mark.parametrize(
"delta, fs, method, expected",
[
(to_timedelta(100 * (1 / 64)), 64, "round", 101),
(to_timedelta(354 * (1 / 16_384)), 16_384, "round", 355),
(to_timedelta(354.2 * (1 / 16_384)), 16_384, "round", 355),
(to_timedelta(354.2 * (1 / 16_384)), 16_384, "floor", 355),
(to_timedelta(354.2 * (1 / 16_384)), 16_384, "ceil", 356),
(to_timedelta(354.8 * (1 / 16_384)), 16_384, "round", 356),
(to_timedelta(354.8 * (1 / 16_384)), 16_384, "floor", 355),
(to_timedelta(354.8 * (1 / 16_384)), 16_384, "ceil", 356),
],
)
def test_to_n_samples(delta: RSTimeDelta, fs: float, method: str, expected: int):
"""Test to n_samples"""
from resistics.sampling import to_n_samples
n_samples = to_n_samples(delta, fs, method)
assert n_samples == expected
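# Editorial note on the expected counts above: they are consistent with
# n_samples = <rounded number of sample periods> + 1, counting the sample at the start
# of the window -- e.g. 100 periods at 64 Hz give 101 samples, and 354.2 periods floor
# to 354 + 1 = 355 while they ceil to 355 + 1 = 356.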
@pytest.mark.parametrize(
"fs, first_time, sample, expected",
[
(
512,
to_datetime("2021-01-02 00:00:00"),
512,
to_datetime("2021-01-02 00:00:01"),
),
(
16_384,
to_datetime("2021-01-01 00:00:00"),
20_000,
RSDateTime(2021, 1, 1, 0, 0, 1, 220703, 125),
),
],
)
def test_sample_to_datetimes(
fs: float,
first_time: RSDateTime,
sample: int,
expected: RSDateTime,
) -> None:
"""Test converting sample to datetimes"""
from resistics.sampling import sample_to_datetime
assert expected == sample_to_datetime(fs, first_time, sample)
@pytest.mark.parametrize(
"fs, first_time, from_sample, to_sample, expected_from, expected_to",
[
(
4096,
to_datetime("2021-01-02 00:00:00"),
10_010,
25_999,
RSDateTime(2021, 1, 2, 0, 0, 2, 443847, 656.25),
RSDateTime(2021, 1, 2, 0, 0, 6, 347412, 109.375),
),
],
)
def test_samples_to_datetimes(
fs: float,
first_time: RSDateTime,
from_sample: int,
to_sample: int,
expected_from: RSDateTime,
expected_to: RSDateTime,
) -> None:
"""Test converting samples to datetimes"""
from resistics.sampling import samples_to_datetimes
from_time, to_time = samples_to_datetimes(fs, first_time, from_sample, to_sample)
assert from_time == expected_from
assert to_time == expected_to
@pytest.mark.parametrize(
"first_time, last_time, from_time, expected, raises",
[
(
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-01 23:00:00"),
to_datetime("2021-01-02 00:00:00"),
False,
),
(
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 03:00:00"),
to_datetime("2021-01-02 03:00:00"),
False,
),
(
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 23:30:00"),
to_datetime("2021-01-02 00:00:00"),
True,
),
],
)
def test_check_from_time(
first_time: RSDateTime,
last_time: RSDateTime,
from_time: RSDateTime,
expected: RSDateTime,
raises: bool,
) -> None:
"""Test adjusting from time"""
from resistics.sampling import check_from_time
if raises:
with pytest.raises(ValueError):
check_from_time(first_time, last_time, from_time)
else:
from_time = check_from_time(first_time, last_time, from_time)
assert from_time == expected
@pytest.mark.parametrize(
"first_time, last_time, to_time, expected, raises",
[
(
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 23:30:00"),
to_datetime("2021-01-02 23:00:00"),
False,
),
(
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 03:00:00"),
to_datetime("2021-01-02 03:00:00"),
False,
),
(
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-01 23:30:00"),
to_datetime("2021-01-02 00:00:00"),
True,
),
],
)
def test_check_to_time(
first_time: RSDateTime,
last_time: RSDateTime,
to_time: RSDateTime,
expected: RSDateTime,
raises: bool,
) -> None:
"""Check adjusting to time"""
from resistics.sampling import check_to_time
if raises:
with pytest.raises(ValueError):
check_to_time(first_time, last_time, to_time)
else:
to_time = check_to_time(first_time, last_time, to_time)
assert to_time == expected
@pytest.mark.parametrize(
"fs, first_time, last_time, from_time, expected",
[
(
512,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 00:00:00"),
0,
),
(
128,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 00:00:00"),
0,
),
(
128,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 01:01:02"),
468_736,
),
(
0.5,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 01:01:01"),
1_831,
),
(
16_384,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 00:00:00") + 400_000 * to_timedelta(1 / 16_384),
to_datetime("2021-01-02 00:00:00") + 193_435 * to_timedelta(1 / 16_384),
193_435,
),
],
)
def test_from_time_to_sample(
fs: float,
first_time: RSDateTime,
last_time: RSDateTime,
from_time: RSDateTime,
expected: int,
) -> None:
"""Test converting datetimes to samples"""
from resistics.sampling import from_time_to_sample
assert expected == from_time_to_sample(fs, first_time, last_time, from_time)
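# Worked arithmetic for two cases above (editorial note): at 128 Hz, 01:01:02 lies
# 3662 s after first_time and 3662 * 128 = 468_736, the expected index; at 0.5 Hz,
# 01:01:01 lies 3661 s in and 3661 * 0.5 = 1830.5, which resolves to 1_831 --
# consistent with taking the first sample at or after from_time.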
@pytest.mark.parametrize(
"fs, first_time, last_time, to_time, expected",
[
(
512,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 23:00:00"),
42_393_600,
),
(
128,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-03 01:00:00"),
11_520_000,
),
(
128,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 02:22:31"),
1_094_528,
),
(
0.5,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 02:22:31"),
4_275,
),
(
16_384,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 00:00:00") + 400_000 * to_timedelta(1 / 16_384),
to_datetime("2021-01-02 00:00:00") + 374_653 * to_timedelta(1 / 16_384),
374_653,
),
],
)
def test_to_time_to_sample(
fs: float,
first_time: RSDateTime,
last_time: RSDateTime,
to_time: RSDateTime,
expected: int,
) -> None:
"""Test converting datetimes to samples"""
from resistics.sampling import to_time_to_sample
assert expected == to_time_to_sample(fs, first_time, last_time, to_time)
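# Worked arithmetic (editorial note): the 512 Hz case spans 23 h = 82_800 s and
# 82_800 * 512 = 42_393_600; the 128 Hz full-span case covers 25 h = 90_000 s and
# 90_000 * 128 = 11_520_000. In the 0.5 Hz case 02:22:31 is 8551 s in, and
# 8551 * 0.5 = 4275.5 resolves to 4_275 -- consistent with taking the last sample
# at or before to_time.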
@pytest.mark.parametrize(
"fs, first_time, last_time, from_time, to_time, expected",
[
(
512,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-02 23:00:00"),
(0, 42_393_600),
),
(
128,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
(0, 11_520_000),
),
(
128,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 01:01:02"),
to_datetime("2021-01-02 02:22:31"),
(468_736, 1_094_528),
),
(
0.5,
to_datetime("2021-01-02 00:00:00"),
to_datetime("2021-01-03 01:00:00"),
to_datetime("2021-01-02 01:01:01"),
to_datetime("2021-01-02 02:22:31"),
(1_831, 4_275),
),
],
)
def test_datetimes_to_samples(
fs: float,
first_time: RSDateTime,
last_time: RSDateTime,
from_time: RSDateTime,
to_time: RSDateTime,
expected: Tuple[int, int],
) -> None:
"""Test converting datetimes to samples"""
from resistics.sampling import datetimes_to_samples
from_sample, to_sample = datetimes_to_samples(
fs, first_time, last_time, from_time, to_time
)
assert (from_sample, to_sample) == expected
|
<reponame>edouardparis/aquarium<gh_stars>0
#!/usr/bin/env python3
import argparse
import logging
import os
import shutil
import socket
import subprocess
import sys
import test_framework
import time
import traceback
from concurrent import futures
from test_framework.bitcoind import BitcoinD
from test_framework.revault_network import RevaultNetwork
from test_framework.utils import (
POSTGRES_USER,
POSTGRES_PASS,
POSTGRES_HOST,
POSTGRES_IS_SETUP,
EXECUTOR_WORKERS,
LOG_LEVEL,
DEBUG_GUI,
)
BASE_DIR = os.getenv("BASE_DIR", os.path.abspath("demo"))
SRC_DIR = os.getenv("SRC_DIR", os.path.abspath("src"))
COORDINATORD_SRC_DIR = os.path.join(SRC_DIR, "coordinatord")
COSIGNERD_SRC_DIR = os.path.join(SRC_DIR, "cosignerd")
MIRADORD_SRC_DIR = os.path.join(SRC_DIR, "miradord")
REVAULTD_SRC_DIR = os.path.join(SRC_DIR, "revaultd")
REVAULT_GUI_SRC_DIR = os.path.join(SRC_DIR, "revault-gui")
SHELL = os.getenv("SHELL", "bash")
COORDINATORD_VERSION = os.getenv("COORDINATORD_VERSION", "master")
COSIGNERD_VERSION = os.getenv("COSIGNERD_VERSION", "master")
MIRADORD_VERSION = os.getenv("MIRADORD_VERSION", "master")
REVAULTD_VERSION = os.getenv("REVAULTD_VERSION", "master")
REVAULT_GUI_VERSION = os.getenv("REVAULT_GUI_VERSION", "master")
WITH_GUI = os.getenv("WITH_GUI", "1") == "1"
WITH_ALL_HWS = os.getenv("WITH_ALL_HWS", "0") == "1"
# FIXME: use tmp
def bitcoind_dir():
return os.path.join(BASE_DIR, "bitcoind")
def executor():
return futures.ThreadPoolExecutor(
max_workers=EXECUTOR_WORKERS, thread_name_prefix="revault-demo"
)
def is_listening(host, port):
"""Check if a service is listening there."""
s = socket.socket()
try:
s.connect((host, port))
return True
except socket.error:
return False
def build_src(src_dir, version, git_url):
if not os.path.isdir(src_dir):
if not os.path.isdir(SRC_DIR):
os.makedirs(SRC_DIR)
subprocess.check_call(["git", "-C", f"{SRC_DIR}", "clone", git_url])
subprocess.check_call(["git", "-C", f"{src_dir}", "fetch", "origin"])
subprocess.check_call(["git", "-C", f"{src_dir}", "checkout", f"{version}"])
subprocess.check_call(
["cargo", "build", "--manifest-path", f"{src_dir}/Cargo.toml"]
)
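# Editorial note: build_src() clones the repository into SRC_DIR on first use, then
# fetches, checks out the requested version and runs `cargo build` against the crate's
# Cargo.toml -- e.g. build_src(REVAULTD_SRC_DIR, REVAULTD_VERSION,
# "https://github.com/revault/revaultd"), exactly as build_all_binaries() does below.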
def build_all_binaries(build_cosig, build_wt, build_coordinator=True):
if build_coordinator:
logging.info(
f"Building coordinatord at '{COORDINATORD_VERSION}' in '{COORDINATORD_SRC_DIR}'"
)
build_src(
COORDINATORD_SRC_DIR,
COORDINATORD_VERSION,
"https://github.com/revault/coordinatord",
)
else:
logging.info("Skipping the build of the coordinator, using the dummy one.")
if build_cosig:
logging.info(
f"Building cosignerd at '{COSIGNERD_VERSION}' in '{COSIGNERD_SRC_DIR}'"
)
build_src(
COSIGNERD_SRC_DIR, COSIGNERD_VERSION, "https://github.com/revault/cosignerd"
)
if build_wt:
logging.info(
f"Building miradord at '{MIRADORD_VERSION}' in '{MIRADORD_SRC_DIR}'"
)
build_src(
MIRADORD_SRC_DIR, MIRADORD_VERSION, "https://github.com/revault/miradord"
)
logging.info(f"Building revaultd at '{REVAULTD_VERSION}' in '{REVAULTD_SRC_DIR}'")
build_src(REVAULTD_SRC_DIR, REVAULTD_VERSION, "https://github.com/revault/revaultd")
if WITH_GUI:
logging.info(
f"Building revault-gui at '{REVAULT_GUI_VERSION}' in '{REVAULT_GUI_SRC_DIR}',"
" this may take some time"
)
build_src(
REVAULT_GUI_SRC_DIR,
REVAULT_GUI_VERSION,
"https://github.com/edouardparis/revault-gui",
)
logging.info("Building revault-gui's dummysigner")
subprocess.check_call(
[
"cargo",
"build",
"--manifest-path",
f"{REVAULT_GUI_SRC_DIR}/contrib/tools/dummysigner/Cargo.toml",
]
)
def bitcoind():
bitcoind = BitcoinD(bitcoin_dir=bitcoind_dir())
bitcoind.startup()
bitcoind.rpc.createwallet(bitcoind.rpc.wallet_name, False, False, "", False, True)
while bitcoind.rpc.getbalance() < 50:
bitcoind.rpc.generatetoaddress(1, bitcoind.rpc.getnewaddress())
while bitcoind.rpc.getblockcount() <= 1:
time.sleep(0.1)
return bitcoind
def deploy(
n_stks, n_mans, n_stkmans, csv, mans_thresh=None, with_cosigs=False, policies=[]
):
with_wts = len(policies) > 0
if not POSTGRES_IS_SETUP:
logging.info("No Postgre backend given, will use a dummy coordinator")
if POSTGRES_IS_SETUP and not is_listening(POSTGRES_HOST, 5432):
logging.error(f"No Postgre server listening on {POSTGRES_HOST}:5432.")
print(
f"A simple way to get started with one given your POSTGRES_PASS and POSTGRES_USER:"
)
print(
f" docker run --rm -d -p 5432:5432 --name postgres-coordinatord -e POSTGRES_PASSWORD={POSTGRES_PASS} -e POSTGRES_USER={POSTGRES_USER} -e POSTGRES_DB=coordinator_db postgres:alpine"
)
sys.exit(1)
if n_stks + n_stkmans < 1:
logging.error("Need at least 1 stakeholder")
sys.exit(1)
if n_mans + n_stkmans < 1:
logging.error("Need at least 1 manager")
sys.exit(1)
if mans_thresh is not None and (
mans_thresh > n_mans + n_stkmans or mans_thresh < 1
):
logging.error("Invalid managers threshold")
sys.exit(1)
for p in policies:
if not os.path.isfile(p):
logging.error(f"No plugin at '{p}'")
sys.exit(1)
if os.path.isdir(BASE_DIR):
logging.warning("Base directory exists already")
resp = input(f"Remove non-empty '{BASE_DIR}' and start fresh? (y/n) ")
if resp.lower() == "y":
shutil.rmtree(BASE_DIR)
else:
logging.info("Exiting")
sys.exit(1)
logging.info("Checking the source directories..")
build_all_binaries(build_cosig=with_cosigs, build_wt=with_wts, build_coordinator=POSTGRES_IS_SETUP)
logging.info("Setting up bitcoind")
bd = bitcoind()
    # In any case, clean up bitcoind before exiting
try:
logging.info(
f"Deploying a Revault network with {n_stks} only-stakeholders,"
f" {n_mans} only-managers, {n_stkmans} both stakeholders and managers,"
f" a CSV of {csv} and a managers threshold of {mans_thresh or n_mans + n_stkmans}"
)
        # Monkey-patch the server binaries' paths
test_framework.revaultd.REVAULTD_PATH = os.path.join(
REVAULTD_SRC_DIR, "target", "debug", "revaultd"
)
test_framework.coordinatord.COORDINATORD_PATH = os.path.join(
COORDINATORD_SRC_DIR, "target", "debug", "coordinatord"
)
test_framework.cosignerd.COSIGNERD_PATH = os.path.join(
COSIGNERD_SRC_DIR, "target", "debug", "cosignerd"
)
test_framework.miradord.MIRADORD_PATH = os.path.join(
MIRADORD_SRC_DIR, "target", "debug", "miradord"
)
rn = RevaultNetwork(
BASE_DIR,
bd,
executor(),
POSTGRES_USER,
POSTGRES_PASS,
POSTGRES_HOST,
)
rn.deploy(
n_stks,
n_mans,
n_stkmans,
csv,
mans_thresh,
with_watchtowers=with_wts,
with_cosigs=with_cosigs,
)
if with_wts:
# NOTE: no config. We use hardcoded values for the demo.
policies = [{"path": p} for p in policies]
for stk in rn.stk_wallets + rn.stkman_wallets:
stk.watchtower.add_plugins(policies)
dummysigner_conf_file = os.path.join(BASE_DIR, "dummysigner.toml")
# We use a hack to avoid having to modify the test_framework to include the GUI.
if WITH_GUI:
emergency_address = rn.emergency_address
deposit_desc = rn.deposit_desc
unvault_desc = rn.unvault_desc
cpfp_desc = rn.cpfp_desc
with open(dummysigner_conf_file, "w") as f:
f.write(f'emergency_address = "{emergency_address}"\n')
for i, stk in enumerate(rn.stk_wallets):
f.write("[[keys]]\n")
f.write(f'name = "stakeholder_{i}_key"\n')
f.write(f'xpriv = "{stk.stk_keychain.hd.get_xpriv()}"\n')
for i, man in enumerate(rn.man_wallets):
f.write("[[keys]]\n")
f.write(f'name = "manager_{i}_key"\n')
f.write(f'xpriv = "{man.man_keychain.hd.get_xpriv()}"\n')
for i, stkman in enumerate(rn.stkman_wallets):
f.write("[[keys]]\n")
f.write(f'name = "stkman_{i}_stakeholder_key"\n')
f.write(f'xpriv = "{stkman.stk_keychain.hd.get_xpriv()}"\n')
f.write("[[keys]]\n")
f.write(f'name = "stkman_{i}_manager_key"\n')
f.write(f'xpriv = "{stkman.man_keychain.hd.get_xpriv()}"\n')
f.write("[descriptors]\n")
f.write(f'deposit_descriptor = "{deposit_desc}"\n')
f.write(f'unvault_descriptor = "{unvault_desc}"\n')
f.write(f'cpfp_descriptor = "{cpfp_desc}"\n')
for p in rn.participants():
p.gui_conf_file = os.path.join(
p.datadir_with_network, "gui_config.toml"
)
with open(p.gui_conf_file, "w") as f:
f.write(f"revaultd_config_path = '{p.conf_file}'\n")
f.write(f"revaultd_path = '{test_framework.revaultd.REVAULTD_PATH}'\n")
f.write(f"log_level = '{LOG_LEVEL}'\n")
f.write(f"debug = {'true' if DEBUG_GUI else 'false'}")
revault_gui = os.path.join(
REVAULT_GUI_SRC_DIR, "target", "debug", "revault-gui"
)
dummysigner = os.path.join(
REVAULT_GUI_SRC_DIR,
"contrib",
"tools",
"dummysigner",
"target",
"debug",
"dummysigner",
)
revault_cli = os.path.join(REVAULTD_SRC_DIR, "target", "debug", "revault-cli")
aliases_file = os.path.join(BASE_DIR, "aliases.sh")
with open(aliases_file, "w") as f:
f.write('PS1="(Revault demo) $PS1"\n') # It's a hack it shouldn't be there
f.write(f"alias bd=\"bitcoind -datadir='{bd.bitcoin_dir}'\"\n")
f.write(
f"alias bcli=\"bitcoin-cli -datadir='{bd.bitcoin_dir}' -rpcwallet='{bd.rpc.wallet_name}'\"\n"
)
for i, stk in enumerate(rn.stk_wallets):
f.write(f'alias stk{i}cli="{revault_cli} --conf {stk.conf_file}"\n')
f.write(f'alias stk{i}d="{test_framework.revaultd.REVAULTD_PATH} --conf {stk.conf_file}"\n')
if WITH_GUI:
f.write(
f"alias stk{i}gui='{revault_gui} --conf {stk.gui_conf_file} > /dev/null'\n"
)
if WITH_ALL_HWS:
f.write(
f"alias stk{i}hw='{dummysigner} {stk.stk_keychain.hd.get_xpriv()} > /dev/null'\n"
)
for i, man in enumerate(rn.man_wallets):
f.write(f'alias man{i}cli="{revault_cli} --conf {man.conf_file}"\n')
f.write(f'alias man{i}d="{test_framework.revaultd.REVAULTD_PATH} --conf {man.conf_file}"\n')
if WITH_GUI:
f.write(
f"alias man{i}gui='{revault_gui} --conf {man.gui_conf_file} > /dev/null'\n"
)
if WITH_ALL_HWS:
f.write(
f"alias man{i}hw='{dummysigner} {man.man_keychain.hd.get_xpriv()} > /dev/null'\n"
)
for i, stkman in enumerate(rn.stkman_wallets):
f.write(
f'alias stkman{i}cli="{revault_cli} --conf {stkman.conf_file}"\n'
)
f.write(
f'alias stkman{i}d="{test_framework.revaultd.REVAULTD_PATH} --conf {stkman.conf_file}"\n'
)
if WITH_GUI:
f.write(
f"alias stkman{i}gui='{revault_gui} --conf {stkman.gui_conf_file} > /dev/null'\n"
)
if WITH_ALL_HWS:
f.write(
f"alias stkman{i}hwstk='{dummysigner} {stkman.stk_keychain.hd.get_xpriv()} > /dev/null'\n"
)
f.write(
f"alias stkman{i}hwman='{dummysigner} {stkman.man_keychain.hd.get_xpriv()} > /dev/null'\n"
)
            # A single 'hw' alias for a dummysigner holding all of the keys.
if WITH_GUI:
f.write(f"alias hw='{dummysigner} --conf {dummysigner_conf_file} > /dev/null'\n")
with open(aliases_file, "r") as f:
available_aliases = "".join(f.readlines()[1:])
print("Dropping you into a shell. Exit to end the session.", end="\n\n")
print(f"Available aliases: \n{available_aliases}\n")
# In any case clean up all daemons before exiting
try:
subprocess.call([SHELL, "--init-file", f"{aliases_file}", "-i"])
except Exception as e:
logging.error(f"Got error: '{str(e)}'")
logging.error(traceback.format_exc())
finally:
logging.info("Cleaning up Revault deployment")
rn.cleanup()
except Exception as e:
logging.error(f"Got error: '{str(e)}'")
logging.error(traceback.format_exc())
finally:
logging.info("Cleaning up bitcoind")
bd.cleanup()
def setup_logging():
log_level = logging.INFO
if LOG_LEVEL.lower() in ["debug", "info", "warning"]:
log_level = LOG_LEVEL.upper()
logging.basicConfig(level=log_level)
# Much hacky, much fancy
logging.addLevelName(
logging.INFO, f"\033[1;34m{logging.getLevelName(logging.INFO)}\033[1;0m"
)
logging.addLevelName(
logging.WARNING, f"\033[1;33m{logging.getLevelName(logging.WARNING)}\033[1;0m"
)
logging.addLevelName(
logging.ERROR, f"\033[1;31m{logging.getLevelName(logging.ERROR)}\033[1;0m"
)
def parse_args():
parser = argparse.ArgumentParser()
deploy_config = parser.add_argument_group("Deployment configuration")
deploy_config.add_argument(
"-stks",
"--stakeholders",
type=int,
help="The number of only-stakeholder",
required=True,
)
deploy_config.add_argument(
"-mans",
"--managers",
type=int,
help="The number of only-manager",
required=True,
)
deploy_config.add_argument(
"-stkmans",
"--stakeholder-managers",
type=int,
help="The number of both stakeholder-manager",
required=True,
)
deploy_config.add_argument(
"-csv",
"--timelock",
type=int,
help="The number of blocks during which an Unvault attempt can be canceled",
required=True,
)
deploy_config.add_argument(
"-mansthresh",
"--managers-threshold",
type=int,
)
deploy_config.add_argument(
"-cosigs",
"--with-cosigning-servers",
action="store_true",
help="Enable cosigning servers to allow Spend policies at the cost of weaker assumptions",
)
deploy_config.add_argument(
"-policy",
"--spending-policy",
action="append",
default=[],
dest="policies",
help="Enforce a spending policy on all watchtowers by specifying a path to a "
"watchtower plugin. Specify this option multiple times to enable multiple "
"policies.",
)
return parser.parse_args()
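# Example invocation (illustrative; the script name depends on how this file is saved):
#   python3 <script> -stks 1 -mans 1 -stkmans 1 -csv 6
# The long forms --stakeholders/--managers/--stakeholder-managers/--timelock are
# equivalent; --managers-threshold, --with-cosigning-servers and repeated
# --spending-policy paths are optional.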
if __name__ == "__main__":
setup_logging()
args = parse_args()
deploy(
args.stakeholders,
args.managers,
args.stakeholder_managers,
args.timelock,
args.managers_threshold,
args.with_cosigning_servers,
args.policies,
)
|
import unittest
import requests
import json
import os.path
from urllib.parse import urljoin
import itertools
import subprocess
import os
URL = 'http://localhost:8000/'
USERNAME_LOWER_LIMIT = 3
USERNAME_UPPER_LIMIT = 20
PASSWORD_LOWER_LIMIT = 6
PASSWORD_UPPER_LIMIT = 20
TITLE_LOWER_LIMIT = 5
TITLE_UPPER_LIMIT = 100
BODY_LOWER_LIMIT = 10
BODY_UPPER_LIMIT = 10000
DEVNULL = open(os.devnull, 'w')
class TestBlog(unittest.TestCase):
def setUp(self):
"""
Clear the database before every test.
"""
subprocess.call(["redis-cli", "flushall"], stdout=DEVNULL)
def createUser(self, username='test', password='<PASSWORD>'):
"""
Create a test user account.
"""
response = self.makeRequest('/user/create', {
'username': username,
'password': password
})
return {
'username': username,
'password': password,
**response
}
def removeUser(self, info):
return self.makeRequest('/user/remove', {
'username': info['username'],
'password': info['password']
})
def addPost(self, userInfo):
title = 'The title.'
body = 'The body message.'
response = self.makeRequest(
'/blog/add', {
'token': userInfo['token'],
'title': title,
'body': body
})
return {
'title': title,
'body': body,
**response
}
def titleBodyTest(self, url, data):
"""
        Test the lower and upper limits of the 'title' and 'body' values.
"""
self.limitsTest(url, 'title', TITLE_LOWER_LIMIT, TITLE_UPPER_LIMIT, {
**data,
'body': 'The body message.'
})
self.limitsTest(url, 'body', BODY_LOWER_LIMIT, BODY_UPPER_LIMIT, {
**data,
'title': 'The title.'
})
def limitsTest(self, url, propName, lower, upper, data):
# test the lower limit
response = self.makeRequest(url, {
**data,
propName: '1' * (lower - 1)
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test the upper limit
response = self.makeRequest(url, {
**data,
propName: '1' * (upper + 1)
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
def postTest(self, postId, author, title, body):
"""
        Check whether the post with the given ID has the expected author, title, and body.
"""
response = self.makeRequest('/blog/get/{0}'.format(postId))
self.assertEqual(response['success'], True)
self.assertEqual(response['post']['body'], body)
self.assertEqual(response['post']['author'], author)
self.assertEqual(response['post']['title'], title)
self.assertEqual('last_updated' in response['post'], True)
def makeRequest(self, path, data=None):
"""
Make a GET request if 'data' is not passed.
Otherwise do a POST request with the given 'data' dictionary.
"""
completeUrl = urljoin(URL, path)
if data is None:
r = requests.get(completeUrl)
else:
r = requests.post(completeUrl, data=data)
return json.loads(r.text)
def missingArguments(self, url, arguments):
"""
Test all combinations of missing arguments.
        The request shouldn't be successful (since it's missing at least 1 argument).
"""
for length in range(0, len(arguments)):
# get all the combinations of arguments possible (of different lengths)
combinations = itertools.combinations(arguments, length)
for combination in combinations:
data = {}
# construct a data argument to pass along (doesn't matter the actual data, just that we're passing along that argument)
for argument in combination:
data[argument] = '1'
# make a request with an incomplete set of arguments, it shouldn't work
response = self.makeRequest(url, data)
self.assertEqual(response['success'], False)
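    # Editorial note: for arguments=['username', 'password'] the loop above issues three
    # requests -- {}, {'username': '1'} and {'password': '1'} -- i.e. every proper subset
    # of the required arguments, each of which the endpoint must reject.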
def test_user_create(self):
url = '/user/create'
self.missingArguments(url, ['username', 'password'])
# test lower and upper limits of 'username' and 'password'
self.limitsTest(url, 'username', USERNAME_LOWER_LIMIT, USERNAME_UPPER_LIMIT, {
'password': '<PASSWORD>'
})
self.limitsTest(url, 'password', PASSWORD_LOWER_LIMIT, PASSWORD_UPPER_LIMIT, {
'username': 'aaa'
})
# create a new user
response = self.createUser()
self.assertEqual(response['success'], True)
self.assertEqual('message' in response, True)
self.assertEqual('token' in response, True)
# try to create the same user (shouldn't work since it already exists)
response = self.createUser()
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
self.assertEqual('token' not in response, True)
def test_user_login(self):
url = '/user/login'
self.missingArguments(url, ['username', 'password'])
# login with an existing account credentials
info = self.createUser()
response = self.makeRequest(url, {
'username': info['username'],
'password': info['password']
})
self.assertEqual(response['success'], True)
self.assertEqual('token' in response, True)
# login with correct username but incorrect password
response = self.makeRequest(url, {
'username': info['username'],
'password': '<PASSWORD>'
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
self.assertEqual('token' not in response, True)
# login with incorrect username and password
response = self.makeRequest(url, {
'username': 'sdsadsdsdsd',
'password': '<PASSWORD>'
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
self.assertEqual('token' not in response, True)
def test_user_remove(self):
url = '/user/remove'
info = self.createUser()
post = self.addPost(info)
username = info['username']
self.missingArguments(url, ['username', 'password'])
# try to remove with invalid username
response = self.makeRequest(url, {
'username': 'sdadasdsad',
'password': '<PASSWORD>'
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# try to remove with invalid password
response = self.makeRequest(url, {
'username': username,
'password': '<PASSWORD>'
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# remove a user properly and check if the post was removed as well
response = self.makeRequest(
'/blog/{0}/getall'.format(username))
self.assertEqual(len(response['posts_ids']), 1)
response = self.makeRequest(url, {
'username': username,
'password': info['password']
})
self.assertEqual(response['success'], True)
response = self.makeRequest(
'/blog/{0}/getall'.format(username))
self.assertEqual(response['success'], False)
def test_user_change_password(self):
url = '/user/change_password'
info = self.createUser()
newPass = '<PASSWORD>'
self.missingArguments(url, ['username', 'password', 'newPassword'])
# invalid username
response = self.makeRequest(
url, {
'username': 'sdsdsdsd',
'password': '<PASSWORD>',
'newPassword': '<PASSWORD>'
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# invalid password
response = self.makeRequest(url, {
'username': info['username'],
'password': '<PASSWORD>'
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# not a valid new password (test the lower and upper limits)
self.limitsTest(url, 'newPassword', PASSWORD_LOWER_LIMIT, PASSWORD_UPPER_LIMIT, {
'username': info['username'],
'password': info['password']
})
# correct usage
response = self.makeRequest(
url, {
'username': info['username'],
'password': info['password'],
                'newPassword': newPass
})
newToken = response['token']
self.assertEqual(response['success'], True)
self.assertEqual('token' in response, True)
# shouldn't be able to login with previous password
response = self.makeRequest('/user/login', {
'username': info['username'],
'password': info['password']
})
self.assertEqual(response['success'], False)
# but it should work with the new password
response = self.makeRequest('/user/login', {
'username': info['username'],
            'password': newPass
})
self.assertEqual(response['success'], True)
# the old token shouldn't work either
response = self.makeRequest(
'/blog/add', {
'token': info['token'],
'title': 'The title.',
'body': 'The body message.'
})
self.assertEqual(response['success'], False)
# the new token should
response = self.makeRequest('/blog/add', {
'token': newToken,
'title': 'The title.',
'body': 'The body message.'
})
self.assertEqual(response['success'], True)
def test_user_invalidate_tokens(self):
url = '/user/invalidate_tokens'
info = self.createUser()
initialToken = info['token']
self.missingArguments(url, ['username', 'password'])
# add a blog post
response = self.makeRequest(
'/blog/add', {
'token': initialToken,
'title': 'The title.',
'body': 'The body message.'
})
self.assertEqual(response['success'], True)
# invalidate the tokens
response = self.makeRequest(url, {
'username': info['username'],
'password': info['password']
})
newToken = response['token']
self.assertEqual(response['success'], True)
self.assertEqual('token' in response, True)
# shouldn't work now with old token
response = self.makeRequest(
'/blog/add', {
'token': initialToken,
'title': 'The title',
'body': 'The body message.'
})
self.assertEqual(response['success'], False)
# works with new token
response = self.makeRequest('/blog/add', {
'token': newToken,
'title': 'The title.',
'body': 'The body message.'
})
self.assertEqual(response['success'], True)
def test_user_getall(self):
url = '/user/getall'
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['users']), 0)
# add some users and check the length
user1 = self.createUser('test1')
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['users']), 1)
user2 = self.createUser('test2')
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['users']), 2)
# remove one user
self.removeUser(user2)
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['users']), 1)
def test_user_random(self):
url = '/user/random'
# no users yet
response = self.makeRequest(url)
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# one user added, should get that username
user = self.createUser('test1')
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(response['username'], 'test1')
self.assertEqual(len(response['posts_ids']), 0)
# add one post and then check if its returned when getting a random user
post = self.addPost(user)
response = self.makeRequest(url)
self.assertEqual(len(response['posts_ids']), 1)
self.assertEqual(int(response['posts_ids'][0]), post['post_id'])
def test_blog_add(self):
url = '/blog/add'
user = self.createUser()
title = 'The title.'
body = 'The body message.'
self.missingArguments(url, ['token', 'title', 'body'])
# shouldn't work with an incorrect token
response = self.makeRequest(url, {
'token': 'aaaa',
'title': title,
'body': body
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# there's a lower and upper limit to both the 'title' and 'body'
self.titleBodyTest(url, {
'token': user['token']
})
# correct usage
response = self.makeRequest(url, {
'token': user['token'],
'title': title,
'body': body
})
self.assertEqual(response['success'], True)
self.assertEqual('post_id' in response, True)
# try to get it with the given ID, and compare the values
self.postTest(response['post_id'], user['username'], title, body)
def test_blog_get(self):
url = '/blog/get/{0}'
# test with a string argument
response = self.makeRequest(url.format('a'))
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test with a non-existing ID
response = self.makeRequest(url.format(1))
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# correct usage
user = self.createUser()
post = self.addPost(user)
self.postTest(post['post_id'], user['username'],
post['title'], post['body'])
def test_blog_remove(self):
url = 'blog/remove'
user1 = self.createUser('test1')
user2 = self.createUser('test2')
self.missingArguments(url, ['token', 'blogId'])
# test invalid token
response = self.makeRequest(url, {
'token': 'aaaa',
'blogId': 1
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test non-existing blog id
response = self.makeRequest(url, {
'token': user1['token'],
'blogId': 1
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# try to remove a post that doesn't belong to you
post = self.addPost(user1)
postId = post['post_id']
response = self.makeRequest(url, {
'token': user2['token'],
'blogId': postId
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# remove a post and try to get it to confirm if it was removed
response = self.makeRequest('/blog/get/{0}'.format(postId))
self.assertEqual(response['success'], True)
response = self.makeRequest(url, {
'token': user1['token'],
'blogId': postId
})
self.assertEqual(response['success'], True)
response = self.makeRequest('/blog/get/{0}'.format(postId))
self.assertEqual(response['success'], False)
def test_blog_update(self):
url = '/blog/update'
user1 = self.createUser('test1')
user2 = self.createUser('test2')
title = 'The title.'
body = 'The body message.'
self.missingArguments(url, ['token', 'title', 'body', 'blogId'])
# test invalid token
response = self.makeRequest(url, {
'token': 'aaaa',
'blogId': 1,
'title': title,
'body': body
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test non-existing blog id
response = self.makeRequest(url, {
'token': user1['token'],
'blogId': 1,
'title': title,
'body': body
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# try to update a post that doesn't belong to you
post = self.addPost(user1)
postId = post['post_id']
response = self.makeRequest(url, {
'token': user2['token'],
'blogId': postId,
'title': title,
'body': body
})
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test the 'title' and 'body' limits
self.titleBodyTest(url, {
'token': user1['token'],
'blogId': postId
})
# update a post correctly
newTitle = 'The new title!'
newBody = 'The brand new body message!'
response = self.makeRequest(url, {
'token': user1['token'],
'blogId': postId,
'title': newTitle,
'body': newBody
})
self.assertEqual(response['success'], True)
# check if the changes were done
self.postTest(postId, user1['username'], newTitle, newBody)
def test_blog_username_getall(self):
url = '/blog/{0}/getall'
# test when username doesn't exist
response = self.makeRequest(url.format('a'))
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test with 0 posts
user = self.createUser()
username = user['username']
response = self.makeRequest(url.format(username))
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test with 1 post
post = self.addPost(user)
response = self.makeRequest(url.format(username))
self.assertEqual(response['success'], True)
self.assertEqual(len(response['posts_ids']), 1)
self.assertEqual(int(response['posts_ids'][0]), post['post_id'])
def test_blog_random(self):
url = '/blog/random'
# test with no posts yet
response = self.makeRequest(url)
self.assertEqual(response['success'], False)
self.assertEqual('message' in response, True)
# test with 1 post added
user = self.createUser()
post = self.addPost(user)
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(response['post']['title'], post['title'])
self.assertEqual(response['post']['body'], post['body'])
self.assertEqual(response['post']['author'], user['username'])
def test_blog_getall(self):
url = '/blog/getall'
# test with no posts yet
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['posts_ids']), 0)
# test with 1 post
user = self.createUser()
post1 = self.addPost(user)
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['posts_ids']), 1)
# test with 2
post2 = self.addPost(user)
response = self.makeRequest(url)
self.assertEqual(response['success'], True)
self.assertEqual(len(response['posts_ids']), 2)
if __name__ == '__main__':
unittest.main()
|
from __future__ import with_statement
import sys
import struct
import json
import ssl
import pytest
import gevent
from itertools import product
from gnsq import Nsqd, Message, states, errors
from gnsq import protocol as nsq
from gnsq.stream.stream import SSLSocket, DefalteSocket, SnappySocket
from mock_server import mock_server
from integration_server import NsqdIntegrationServer
BAD_GEVENT = all([
sys.version_info > (2, 7, 8),
sys.version_info < (3, 0),
gevent.version_info < (1, 0, 2),
])
def mock_response(frame_type, data):
body_size = 4 + len(data)
body_size_packed = struct.pack('>l', body_size)
frame_type_packed = struct.pack('>l', frame_type)
return body_size_packed + frame_type_packed + data
def mock_response_message(timestamp, attempts, id, body):
timestamp_packed = struct.pack('>q', timestamp)
attempts_packed = struct.pack('>h', attempts)
id = "%016d" % id
data = timestamp_packed + attempts_packed + id + body
return mock_response(nsq.FRAME_TYPE_MESSAGE, data)
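# Frame layout produced by the helpers above, following the NSQ wire protocol:
#   [4-byte big-endian size][4-byte big-endian frame type][payload]
# For FRAME_TYPE_MESSAGE the payload is itself
#   [8-byte timestamp][2-byte attempts][16-byte message id][body]
# so, e.g., mock_response_message(0, 1, 7, 'hi') reports a size of 4 + 8 + 2 + 16 + 2 = 32.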
def test_connection():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(1) == ''
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
assert conn.state == states.INIT
conn.connect()
assert conn.state == states.CONNECTED
conn.connect()
assert conn.state == states.CONNECTED
conn.close_stream()
assert conn.state == states.DISCONNECTED
def test_disconnected():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(1) == ''
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
conn.close_stream()
assert conn.state == states.DISCONNECTED
with pytest.raises(errors.NSQSocketError):
conn.nop()
with pytest.raises(errors.NSQSocketError):
conn.read_response()
@pytest.mark.parametrize('body', [
'hello world',
'',
'{"some": "json data"}',
])
def test_read(body):
@mock_server
def handle(socket, address):
socket.send(struct.pack('>l', len(body)))
socket.send(body)
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
assert conn._read_response() == body
conn.close_stream()
def test_identify():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(9) == 'IDENTIFY\n'
size = nsq.unpack_size(socket.recv(4))
data = json.loads(socket.recv(size))
assert 'gnsq' in data['user_agent']
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, 'OK'))
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
assert conn.identify() is None
def test_negotiation():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(9) == 'IDENTIFY\n'
size = nsq.unpack_size(socket.recv(4))
data = json.loads(socket.recv(size))
assert 'gnsq' in data['user_agent']
resp = json.dumps({'test': 42})
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, resp))
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
assert conn.identify()['test'] == 42
@pytest.mark.parametrize('command,args,resp', [
('subscribe', ('topic', 'channel'), 'SUB topic channel\n'),
('subscribe', ('foo', 'bar'), 'SUB foo bar\n'),
('ready', (0,), 'RDY 0\n'),
('ready', (1,), 'RDY 1\n'),
('ready', (42,), 'RDY 42\n'),
('finish', ('0000000000000000',), 'FIN 0000000000000000\n'),
('finish', ('deadbeafdeadbeaf',), 'FIN deadbeafdeadbeaf\n'),
('requeue', ('0000000000000000',), 'REQ 0000000000000000 0\n'),
('requeue', ('deadbeafdeadbeaf', 0), 'REQ deadbeafdeadbeaf 0\n'),
('requeue', ('deadbeafdeadbeaf', 42), 'REQ deadbeafdeadbeaf 42\n'),
('touch', ('0000000000000000',), 'TOUCH 0000000000000000\n'),
('touch', ('deadbeafdeadbeaf',), 'TOUCH deadbeafdeadbeaf\n'),
('close', (), 'CLS\n'),
('nop', (), 'NOP\n'),
])
def test_command(command, args, resp):
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(len(resp)) == resp
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
getattr(conn, command)(*args)
def test_publish():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(10) == 'PUB topic\n'
assert nsq.unpack_size(socket.recv(4)) == 3
assert socket.recv(3) == 'sup'
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
conn.publish('topic', 'sup')
def test_multipublish():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(11) == 'MPUB topic\n'
size = nsq.unpack_size(socket.recv(4))
data = socket.recv(size)
head, data = data[:4], data[4:]
assert nsq.unpack_size(head) == 2
for _ in xrange(2):
head, data = data[:4], data[4:]
assert nsq.unpack_size(head) == 3
head, data = data[:3], data[3:]
assert head == 'sup'
assert data == ''
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
conn.multipublish('topic', ['sup', 'sup'])
@pytest.mark.parametrize('error_msg,error,fatal', [
('E_INVALID cannot SUB in current state', 'NSQInvalid', True),
('E_BAD_BODY MPUB failed to read body size', 'NSQBadBody', True),
('E_BAD_TOPIC SUB topic name oh my god is not valid', 'NSQBadTopic', True),
('E_BAD_CHANNEL SUB channel name !! is not valid', 'NSQBadChannel', True),
('E_BAD_MESSAGE PUB failed to read message body', 'NSQBadMessage', True),
('E_PUT_FAILED PUT failed', 'NSQPutFailed', True),
('E_PUB_FAILED PUB failed', 'NSQPubFailed', True),
('E_MPUB_FAILED MPUB failed', 'NSQMPubFailed', True),
('E_AUTH_DISABLED AUTH Disabled', 'NSQAuthDisabled', True),
('E_AUTH_FAILED AUTH failed', 'NSQAuthFailed', True),
('E_UNAUTHORIZED AUTH No authorizations found', 'NSQUnauthorized', True),
('E_FIN_FAILED FIN failed', 'NSQFinishFailed', False),
('E_REQ_FAILED REQ failed', 'NSQRequeueFailed', False),
('E_TOUCH_FAILED TOUCH failed', 'NSQTouchFailed', False),
('some unknown error', 'NSQException', True),
])
def test_error(error_msg, error, fatal):
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
socket.send(mock_response(nsq.FRAME_TYPE_ERROR, error_msg))
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
frame, resp = conn.read_response()
assert frame == nsq.FRAME_TYPE_ERROR
assert isinstance(resp, getattr(errors, error))
assert conn.is_connected != fatal
def test_hashing():
conn1 = Nsqd('localhost', 1337)
conn2 = Nsqd('localhost', 1337)
assert conn1 == conn2
assert not (conn1 < conn2)
assert not (conn2 < conn1)
test = {conn1: True}
assert conn2 in test
def test_sync_receive_messages():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(9) == 'IDENTIFY\n'
size = nsq.unpack_size(socket.recv(4))
data = json.loads(socket.recv(size))
assert isinstance(data, dict)
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, 'OK'))
msg = 'SUB topic channel\n'
assert socket.recv(len(msg)) == msg
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, 'OK'))
for i in xrange(10):
assert socket.recv(6) == 'RDY 1\n'
body = json.dumps({'data': {'test_key': i}})
ts = i * 1000 * 1000
socket.send(mock_response_message(ts, i, i, body))
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
assert conn.identify() is None
conn.subscribe('topic', 'channel')
frame, data = conn.read_response()
assert frame == nsq.FRAME_TYPE_RESPONSE
assert data == 'OK'
for i in xrange(10):
conn.ready(1)
frame, msg = conn.read_response()
assert frame == nsq.FRAME_TYPE_MESSAGE
assert isinstance(msg, Message)
assert msg.timestamp == i * 1000 * 1000
assert msg.id == '%016d' % i
assert msg.attempts == i
assert json.loads(msg.body)['data']['test_key'] == i
def test_sync_heartbeat():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, '_heartbeat_'))
assert socket.recv(4) == 'NOP\n'
with handle as server:
conn = Nsqd(address='127.0.0.1', tcp_port=server.server_port)
conn.connect()
frame, data = conn.read_response()
assert frame == nsq.FRAME_TYPE_RESPONSE
assert data == '_heartbeat_'
def test_auth():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(5) == 'AUTH\n'
assert nsq.unpack_size(socket.recv(4)) == 6
assert socket.recv(6) == 'secret'
resp = json.dumps({'identity': 'awesome'})
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, resp))
with handle as server:
conn = Nsqd(
address='127.0.0.1',
tcp_port=server.server_port,
auth_secret='secret'
)
conn.connect()
resp = conn.auth()
assert resp['identity'] == 'awesome'
def test_identify_auth():
@mock_server
def handle(socket, address):
        assert socket.recv(4) == '  V2'
assert socket.recv(9) == 'IDENTIFY\n'
size = nsq.unpack_size(socket.recv(4))
data = json.loads(socket.recv(size))
assert 'gnsq' in data['user_agent']
resp = json.dumps({'auth_required': True})
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, resp))
assert socket.recv(5) == 'AUTH\n'
assert nsq.unpack_size(socket.recv(4)) == 6
assert socket.recv(6) == 'secret'
resp = json.dumps({'identity': 'awesome'})
socket.send(mock_response(nsq.FRAME_TYPE_RESPONSE, resp))
with handle as server:
conn = Nsqd(
address='127.0.0.1',
tcp_port=server.server_port,
auth_secret='secret'
)
@conn.on_auth.connect
def assert_auth(conn, response):
assert assert_auth.was_called is False
assert_auth.was_called = True
assert response['identity'] == 'awesome'
assert_auth.was_called = False
conn.connect()
resp = conn.identify()
assert resp['auth_required']
assert assert_auth.was_called
@pytest.mark.parametrize('tls,deflate,snappy', product((True, False), repeat=3))
@pytest.mark.slow
def test_socket_upgrades(tls, deflate, snappy):
with NsqdIntegrationServer() as server:
options = {
'address': server.address,
'tcp_port': server.tcp_port,
'deflate': deflate,
'snappy': snappy,
}
if tls:
options.update({
'tls_v1': True,
'tls_options': {
'keyfile': server.tls_key,
'certfile': server.tls_cert,
}
})
conn = Nsqd(**options)
conn.connect()
assert conn.state == states.CONNECTED
if deflate and snappy:
with pytest.raises(errors.NSQErrorCode):
conn.identify()
return
if tls and BAD_GEVENT:
with pytest.raises(AttributeError):
conn.identify()
return
if tls and server.version < (0, 2, 28):
with pytest.raises(ssl.SSLError):
conn.identify()
return
resp = conn.identify()
assert isinstance(resp, dict)
assert resp['tls_v1'] is tls
assert resp['deflate'] is deflate
assert resp['snappy'] is snappy
if tls and (deflate or snappy):
assert isinstance(conn.stream.socket._socket, SSLSocket)
elif tls:
assert isinstance(conn.stream.socket, SSLSocket)
if deflate:
assert isinstance(conn.stream.socket, DefalteSocket)
if snappy:
assert isinstance(conn.stream.socket, SnappySocket)
conn.publish('topic', 'sup')
frame, data = conn.read_response()
assert frame == nsq.FRAME_TYPE_RESPONSE
assert data == 'OK'
conn.subscribe('topic', 'channel')
frame, data = conn.read_response()
assert frame == nsq.FRAME_TYPE_RESPONSE
assert data == 'OK'
conn.ready(1)
frame, data = conn.read_response()
assert frame == nsq.FRAME_TYPE_MESSAGE
assert data.body == 'sup'
conn.close_stream()
@pytest.mark.slow
def test_cls_error():
with NsqdIntegrationServer() as server:
conn = Nsqd(address=server.address, tcp_port=server.tcp_port)
conn.connect()
assert conn.state == states.CONNECTED
conn.close()
frame, error = conn.read_response()
assert frame == nsq.FRAME_TYPE_ERROR
assert isinstance(error, errors.NSQInvalid)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 08:38:28 2020
pyqt realtime plot tutorial
source: https://www.learnpyqt.com/courses/graphics-plotting/plotting-pyqtgraph/
@author: nlourie
"""
from PyQt5 import QtWidgets, QtCore,uic
from pyqtgraph import PlotWidget, plot,QtGui
import pyqtgraph as pg
import sys # We need sys so that we can pass argv to QApplication
import os
from datetime import datetime
import numpy as np
from scipy import signal
import board
import busio
import adafruit_lps35hw
import time
from scipy import interpolate
#import monitor_utils as mu
# Initialize the i2c bus
i2c = busio.I2C(board.SCL, board.SDA)
# Using the adafruit_lps35hw class to read in the pressure sensor
# note the address must be in decimal.
# allowed addresses are:
# 92 (0x5c - if you put jumper from SDO to Gnd)
# 93 (0x5d - default)
p2 = adafruit_lps35hw.LPS35HW(i2c, address = 92)
p1 = adafruit_lps35hw.LPS35HW(i2c, address = 93)
p1.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
p2.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
mbar2cmh20 = 1.01972
# Now read out the pressure difference between the sensors
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print('')
print('Now zero the pressure:')
# Not sure why sometimes I have to do this twice??
p1.zero_pressure()
p1.zero_pressure()
time.sleep(1)
p2.zero_pressure()
p2.zero_pressure()
time.sleep(1)
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print()
def breath_detect_coarse(flow,fs,plotflag = False):
"""
%% This function detects peaks of flow signal
% Inputs:
% flow: flow signal
% fs: sampling frequency
% plotflag: set to 1 to plot
% Output:
% peak (location, amplitude)
% Written by: <NAME>, PhD
% Email: <EMAIL>
% Updated on: 12 Nov 2015.
% Ver: 1.0
# Converted to python by: <NAME>, PhD
# Email: <EMAIL>
# Updated on: April, 2020
"""
# detect peaks of flow signal
minpeakwidth = fs*0.3
peakdistance = fs*1.5
#print('peakdistance = ',peakdistance)
minPeak = 0.05 # flow threshold = 0.05 (L/s)
minpeakprominence = 0.05
peak_index, _ = signal.find_peaks(flow,
height = minPeak,
distance = peakdistance,
prominence = minpeakprominence,
width = minpeakwidth)
"""
valley_index, _ = signal.find_peaks(-1*flow,
height = minPeak,
distance = peakdistance,
prominence = minpeakprominence,
width = minpeakwidth)
"""
print('found peaks at index = ',peak_index)
return peak_index
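# Quick illustrative check of breath_detect_coarse (added example, not executed here):
# >>> fs_demo = 75  # Hz, matching the sensor data rate configured above
# >>> t_demo = np.arange(0, 30, 1.0 / fs_demo)
# >>> flow_demo = 0.5 * np.sin(2 * np.pi * 0.25 * t_demo)  # ~15 breaths per minute
# >>> breath_detect_coarse(flow_demo, fs_demo)  # returns the indices of the flow peaks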
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowTitle("Standalone Respiratory Monitor")
self.graph0 = pg.PlotWidget()
self.graph1 = pg.PlotWidget()
self.graph2 = pg.PlotWidget()
self.graph3 = pg.PlotWidget()
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.graph0)
layout.addWidget(self.graph1)
layout.addWidget(self.graph2)
layout.addWidget(self.graph3)
widget = QtWidgets.QWidget()
widget.setLayout(layout)
# make the window with a graph widget
#self.graph1 = pg.PlotWidget()
self.setCentralWidget(widget)
# set the plot properties
self.graph1.setBackground('k')
self.graph0.showGrid(x = True, y = True)
self.graph1.showGrid(x=True,y=True)
self.graph2.showGrid(x = True, y = True)
self.graph3.showGrid(x = True, y = True)
# Set the label properties with valid CSS commands -- https://groups.google.com/forum/#!topic/pyqtgraph/jS1Ju8R6PXk
labelStyle = {'color': '#FFF', 'font-size': '12pt'}
self.graph0.setLabel('left','P','cmH20',**labelStyle)
self.graph1.setLabel('left','Flow','L/s',**labelStyle)
self.graph3.setLabel('bottom', 'Time', 's', **labelStyle)
#self.graph2.setLabel('left', 'V raw','L',**labelStyle)
self.graph3.setLabel('left','V corr','L',**labelStyle)
# change the plot range
#self.graph0.setYRange(-30,30,padding = 0.1)
#self.graph1.setYRange(-2,2,padding = 0.1)
#self.graph3.setYRange(-0.5,1.5,padding = 0.1)
#self.graph3.setYRange(200,200,padding = 0.1)
        self.x = [0]
        self.t = [datetime.utcnow().timestamp()]
        self.dt = [0]
#self.y = [honeywell_v2f(chan.voltage)]
self.dp = [(p1.pressure - p2.pressure)*mbar2cmh20]
self.p1 = [(p1.pressure)*mbar2cmh20]
self.p2 = [(p2.pressure)*mbar2cmh20]
self.flow = [0]
self.vol = [0]
print('P1 = ',p1.pressure,' cmH20')
print('P2 = ',p2.pressure,' cmH20')
# plot data: x, y values
# make a QPen object to hold the marker properties
pen = pg.mkPen(color = 'y',width = 1)
pen2 = pg.mkPen(color = 'b',width = 2)
self.data_line01 = self.graph0.plot(self.dt,self.p1,pen = pen)
self.data_line02 = self.graph0.plot(self.dt,self.p2,pen = pen2)
self.data_line1 = self.graph1.plot(self.dt, self.flow,pen = pen)
# graph2
self.data_line21 = self.graph2.plot(self.dt,self.flow,pen = pen)
self.data_line22 = self.graph2.plot(self.dt,self.flow,pen = pen)
# graph3
self.data_line3 = self.graph3.plot(self.dt,self.vol,pen = pen)
self.calibrating = False
"""
# Slower timer
self.t_cal = 100
self.cal_timer = QtCore.QTimer()
self.cal_timer.setInterval(self.t_cal)
self.cal_timer.timeout.connect(self.update_cal)
self.cal_timer.start()
"""
# Stuff with the timer
self.t_update = 10 #update time of timer in ms
self.timer = QtCore.QTimer()
self.timer.setInterval(self.t_update)
self.timer.timeout.connect(self.update_plot_data)
self.timer.start()
self.drift_model = [0,datetime.utcnow().timestamp()/1000*self.t_update]
self.i_valleys = []
self.time_to_show = 30 #s
def update_plot_data(self):
# This is what happens every timer loop
if self.dt[-1] >= self.time_to_show:
self.x = self.x[1:] # Remove the first element
#self.y = self.y[1:] # remove the first element
self.dp = self.dp[1:]
self.t = self.t[1:] # remove the first element
self.dt= self.dt[1:]
self.p1 = self.p1[1:]
self.p2 = self.p2[1:]
self.vol = self.vol[1:]
self.flow = self.flow[1:]
self.x.append(self.x[-1] + 1) # add a new value 1 higher than the last
self.t.append(datetime.utcnow().timestamp())
self.dt = [(ti - self.t[0]) for ti in self.t]
dp_cmh20 = ((p1.pressure - p2.pressure))*mbar2cmh20
self.dp.append(dp_cmh20)
self.flow.append(dp_cmh20)
self.p1.append(p1.pressure*mbar2cmh20)
self.p2.append(p2.pressure*mbar2cmh20)
# remove any linear trend in the volume data since it's just nonsense.
        # This should zero it out okay if there's no noticeable "dips"
self.vol = signal.detrend(np.cumsum(self.flow))
self.fs = 1/(self.t[-1] - self.t[-2])
print('Sample Freq = ',self.fs)
negative_mean_subtracted_volume = [-1*(v-np.mean(self.vol)) for v in self.vol]
i_valleys = breath_detect_coarse(negative_mean_subtracted_volume,fs = self.fs,plotflag = False)
self.i_valleys = i_valleys
#print('i_valleys = ',self.i_valleys)
#print('datatype of i_valleys = ',type(self.i_valleys))
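        # Drift correction strategy: once at least two valleys (end-expiration points)
        # are found, interpolate linearly between them, extrapolate straight-line fits
        # before the first and after the last valley, and subtract that baseline from
        # the integrated volume so each breath starts near zero.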
if len(self.i_valleys) >= 2:
t = np.array(self.t)
vol = np.array(self.vol)
dt = np.array(self.dt)
print('found peaks at dt = ',dt[self.i_valleys])
#self.drift_model = np.polyfit(t[self.i_valleys],vol[self.i_valleys],1)
#self.v_drift = np.polyval(self.drift_model,t)
#self.vol_corr = vol - self.v_drift
#self.data_line22.setData(self.dt,self.v_drift)
self.drift_model = interpolate.interp1d(t[i_valleys],vol[i_valleys],kind = 'linear')
v_drift_within_spline = self.drift_model(t[i_valleys[0]:i_valleys[-1]])
v_drift = np.zeros(len(t))
            v_drift[0:self.i_valleys[1]] = np.polyval(np.polyfit(t[self.i_valleys[0:2]], vol[self.i_valleys[0:2]], 1), t[0:self.i_valleys[1]])
v_drift[self.i_valleys[0]:self.i_valleys[-1]] = v_drift_within_spline
v_drift[self.i_valleys[-1]:] = np.polyval(np.polyfit(t[self.i_valleys[-2:]],vol[self.i_valleys[-2:]],1),t[self.i_valleys[-1]:])
self.v_drift = v_drift
self.vol_corr = vol - v_drift
self.data_line22.setData(self.dt,self.v_drift)
else:
self.vol_corr = self.vol
self.data_line01.setData(self.dt,self.p1)
self.data_line02.setData(self.dt,self.p2)
self.data_line1.setData(self.dt,self.flow) #update the data
self.data_line21.setData(self.dt,self.vol)
self.data_line3.setData(self.dt,self.vol_corr)
"""
def update_cal(self) :
print ('len dt = ',len(self.dt))
if len(self.dt) > 50:
# try to run the monitor utils functions
fs = 1000/self.t_update
i_peaks,i_valleys,i_infl_points,vol_last_peak,flow,self.vol_corr,self.vol_offset,time,vol,drift_model = mu.get_processed_flow(np.array(self.t),np.array(self.y),fs,SmoothingParam = 0,smoothflag=True,plotflag = False)
if len(i_peaks) > 2:
self.drift_model = drift_model
print('updating calibration')
self.calibrating = True
self.data_line2.setData(self.dt,vol)
self.data_line5.setData(self.dt,np.polyval(self.drift_model,time))
self.data_line3.setData(self.dt,vol - np.polyval(self.drift_model,time))
print('drift model = ',self.drift_model)
"""
def main():
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
# MIT License
#
# Copyright (c) 2021 Emc2356
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
pathfinding from 1 point to another
"""
from typing import List, Tuple, Generator, Union, Dict, Sequence, Set, TypeVar, overload as TpOverload
import queue
Number = Union[int, float]
class _ASPos: # it is used for A* pathfinding
__slots__ = "i", "j", "v", "tp"
def __init__(self, idxs: Tuple[int, int], v: Number) -> None:
self.i: int = (idxs[0])
self.j: int = (idxs[1])
self.v: Number = v
self.tp: Tuple[int, int] = (self.i, self.j)
@property
def pos(self) -> Tuple[int, int]:
return self.i, self.j
def neighbors(self, grid: List[List["_ASPos"]]) -> Generator:
columns = len(grid)
rows = len(grid[0])
i = self.i
j = self.j
if i < columns - 1 and grid[self.i + 1][j].v == 0:
yield grid[self.i + 1][j]
if i > 0 and grid[self.i - 1][j].v == 0:
yield grid[self.i - 1][j]
if j < rows - 1 and grid[self.i][j + 1].v == 0:
yield grid[self.i][j + 1]
if j > 0 and grid[self.i][j - 1].v == 0:
yield grid[self.i][j - 1]
def __lt__(self, other):
return False
def __repr__(self) -> str:
return repr(f"${self.i} ${self.j} ${self.v}")
def AS_heuristic(p1: _ASPos, p2: _ASPos) -> Number: # it is used for A* pathfinding
return int(abs(p1.i - p2.i) + abs(p1.j - p2.j))
def pathfinding(
grid: List[List[int]], start: Union[List[int], Tuple[int, int], Sequence[int]],
end: Union[List[int], Tuple[int, int], Sequence[int]]
) -> Sequence[Tuple[float, float]]:
"""
    It finds the most efficient path from one point to another.
    A grid cell with value 0 is walkable; any value greater than 0 is treated as a wall.
:param start: Sequence[int]
:param end: Sequence[int]
:param grid: List[List[int]]
:return: List[Tuple[int, int]]
"""
ASPGrid: List[List[_ASPos]] = [[_ASPos((i, j), grid[i][j]) for j in range(len(grid[0]))] for i in range(len(grid))]
ASPstart: _ASPos = ASPGrid[start[0]][start[1]]
ASPend: _ASPos = ASPGrid[end[0]][end[1]]
count: int = 0
open_set: queue.PriorityQueue = queue.PriorityQueue()
open_set.put((0, count, ASPstart))
open_set_hash: Set[_ASPos] = {ASPstart}
came_from: Dict = {}
g_score: Dict[_ASPos, float] = {pos: float("inf") for row in ASPGrid for pos in row}
g_score[ASPstart] = 0
f_score: Dict[_ASPos, float] = {pos: float("inf") for row in ASPGrid for pos in row}
f_score[ASPstart] = AS_heuristic(ASPstart, ASPend)
current: _ASPos
while not open_set.empty(): # could be replaced with while open_set.qsize():
# get the best spot that we have available
current = open_set.get()[2]
open_set_hash.remove(current)
if current is ASPend:
path = []
while current in came_from:
current = came_from[current]
path.append(current.tp)
path.reverse()
return path
for neighbor in current.neighbors(ASPGrid): # type: _ASPos
temp_g_score = g_score[current] + 1
if temp_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = temp_g_score
f_score[neighbor] = temp_g_score + AS_heuristic(neighbor, ASPend)
if neighbor not in open_set_hash:
count += 1
open_set.put((f_score[neighbor], count, neighbor))
open_set_hash.add(neighbor)
return []
__all__ = ["pathfinding"]
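# Minimal usage sketch (added example, not part of the original module):
# cells with value 0 are walkable, anything greater is treated as a wall.
if __name__ == "__main__":
    demo_grid = [
        [0, 0, 0, 0],
        [1, 1, 0, 1],
        [0, 0, 0, 0],
        [0, 1, 1, 0],
    ]
    # Prints the visited cells from the start towards the end (the end cell itself is not included).
    print(pathfinding(demo_grid, start=(0, 0), end=(3, 3)))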
|
from rest_framework.test import APITestCase
from django.core.management import call_command
from django.core.management.base import CommandError
class BaseTestCase(APITestCase):
def init_data(self):
self.url = '/api/post/'
self.good_url = '/api/post/1'
self.good_data = {"title": "Test", "slug": "test", "content": "test"}
self.bad_url = '/api/post/15'
self.bad_data = {"bad_data": 69, "slug": "test", "content": "Test"}
def generate_api(self, format):
args = ['api']
opts = {'format': format, 'force': True}
call_command('generate', *args, **opts)
self.init_data()
def set_up(self):
response = self.client.post(self.url, self.good_data, format='json')
return (response, self.good_data)
def create_post(self):
response, data = self.set_up()
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data["title"], data["title"])
self.assertEqual(response.data["slug"], data["slug"])
self.assertEqual(response.data["content"], data["content"])
def create_post_error(self):
response = self.client.post(self.url, self.bad_data, format='json')
self.assertEqual(response.status_code, 400)
def list_post(self):
self.set_up()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def retrieve_post(self):
self.set_up()
response = self.client.get(self.good_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["title"], "Test")
def retrieve_post_error(self):
response = self.client.get(self.bad_url)
self.assertEqual(response.status_code, 404)
def update_post(self):
self.set_up()
response = self.client.put(self.good_url, self.good_data, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["content"], self.good_data["content"])
def update_post_error(self):
response = self.client.put(self.bad_url, self.good_data, format='json')
self.assertEqual(response.status_code, 404)
self.set_up()
response = self.client.put(self.good_url, self.bad_data, format='json')
self.assertEqual(response.status_code, 400)
def delete_post(self):
self.set_up()
response = self.client.delete(self.good_url)
self.assertEqual(response.status_code, 204)
def delete_post_error(self):
response = self.client.delete(self.good_url)
self.assertEqual(response.status_code, 404)
def run_tests(self, type):
print('\nTesting {} API'.format(type))
self.generate_api(type)
self.create_post()
self.create_post_error()
self.list_post()
self.retrieve_post()
self.retrieve_post_error()
self.update_post()
self.update_post_error()
self.delete_post()
self.delete_post_error()
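    # Note: the helper methods above deliberately lack the ``test_`` prefix, so the test
    # runner only reaches them through run_tests(), which each subclass below invokes for
    # one generated API style (apiview, function, viewset, modelviewset).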
class APIViewTest(BaseTestCase):
def test_apiview(self):
self.run_tests('apiview')
class FunctionViewTest(BaseTestCase):
def test_function(self):
self.run_tests('function')
class ViewSetTest(BaseTestCase):
def test_viewset(self):
self.run_tests('viewset')
class ModelViewSetTest(BaseTestCase):
def test_modelviewset(self):
self.run_tests('modelviewset')
class EdgeCaseTest(BaseTestCase):
def test_invalid_format(self):
try:
self.generate_api('asdf')
except Exception as e:
self.assertTrue(isinstance(e, CommandError))
|
from ctypes import *
from comtypes.hresult import S_OK, S_FALSE
from . import core as DbgEng
from . import exception
class DebugSymbols(object):
def __init__(self, symbols):
self._sym = symbols
exception.wrap_comclass(self._sym)
# IDebugSymbols
def GetSymbolOptions(self):
raise exception.E_NOTIMPL_Error
#hr = self._sym.GetSymbolOptions()
#exception.check_err(hr)
#return options
def AddSymbolOptions(self, options):
hr = self._sym.AddSymbolOptions(options)
exception.check_err(hr)
def RemoveSymbolOptions(self, options):
hr = self._sym.RemoveSymbolOptions(options)
exception.check_err(hr)
def SetSymbolOptions(self, options):
hr = self._sym.SetSymbolOptions(options)
exception.check_err(hr)
def GetNameByOffset(self, offset):
name = create_string_buffer(256)
size = c_ulong()
disp = c_ulonglong()
hr = self._sym.GetNameByOffset(offset, name, 256, byref(size), byref(disp))
exception.check_err(hr)
name = name[:size.value]
name = name.rstrip(b'\x00')
return (name, disp.value)
def GetOffsetByName(self, name):
if isinstance(name, str):
name = name.encode()
offset = c_ulonglong()
hr = self._sym.GetOffsetByName(name, byref(offset))
if hr == S_OK:
ret = True
elif hr == S_FALSE:
ret = False
else:
exception.check_err(hr)
return (ret, offset.value)
def GetNearNameByOffset(self):
raise exception.E_NOTIMPL_Error
def GetLineByOffset(self):
raise exception.E_NOTIMPL_Error
def GetOffsetByLine(self):
raise exception.E_NOTIMPL_Error
def GetNumberModules(self):
loaded = c_ulong()
unloaded = c_ulong()
hr = self._sym.GetNumberModules(byref(loaded), byref(unloaded))
exception.check_err(hr)
return (loaded.value, unloaded.value)
def GetModuleByIndex(self, index):
base = c_ulonglong()
hr = self._sym.GetModuleByIndex(index, byref(base))
exception.check_err(hr)
return base.value
def GetModuleByModuleName(self, name, start=0):
if isinstance(name, str):
name = name.encode()
index = c_ulong()
base = c_ulonglong()
hr = self._sym.GetModuleByModuleName(name, start, byref(index), byref(base))
exception.check_err(hr)
return (index.value, base.value)
def GetModuleByOffset(self, offset):
raise exception.E_NOTIMPL_Error
#hr = self._sym.GetModuleByModuleName()
#exception.check_err(hr)
#return (index, base)
def GetModuleNames(self, base, index=DbgEng.DEBUG_ANY_ID):
if index != DbgEng.DEBUG_ANY_ID:
base = 0
image_name = create_string_buffer(256)
image_size = c_ulong()
module_name = create_string_buffer(256)
module_size = c_ulong()
loaded_name = create_string_buffer(256)
loaded_size = c_ulong()
hr = self._sym.GetModuleNames(index, base,
image_name, 256, byref(image_size),
module_name, 256, byref(module_size),
loaded_name, 256, byref(loaded_size))
exception.check_err(hr)
image_name = image_name[:image_size.value].rstrip(b'\x00').decode()
module_name = module_name[:module_size.value].rstrip(b'\x00').decode()
loaded_name = loaded_name[:loaded_size.value].rstrip(b'\x00').decode()
return (image_name, module_name, loaded_name)
def GetModuleParameters(self, base):
bases = (c_ulonglong * 1)()
bases[0] = base
params = (DbgEng._DEBUG_MODULE_PARAMETERS * 1)()
hr = self._sym.GetModuleParameters(1, bases, 0, params)
exception.check_err(hr)
return params[0]
def GetSymbolModule(self, symbol):
raise exception.E_NOTIMPL_Error
#hr = self._sym.GetSymbolModule()
#exception.check_err(hr)
#return base
def GetTypeName(self):
raise exception.E_NOTIMPL_Error
def GetTypeId(self, name, module=0):
if isinstance(name, str):
name = name.encode()
typeid = c_ulong()
        hr = self._sym.GetTypeId(module, name, byref(typeid))
exception.check_err(hr)
return typeid.value
def GetTypeSize(self):
raise exception.E_NOTIMPL_Error
def GetFieldOffset(self):
raise exception.E_NOTIMPL_Error
def GetSymbolTypeId(self):
raise exception.E_NOTIMPL_Error
def GetOffsetTypeId(self):
raise exception.E_NOTIMPL_Error
def ReadTypedDataVirtual(self):
raise exception.E_NOTIMPL_Error
def WriteTypedDataVirtual(self):
raise exception.E_NOTIMPL_Error
def OutputTypedDataVirtual(self, offset, module, typeid, flags=0):
outctl = DbgEng.DEBUG_OUTCTL_ALL_CLIENTS
hr = self._sym.OutputTypedDataVirtual(outctl, offset, module, typeid, flags)
exception.check_err(hr)
def ReadTypedDataPhysical(self):
raise exception.E_NOTIMPL_Error
def WriteTypedDataPhysical(self):
raise exception.E_NOTIMPL_Error
def OutputTypedDataPhysical(self):
raise exception.E_NOTIMPL_Error
def GetScope(self):
raise exception.E_NOTIMPL_Error
def SetScope(self):
raise exception.E_NOTIMPL_Error
def ResetScope(self):
raise exception.E_NOTIMPL_Error
def GetScopeSymbolGroup(self):
raise exception.E_NOTIMPL_Error
def CreateSymbolGroup(self):
raise exception.E_NOTIMPL_Error
def StartSymbolMatch(self, pattern):
if isinstance(pattern, str):
pattern = pattern.encode()
handle = c_ulonglong()
hr = self._sym.StartSymbolMatch(pattern, byref(handle))
exception.check_err(hr)
return handle.value
def GetNextSymbolMatch(self, handle):
name = create_string_buffer(256)
size = c_ulong()
offset = c_ulonglong()
hr = self._sym.GetNextSymbolMatch(handle, name, 256, byref(size), byref(offset))
exception.check_err(hr)
return (offset.value, name[:size.value].rstrip(b'\x00'))
def EndSymbolMatch(self, handle):
hr = self._sym.EndSymbolMatch(handle)
exception.check_err(hr)
def Reload(self):
raise exception.E_NOTIMPL_Error
def GetSymbolPath(self):
raise exception.E_NOTIMPL_Error
#hr = self._sym.GetSymbolPath()
#exception.check_err(hr)
#return path
def SetSymbolPath(self, path):
raise exception.E_NOTIMPL_Error
#hr = self._sym.SetSymbolPath()
#exception.check_err(hr)
def AppendSymbolPath(self, addition):
raise exception.E_NOTIMPL_Error
#hr = self._sym.AppendSymbolPath()
#exception.check_err(hr)
def GetImagePath(self):
raise exception.E_NOTIMPL_Error
def SetImagePath(self):
raise exception.E_NOTIMPL_Error
def AppendImagePath(self):
raise exception.E_NOTIMPL_Error
def GetSourcePath(self):
raise exception.E_NOTIMPL_Error
def GetSourcePathElement(self):
raise exception.E_NOTIMPL_Error
def SetSourcePath(self):
raise exception.E_NOTIMPL_Error
def AppendSourcePath(self):
raise exception.E_NOTIMPL_Error
def FindSourceFile(self):
raise exception.E_NOTIMPL_Error
def GetSourceFileLineOffsets(self):
raise exception.E_NOTIMPL_Error
# IDebugSymbols2
def GetModuleVersionInformation(self):
raise exception.E_NOTIMPL_Error
def GetModuleNameString(self):
raise exception.E_NOTIMPL_Error
def GetConstantName(self):
raise exception.E_NOTIMPL_Error
def GetFieldName(self):
raise exception.E_NOTIMPL_Error
def GetTypeOptions(self):
raise exception.E_NOTIMPL_Error
def AddTypeOptions(self):
raise exception.E_NOTIMPL_Error
def RemoveTypeOptions(self):
raise exception.E_NOTIMPL_Error
def SetTypeOptions(self):
raise exception.E_NOTIMPL_Error
# IDebugSymbols3
def GetNameByOffsetWide(self):
raise exception.E_NOTIMPL_Error
def GetOffsetByNameWide(self):
raise exception.E_NOTIMPL_Error
def GetNearNameByOffsetWide(self):
raise exception.E_NOTIMPL_Error
def GetLineByOffsetWide(self):
raise exception.E_NOTIMPL_Error
def GetOffsetByLineWide(self):
raise exception.E_NOTIMPL_Error
def GetModuleByModuleNameWide(self):
raise exception.E_NOTIMPL_Error
def GetSymbolModuleWide(self):
raise exception.E_NOTIMPL_Error
def GetTypeNameWide(self):
raise exception.E_NOTIMPL_Error
def GetTypeIdWide(self):
raise exception.E_NOTIMPL_Error
def GetFieldOffsetWide(self):
raise exception.E_NOTIMPL_Error
def GetSymbolTypeIdWide(self):
raise exception.E_NOTIMPL_Error
def GetScopeSymbolGroup2(self):
raise exception.E_NOTIMPL_Error
def CreateSymbolGroup2(self):
raise exception.E_NOTIMPL_Error
def StartSymbolMatchWide(self):
raise exception.E_NOTIMPL_Error
def GetNextSymbolMatchWide(self):
raise exception.E_NOTIMPL_Error
def ReloadWide(self):
raise exception.E_NOTIMPL_Error
def GetSymbolPathWide(self):
raise exception.E_NOTIMPL_Error
def SetSymbolPathWide(self):
raise exception.E_NOTIMPL_Error
def AppendSymbolPathWide(self):
raise exception.E_NOTIMPL_Error
def GetImagePathWide(self):
raise exception.E_NOTIMPL_Error
def SetImagePathWide(self):
raise exception.E_NOTIMPL_Error
def AppendImagePathWide(self):
raise exception.E_NOTIMPL_Error
def GetSourcePathWide(self):
raise exception.E_NOTIMPL_Error
def GetSourcePathElementWide(self):
raise exception.E_NOTIMPL_Error
def SetSourcePathWide(self):
raise exception.E_NOTIMPL_Error
def AppendSourcePathWide(self):
raise exception.E_NOTIMPL_Error
def FindSourceFileWide(self):
raise exception.E_NOTIMPL_Error
def GetSourceFileLineOffsetsWide(self):
raise exception.E_NOTIMPL_Error
def GetModuleVersionInformationWide(self):
raise exception.E_NOTIMPL_Error
def GetModuleNameStringWide(self):
raise exception.E_NOTIMPL_Error
def GetConstantNameWide(self):
raise exception.E_NOTIMPL_Error
def GetFieldNameWide(self):
raise exception.E_NOTIMPL_Error
def IsManagedModule(self):
raise exception.E_NOTIMPL_Error
def GetModuleByModuleName2(self):
raise exception.E_NOTIMPL_Error
def GetModuleByModuleName2Wide(self):
raise exception.E_NOTIMPL_Error
def GetModuleByOffset2(self):
raise exception.E_NOTIMPL_Error
def AddSyntheticModule(self):
raise exception.E_NOTIMPL_Error
def AddSyntheticModuleWide(self):
raise exception.E_NOTIMPL_Error
def RemoveSyntheticModule(self):
raise exception.E_NOTIMPL_Error
def GetCurrentScopeFrameIndex(self):
raise exception.E_NOTIMPL_Error
def SetScopeFrameByIndex(self):
raise exception.E_NOTIMPL_Error
def SetScopeFromJitDebugInfo(self):
raise exception.E_NOTIMPL_Error
def SetScopeFromStoredEvent(self):
raise exception.E_NOTIMPL_Error
def OutputSymbolByOffset(self):
raise exception.E_NOTIMPL_Error
def GetFunctionEntryByOffset(self):
raise exception.E_NOTIMPL_Error
def GetFieldTypeAndOffset(self):
raise exception.E_NOTIMPL_Error
def GetFieldTypeAndOffsetWide(self):
raise exception.E_NOTIMPL_Error
def AddSyntheticSymbol(self):
raise exception.E_NOTIMPL_Error
def AddSyntheticSymbolWide(self):
raise exception.E_NOTIMPL_Error
def RemoveSyntheticSymbol(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntriesByOffset(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntriesByName(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntriesByNameWide(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntryByToken(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntryInformation(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntryString(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntryStringWide(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntryOffsetRegions(self):
raise exception.E_NOTIMPL_Error
def GetSymbolEntryBySymbolEntry(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntriesByOffset(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntriesByLine(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntriesByLineWide(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntryString(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntryStringWide(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntryOffsetRegions(self):
raise exception.E_NOTIMPL_Error
def GetSourceEntryBySourceEntry(self):
raise exception.E_NOTIMPL_Error
# IDebugSymbols4
def GetScopeEx(self):
raise exception.E_NOTIMPL_Error
def SetScopeEx(self):
raise exception.E_NOTIMPL_Error
def GetNameByInlineContext(self):
raise exception.E_NOTIMPL_Error
def GetNameByInlineContextWide(self):
raise exception.E_NOTIMPL_Error
def GetLineByInlineContext(self):
raise exception.E_NOTIMPL_Error
def GetLineByInlineContextWide(self):
raise exception.E_NOTIMPL_Error
def OutputSymbolByInlineContext(self):
raise exception.E_NOTIMPL_Error
# IDebugSymbols5
def GetCurrentScopeFrameIndexEx(self):
raise exception.E_NOTIMPL_Error
def SetScopeFrameByIndexEx(self):
raise exception.E_NOTIMPL_Error
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Import Splinter, BeautifulSoup, and Pandas
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
# In[2]:
# Set the executable path and initialize Splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# ### Visit the NASA Mars News Site
# In[3]:
# Visit the mars nasa news site
url = 'https://redplanetscience.com/'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# In[4]:
# Convert the browser html to a soup object and then quit the browser
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
# In[5]:
slide_elem.find('div', class_='content_title')
# In[6]:
# Use the parent element to find the first a tag and save it as `news_title`
news_title = slide_elem.find('div', class_='content_title').get_text()
news_title
# In[7]:
# Use the parent element to find the paragraph text
news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
news_p
# ### JPL Space Images Featured Image
# In[8]:
# Visit URL
url = 'https://spaceimages-mars.com'
browser.visit(url)
# In[9]:
# Find and click the full image button
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# In[10]:
# Parse the resulting html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
img_soup
# In[11]:
# find the relative image url
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
img_url_rel
# In[12]:
# Use the base url to create an absolute url
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
img_url
# ### Mars Facts
# In[13]:
df = pd.read_html('https://galaxyfacts-mars.com')[0]
df.head()
# In[14]:
df.columns=['Description', 'Mars', 'Earth']
df.set_index('Description', inplace=True)
df
# In[15]:
df.to_html()
# # D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles
# ### Hemispheres
# In[16]:
# 1. Use browser to visit the URL
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
# Parse the resulting html with soup
html = browser.html
ssoup = soup(html, 'html.parser')
ssoup
# In[17]:
# 2. Create a list to hold the images and titles.
hemisphere_image_urls = []
img_pages = []
# 3. Write code to retrieve the image urls and titles for each hemisphere.
for x in ssoup.find_all('a',class_='itemLink product-item'):
url_img = 'https://astrogeology.usgs.gov'+x.get("href")
if url_img not in img_pages:
img_pages.append(url_img)
for address in img_pages:
browser.visit(address)
a = browser.html
a = soup(a,'html.parser')
hemisphere_image_urls.append({'img_url':a.find('a',target='_blank',string="Sample").get('href'),'title':a.find('h2',class_='title').string})
# In[18]:
# 4. Print the list that holds the dictionary of each image url and title.
hemisphere_image_urls
# In[19]:
# 5. Quit the browser
browser.quit()
# In[ ]:
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Description: Script to create a new github repo
# ----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
# ----------------------------------------------------------------------------
import os
import argparse
import time
import github # PyGithub
#############################################################################################
# Convert str to bool
def argBool(s):
return s.lower() in ['true', 't', 'yes', '1']
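# e.g. argBool('True') -> True, argBool('yes') -> True, argBool('0') -> False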
# Set the argument parser
parser = argparse.ArgumentParser('Create New Project')
# Add arguments
parser.add_argument(
'--name',
type = str,
required = True,
help = 'New Repo name for https://github.com/ (example: slaclab/my-new-project)',
)
parser.add_argument(
'--token',
type = str,
required = False,
default = None,
help = 'Token for github'
)
parser.add_argument(
'--private',
type = argBool,
required = False,
default = True,
help = 'Privacy Setting for new repo: Set true (default) if private repo. Set false if public repo',
)
parser.add_argument(
'--org',
type = str,
required = False,
default = 'slaclab',
help = 'Name of Github organization repository (default: slaclab)',
)
parser.add_argument(
'--userRepo',
type = argBool,
required = False,
default = False,
help = 'Set true if you want to make the repo in your user\'s workspace instead of an organization workspace',
)
parser.add_argument(
'--submodules',
nargs = '+',
required = False,
default = ['<EMAIL>:slaclab/ruckus',
'<EMAIL>:slaclab/surf',],
help = 'List of submodules'
)
##########################
## Adding User Permissions
##########################
parser.add_argument(
'--adminUser',
nargs = '+',
required = False,
default = None,
help = 'List of admin users'
)
parser.add_argument(
'--writeUser',
nargs = '+',
required = False,
default = None,
help = 'List of write users'
)
parser.add_argument(
'--readUser',
nargs = '+',
required = False,
default = None,
help = 'List of read users'
)
##########################
## Adding Team Permissions
##########################
parser.add_argument(
'--adminTeam',
nargs = '+',
required = False,
default = [ ['slaclab','tid-air-es-admin'] ],
help = 'List of admin teams [org,team_name]'
)
parser.add_argument(
'--writeTeam',
nargs = '+',
required = False,
default = [ ['slaclab','tidaires'] ],
help = 'List of write teams [org,team_name]'
)
parser.add_argument(
'--readTeam',
nargs = '+',
required = False,
default = None,
help = 'List of read teams'
)
# Get the arguments
args = parser.parse_args()
#############################################################################################
def githubLogin():
# Inform the user that you are logging in
print('\nLogging into github....\n')
# Check if token arg defined
if args.token is not None:
# Inform the user that command line arg is being used
print('Using github token from command line arg.')
# Set the token value
token = args.token
# Check if token arg NOT defined
else:
# Set the token value from environmental variable
token = os.environ.get('GITHUB_TOKEN')
# Check if token is NOT defined
if token is None:
# Ask for the token from the command line prompt
            print('Enter your github token. If you do not have one you can generate it here:')
print(' https://github.com/settings/tokens')
print('You may set it in your environment as GITHUB_TOKEN\n')
# Set the token value
token = input('\nGithub token: ')
# Else the token was defined
else:
# Inform the user that you are using GITHUB_TOKEN
print('Using github token from user\'s environment.\n')
# Now that you have a token, log into Github
gh = github.Github(token)
# Return the github login object
return gh
#############################################################################################
def createNewRepo(gh):
# Check if creating repo in user's workspace
if args.userRepo:
# Get the user works space
workspace = gh.get_user()
# Else creating repo in organization space
else:
# Get the organization
workspace = gh.get_organization(args.org)
# Create the repo in the workspace
repo = workspace.create_repo(
name = args.name,
private = args.private,
auto_init = True,
)
# Inform the user that the repo was created
print(f'Created \"https://github.com/{repo.full_name}\" repo\n')
# Return the Github repo object
return repo
#############################################################################################
def setPermissions(gh,repo):
# Inform the user that you are logging in
print('Setting Git repo permissions...')
# Always set the current user who created the repo as admin
currentUser = gh.get_user().login
print( f'Current User Admin Permission: {currentUser}' )
repo.add_to_collaborators(
collaborator = currentUser,
permission = 'admin',
)
##########################
## Adding User Permissions
##########################
# Check for list of users with admin permissions
if args.adminUser is not None:
for user in args.adminUser:
print( f'User Admin Permission: {user}' )
repo.add_to_collaborators(
collaborator = user,
permission = 'admin',
)
# Check for list of users with write permissions
if args.writeUser is not None:
for user in args.writeUser:
print( f'User Write Permission: {user}' )
repo.add_to_collaborators(
collaborator = user,
permission = 'push',
)
# Check for list of users with read permissions
if args.readUser is not None:
for user in args.readUser:
print( f'User Read Permission: {user}' )
repo.add_to_collaborators(
collaborator = user,
permission = 'pull',
)
##########################
## Adding Team Permissions
##########################
# Check for list of teams with admin permissions
if args.adminTeam is not None:
for [orgName, teamName] in args.adminTeam:
print( f'Team Admin Permission: {orgName}/{teamName}' )
org = gh.get_organization(orgName)
team = org.get_team_by_slug(teamName)
team.set_repo_permission(repo, 'admin')
# Check for list of teams with write permissions
if args.writeTeam is not None:
for [orgName, teamName] in args.writeTeam:
print( f'Team Write Permission: {orgName}/{teamName}' )
org = gh.get_organization(orgName)
team = org.get_team_by_slug(teamName)
team.set_repo_permission(repo, 'push')
# Check for list of teams with read permissions
if args.readTeam is not None:
for [orgName, teamName] in args.readTeam:
print( f'Team Read Permission: {orgName}/{teamName}' )
org = gh.get_organization(orgName)
team = org.get_team_by_slug(teamName)
team.set_repo_permission(repo, 'pull')
print('\n')
#############################################################################################
def setupNewRepoStructure(repo):
# Setting up the new Github repo's file structure and submodules
print('Setting up the new Github repo\'s file structure and submodules...')
# Get the base ruckus directory
baseDir = os.path.realpath(__file__).split('scripts')[0]
# Add the LICENSE.txt
repo.create_file(
path = 'LICENSE.txt',
message = 'Adding License.txt',
content = open(f'{baseDir}/LICENSE.txt').read(),
)
# Add the .gitignore
repo.create_file(
path = '.gitignore',
message = 'Adding .gitignore',
content = open(f'{baseDir}/.gitignore').read(),
)
# Add the .gitattributes
repo.create_file(
path = '.gitattributes',
message = 'Adding .gitattributes',
content = open(f'{baseDir}/.gitattributes').read(),
)
# Check if submodule path(s) exist
if args.submodules is not None:
#####################################################
# I couldn't find a python API for submodules ...
# so I am going to do this step using an actual clone
# A.K.A. "brute force method"
#####################################################
time.sleep(10)
os.system(f'git clone --recursive <EMAIL>@github.com:{repo.full_name}')
os.system(f'cd {args.name}; mkdir firmware; cd firmware; mkdir submodules; git pull')
for submodule in args.submodules:
os.system(f'cd {args.name}/firmware/submodules; git submodule add {submodule}')
        os.system(f'cd {args.name}; git commit -m \"adding submodules\"; git push')
os.system(f'rm -rf {args.name}')
print('\n')
#############################################################################################
def setBranchProtection(repo):
# Create pre-release branch from main branch
print('Create pre-release branch from main branch...\n')
repo.create_git_ref(
ref = 'refs/heads/pre-release',
sha = repo.get_branch('main').commit.sha,
)
    # Set branch protection for the main and pre-release branches
    print('Setting branch protection for main and pre-release...\n')
for idx in ['main','pre-release']:
repo.get_branch(idx).edit_protection()
#############################################################################################
if __name__ == '__main__':
# Log into Github
gh = githubLogin()
# Create a new Github repo
repo = createNewRepo(gh)
# Set the User/Team permissions
setPermissions(gh,repo)
# Setup the new repo's structure
setupNewRepoStructure(repo)
# Set the branch protections
setBranchProtection(repo)
# Create first initial release
repo.create_git_release(
tag = 'v0.0.0',
name = 'Initial Release',
message = 'First Tagged Release',
draft =False,
)
print("Success!")
|
# Repository: lucasxlu/MMNet
"""
inference code
"""
import sys
import time
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from skimage import io
from torchvision.transforms import transforms
sys.path.append('../')
from models.vgg import MMNet
class MMNetRecognizer:
"""
MMNet Recognizer Class Wrapper
"""
def __init__(self, pretrained_model_path='MMNet.pth'):
model = MMNet()
model = model.float()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# model.load_state_dict(torch.load(pretrained_model_path))
if torch.cuda.device_count() > 1:
print("We are running on", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
model.load_state_dict(torch.load(pretrained_model_path))
else:
state_dict = torch.load(pretrained_model_path)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.to(device)
model.eval()
self.device = device
self.model = model
def infer(self, img_file):
tik = time.time()
img = io.imread(img_file)
img = Image.fromarray(img.astype(np.uint8))
preprocess = transforms.Compose([
transforms.Resize(227),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
img = preprocess(img)
img.unsqueeze_(0)
img = img.to(self.device)
e_pred, a_pred, r_pred, g_pred = self.model.forward(img)
tok = time.time()
_, e_predicted = torch.max(e_pred.data, 1)
_, a_predicted = torch.max(a_pred.data, 1)
_, r_predicted = torch.max(r_pred.data, 1)
_, g_predicted = torch.max(g_pred.data, 1)
if int(g_predicted.to("cpu")) == 0:
g_pred = 'male'
elif int(g_predicted.to("cpu")) == 1:
g_pred = 'female'
elif int(g_predicted.to("cpu")) == 2:
g_pred = 'unsure'
if int(r_predicted.to("cpu")) == 0:
r_pred = 'Caucasian'
elif int(r_predicted.to("cpu")) == 1:
r_pred = 'African-American'
elif int(r_predicted.to("cpu")) == 2:
r_pred = 'Asian'
if int(a_predicted.to("cpu")) == 0:
a_pred = '0-3'
elif int(a_predicted.to("cpu")) == 1:
a_pred = '4-19'
elif int(a_predicted.to("cpu")) == 2:
a_pred = '20-39'
elif int(a_predicted.to("cpu")) == 3:
a_pred = '40-69'
elif int(a_predicted.to("cpu")) == 4:
a_pred = '70+'
if int(e_predicted.to("cpu")) == 0:
e_pred = 'Surprise'
elif int(e_predicted.to("cpu")) == 1:
e_pred = 'Fear'
elif int(e_predicted.to("cpu")) == 2:
e_pred = 'Disgust'
elif int(e_predicted.to("cpu")) == 3:
e_pred = 'Happiness'
elif int(e_predicted.to("cpu")) == 4:
e_pred = 'Sadness'
elif int(e_predicted.to("cpu")) == 5:
e_pred = 'Anger'
elif int(e_predicted.to("cpu")) == 6:
e_pred = 'Neutral'
return {
'status': 0,
'message': 'success',
'elapse': tok - tik,
'results': {
'gender': g_pred,
'emotion': e_pred,
'race': r_pred,
'age': a_pred,
'elapse': tok - tik
}
}
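# Illustrative alternative (added sketch, not used by MMNetRecognizer above): the same
# index-to-label mapping expressed as lookup tables rather than the if/elif chains in infer().
GENDER_LABELS = {0: 'male', 1: 'female', 2: 'unsure'}
RACE_LABELS = {0: 'Caucasian', 1: 'African-American', 2: 'Asian'}
AGE_LABELS = {0: '0-3', 1: '4-19', 2: '20-39', 3: '40-69', 4: '70+'}
EMOTION_LABELS = {0: 'Surprise', 1: 'Fear', 2: 'Disgust', 3: 'Happiness',
                  4: 'Sadness', 5: 'Anger', 6: 'Neutral'}


def decode_predictions(e_idx, a_idx, r_idx, g_idx):
    """Map raw argmax indices to human-readable labels."""
    return (EMOTION_LABELS.get(int(e_idx)), AGE_LABELS.get(int(a_idx)),
            RACE_LABELS.get(int(r_idx)), GENDER_LABELS.get(int(g_idx)))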
if __name__ == '__main__':
mmnet_recognizer = MMNetRecognizer()
pprint(mmnet_recognizer.infer('test.jpg'))
|
"""
fonts
=====
.. module:: fonts
:platform: Unix, Windows
:synopsis: font utils
.. moduleauthor:: <NAME>
"""
import os
from autobasedoc import base_fonts
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfmetrics import getFont
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.fonts import addMapping
def registerFont(faceName, afm, pfb):
"""
Helvetica BUT AS AFM
The below section is NOT equal to::
_baseFontName ='Helvetica'
_baseFontNameB ='Helvetica-Bold'
_baseFontNameI ='Helvetica-Oblique'
_baseFontNameBI='Helvetica-BoldOblique'
    We will map afm files from matplotlib to pfb files from reportlab;
    this gives embedded Type1 face fonts.
"""
    afm = os.path.join(__font_dir__, afm + ".afm")
    pfb = os.path.join(__font_dir__, pfb + ".pfb")
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
pdfmetrics.registerTypeFace(face)
font = pdfmetrics.Font(faceName, faceName, 'WinAnsiEncoding')
pdfmetrics.registerFont(font)
def setTtfFonts(familyName,
font_dir,
normal=(None, None),
bold=(None, None),
italic=(None, None),
bold_italic=(None, None)):
"""
Sets fonts for True Type Fonts
"""
normalName, normalFile = normal
boldName, boldFile = bold
italicName, italicFile = italic
bold_italicName, bold_italicFile = bold_italic
pdfmetrics.registerFont(
TTFont(normalName, os.path.join(font_dir, normalFile)))
pdfmetrics.registerFont(TTFont(boldName, os.path.join(font_dir, boldFile)))
pdfmetrics.registerFont(
TTFont(italicName, os.path.join(font_dir, italicFile)))
pdfmetrics.registerFont(
TTFont(bold_italicName, os.path.join(font_dir, bold_italicFile)))
addMapping(familyName, 0, 0, normalName)
addMapping(familyName, 1, 0, boldName)
addMapping(familyName, 0, 1, italicName)
addMapping(familyName, 1, 1, bold_italicName)
base_fonts().update({"normal": getFont(normalName).fontName})
base_fonts().update({"bold": getFont(boldName).fontName})
base_fonts().update({"italic": getFont(italicName).fontName})
base_fonts().update({"bold_italic": getFont(bold_italicName).fontName})
def setFonts(typ):
"""
Sets fonts for standard font-types
:param typ: one of sans-serif-afm, serif (sans-serif is default on init)
:type typ: str
"""
if typ == 'sans-serif-afm':
baseNameDict = {
'Helvetica': "_a______",
'Helvetica-Bold': "_ab_____",
'Helvetica-Oblique': "_ai_____",
'Helvetica-BoldOblique': "_abi____"
}
for afm, pfb in baseNameDict.items():
faceName = afm
registerFont(faceName, afm, pfb)
base_fonts().update({
"normal": pdfmetrics.getFont('Helvetica').fontName
})
base_fonts().update({
"bold": pdfmetrics.getFont('Helvetica-Bold').fontName
})
base_fonts().update({
"italic": pdfmetrics.getFont('Helvetica-Oblique').fontName
})
base_fonts().update({
"bold_italic": pdfmetrics.getFont('Helvetica-BoldOblique').fontName
})
elif typ == 'serif':
setTtfFonts(
'Calibri',
__font_dir__,
normal=('Calibri', 'CALIBRI.TTF'),
            bold=('CalibriBd', 'CALIBRIB.TTF'),
            italic=('CalibriIt', 'CALIBRII.TTF'),
bold_italic=('CalibriBI', 'CALIBRIZ.TTF'))
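# Typical call order (a sketch, not authoritative): the 'sans-serif-afm' branch
# needs the AFM/PFB pairs described above, while the 'serif' branch expects the
# Calibri TTF files to be present in __font_dir__.
#
#     setFonts('sans-serif-afm')
#     print(base_fonts()['bold'])   # e.g. 'Helvetica-Bold'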
|
<reponame>mfkenson/swift
#!/usr/bin/env python
"""
@author <NAME>
"""
import swift as sw
import websockets
import asyncio
from threading import Thread
import webbrowser as wb
import json
import http.server
import socketserver
from pathlib import Path
import os
from queue import Empty
def start_servers(outq, inq, open_tab=True, browser=None):
# Start our websocket server with a new clean port
socket = Thread(
target=SwiftSocket, args=(outq, inq, ), daemon=True)
socket.start()
socket_port = inq.get()
# Start a http server
server = Thread(
target=SwiftServer, args=(outq, inq, socket_port, ), daemon=True)
server.start()
server_port = inq.get()
if open_tab:
if browser is not None:
try:
wb.get(browser).open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/'
+ str(socket_port))
except wb.Error:
print(
'\nCould not open specified browser, '
'using default instead\n')
wb.open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/'
+ str(socket_port))
else:
wb.open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/'
+ str(socket_port))
try:
inq.get(timeout=10)
except Empty:
print('\nCould not connect to the Swift simulator \n')
raise
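# A minimal wiring sketch for start_servers (an illustration, not part of the
# Swift API): two queues carry frames out to the browser and events back in.
# It opens a browser tab and raises if no client connects within 10 seconds,
# so it only makes sense with the bundled web assets available.
#
#     from queue import Queue
#     outq, inq = Queue(), Queue()
#     start_servers(outq, inq, open_tab=True)
#     outq.put({'hello': 'swift'})   # hypothetical payload, JSON-encoded by SwiftSocket.serve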
class SwiftSocket:
def __init__(self, outq, inq):
self.outq = outq
self.inq = inq
self.USERS = set()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
started = False
port = 51480
while not started and port < 62000:
try:
start_server = websockets.serve(self.serve, "localhost", port)
loop.run_until_complete(start_server)
started = True
except OSError:
port += 1
self.inq.put(port)
loop.run_forever()
async def register(self, websocket):
self.USERS.add(websocket)
async def serve(self, websocket, path):
# Initial connection handshake
await(self.register(websocket))
        received = await websocket.recv()
        self.inq.put(received)
        # Now onto the send/receive cycle
while True:
message = await self.producer()
await websocket.send(json.dumps(message))
            received = await websocket.recv()
            self.inq.put(received)
async def producer(self):
data = self.outq.get()
return data
class SwiftServer:
def __init__(self, outq, inq, socket_port, verbose=False):
server_port = 52000
self.inq = inq
root_dir = Path(sw.__file__).parent / 'public'
# os.chdir(Path.home())
os.chdir(Path.home().anchor)
class MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
if verbose:
http.server.SimpleHTTPRequestHandler.log_message(
self, format, *args)
else:
pass
def do_POST(self):
print(self)
def do_GET(self):
# home = str(Path.home())
if self.path == '/':
self.send_response(301)
self.send_header(
'Location', 'http://localhost:'
+ str(server_port)
+ '/'
+ str(socket_port))
self.end_headers()
return
if self.path == '/' + str(socket_port):
self.path = str(root_dir / 'index.html')
elif self.path.endswith('css') or self.path.endswith('js') \
or self.path.endswith('map'):
self.path = str(root_dir) + str(Path(self.path))
self.path = str(Path(self.path))
# if self.path.lower().startswith(home.lower()):
# self.path = self.path[len(home):]
# elif self.path.lower().startswith(home.lower()[2:]):
# self.path = self.path[len(home)-2:]
self.path = Path(self.path).as_posix()
return http.server.SimpleHTTPRequestHandler.do_GET(self)
Handler = MyHttpRequestHandler
connected = False
while not connected and server_port < 62000:
try:
with socketserver.TCPServer(
("", server_port), Handler) as httpd:
self.inq.put(server_port)
connected = True
httpd.serve_forever()
except OSError:
server_port += 1
|
<filename>src/wrapper/atari_wrapper.py<gh_stars>0
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
import tensorflow as tf
import json
from detect.backbone.tiny_darknet_fcn import yolo_net, load_from_binary
from detect.util.postprocessing import getboxes
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class WrapWarpFrame(gym.ObservationWrapper):
    # adds a frame-difference (optical flow) channel alongside the grey frame
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1*2), dtype=np.uint8)
self.previous_observation = None
self.current_observation = None
# self.diff_observation = None
def observation(self, frame):
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.current_observation is None:
self.previous_observation = self.current_observation = frame
else:
self.previous_observation = self.current_observation
self.current_observation = frame
diff_observation = self.opticalFlow(self.previous_observation, self.current_observation)[0]
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = frame[:,:,np.newaxis]
diff_observation = cv2.cvtColor(diff_observation, cv2.COLOR_RGB2GRAY)
diff_observation = diff_observation[:,:,np.newaxis]
frame = np.concatenate((frame, diff_observation), axis=2)
return frame#[:, :, None]
# return frame
def opticalFlow(self, prvsimg, nextimg):
hsv = np.zeros_like(nextimg)
hsv[..., 1] = 255
prvs = cv2.cvtColor(prvsimg, cv2.COLOR_BGR2GRAY)
n = cv2.cvtColor(nextimg, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, n, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
prvs = n
return [rgb, prvs]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def make_atari(env_id):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
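# Illustrative usage (assumes gym with the Atari ROMs installed and an
# environment id such as 'BreakoutNoFrameskip-v4'; not exercised here):
#
#     env = wrap_deepmind(make_atari('BreakoutNoFrameskip-v4'), frame_stack=True)
#     ob = env.reset()
#     print(np.array(ob).shape)   # (84, 84, 4): four stacked 84x84 grey frames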
class WarpFrameRGB(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.observation_space = spaces.Box(low=0, high=255,
shape=(84, 84, 12), dtype=np.uint8) # hack this part so that the graph is correctly built
def observation(self, frame):
frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
return frame
class WarpFrameRGBYolo(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.observation_space = spaces.Box(low=0, high=255,
shape=(84, 84, 24), dtype=np.uint8) # hack this part so that the graph is correctly built
def observation(self, frame):
frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
return frame
class WarpFrameRGBGreyFlow(gym.ObservationWrapper):
    # adds a frame-difference (optical flow) channel on top of the RGB frame
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1 * 4), dtype=np.uint8)
self.previous_observation = None
self.current_observation = None
# self.diff_observation = None
self.diff_observation_cache = []
def observation(self, frame):
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.current_observation is None:
self.previous_observation = self.current_observation = frame
else:
self.previous_observation = self.current_observation
self.current_observation = frame
diff_observation = self.opticalFlow(self.previous_observation, self.current_observation)[0]
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# frame = frame[:, :, np.newaxis]
diff_observation = cv2.cvtColor(diff_observation, cv2.COLOR_RGB2GRAY)
diff_observation = diff_observation[:, :, np.newaxis]
if len(self.diff_observation_cache) > 5:
del(self.diff_observation_cache[0])
self.diff_observation_cache.append(diff_observation)
diff_observation = np.array(self.diff_observation_cache).min(axis=0)
frame = np.concatenate((frame, diff_observation), axis=2)
return frame # [:, :, None]
# return frame
def opticalFlow(self, prvsimg, nextimg):
hsv = np.zeros_like(nextimg)
hsv[..., 1] = 255
prvs = cv2.cvtColor(prvsimg, cv2.COLOR_BGR2GRAY)
n = cv2.cvtColor(nextimg, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, n, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
prvs = n
return [rgb, prvs]
SCALE = 32
GRID_W, GRID_H = 7, 7
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH = GRID_H*SCALE, GRID_W*SCALE, 3
class WrapFrameRGBwithBoundingBox(gym.ObservationWrapper):
    # RGB frame plus a YOLO-detection channel (a white canvas with grey boxes, one grey level per label)
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 1 * 4), dtype=np.uint8)
self.yolo = None
self.sess = None
def build(self):
config_path = "detect_model/shijc_config_0505.json"
weights_path = "detect_model/shijc_weights_0505.binary"
with open(config_path) as config_buffer:
config = json.load(config_buffer)
self.config = config
self.image = tf.placeholder(shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH], dtype=tf.float32, name='image_placeholder')
self.yolo = yolo_net(self.image, False, n_class=len(config['model']['labels']), collections=[tf.GraphKeys.LOCAL_VARIABLES], trainable=False)
self.weights_path = weights_path
self.assign_kernel, self.assign_bias = load_from_binary(self.weights_path, offset=0)
def load(self, sess):
if self.sess == None:
self.sess = sess
# tf.reset_default_graph()
sess.run(self.assign_kernel + self.assign_bias)
def observation(self, frame):
        box_channel = np.ones(frame.shape[0:2], dtype=np.uint8) * 255
if self.sess != None :
org_img = frame
img = cv2.resize(org_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
img = img / 255.0
anchors = np.array(self.config['model']['anchors']).reshape(-1, 2)
data = self.sess.run(self.yolo, feed_dict={self.image: img})
boxes = getboxes(data, anchors, nclass=len(self.config['model']['labels']))
box_channel = self.draw_boxes_2(box_channel, boxes, self.config["model"]["labels"])
else :
print("no tf session found")
box_channel = cv2.resize(box_channel, (self.width, self.height), interpolation=cv2.INTER_AREA)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
box_channel = box_channel[:,:, np.newaxis]
frame = np.concatenate((frame, box_channel), axis=2)
return frame
def draw_boxes_2(self, img, boxes, labels):
for box in boxes:
xmin = int((box['x'] - box['w'] / 2) * img.shape[1])
xmax = int((box['x'] + box['w'] / 2) * img.shape[1])
ymin = int((box['y'] - box['h'] / 2) * img.shape[0])
ymax = int((box['y'] + box['h'] / 2) * img.shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.label_to_color(labels[box['label']]), 4)
return img
def label_to_color(self, label):
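        # Each known label gets its own grey level (8, 16, 24, ...) so the single
        # box channel can still distinguish object classes; unknown labels fall
        # back to 255, the same value as the empty white canvas.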
for i in range(len(self.config["model"]['labels'])):
if label == self.config["model"]['labels'][i] :
return int(i * 8 + 8)
return int(255)
# for vec env
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'build':
remote.close()
break
elif cmd == 'load':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
        self.closed = True
|
<reponame>nishadg246/stripstream-ivan-nishad<filename>robotics/openrave/tamp_fixed_base.py<gh_stars>0
from time import sleep
import numpy as np
from robotics.openrave.utils import solve_inverse_kinematics, \
set_manipulator_conf, Conf, Traj, manip_from_pose_grasp
from robotics.openrave.motion import has_mp, mp_birrt, mp_straight_line, linear_motion_plan, manipulator_motion_plan
from robotics.openrave.transforms import set_pose, \
object_trans_from_manip_trans, trans_from_point
# TODO - unify with fixed_tamp_holding
APPROACH_VECTOR = 0.15*np.array([0, 0, -1])
DISABLE_MOTIONS = False
DISABLE_MOTION_COLLISIONS = False
assert not DISABLE_MOTIONS or DISABLE_MOTION_COLLISIONS
if DISABLE_MOTIONS:
print 'Warning: trajectories are disabled'
if DISABLE_MOTION_COLLISIONS:
print 'Warning: trajectory collisions are disabled'
####################
def enable_all(all_bodies, enable): # Enables or disables all bodies for collision checking
for body in all_bodies:
body.Enable(enable)
####################
def cfree_pose_fn(env, body1, body2):
def cfree_pose(pose1, pose2): # Collision free test between an object at pose1 and an object at pose2
body1.Enable(True)
set_pose(body1, pose1.value)
body2.Enable(True)
set_pose(body2, pose2.value)
return not env.CheckCollision(body1, body2)
return cfree_pose
####################
def cfree_traj_fn(env, manipulator, body1, body2, all_bodies):
robot = manipulator.GetRobot()
def _cfree_traj_pose(traj, pose): # Collision free test between a robot executing traj and an object at pose
enable_all(all_bodies, False)
body2.Enable(True)
set_pose(body2, pose.value)
for conf in traj.value:
set_manipulator_conf(manipulator, conf)
if env.CheckCollision(robot, body2):
return False
return True
def _cfree_traj_grasp_pose(traj, grasp, pose): # Collision free test between an object held at grasp while executing traj and an object at pose
enable_all(all_bodies, False)
body1.Enable(True)
body2.Enable(True)
set_pose(body2, pose.value)
for conf in traj.value:
set_manipulator_conf(manipulator, conf)
manip_trans = manipulator.GetTransform()
set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.value))
if env.CheckCollision(body1, body2):
return False
return True
def cfree_traj(traj, pose): # Collision free test between a robot executing traj (which may or may not involve a grasp) and an object at pose
if DISABLE_MOTION_COLLISIONS:
return True
if traj.pose is not None and traj.pose == pose:
# This is the same pose of the manipulation
return True
return _cfree_traj_pose(traj, pose) and (traj.grasp is None or _cfree_traj_grasp_pose(traj, traj.grasp, pose))
return cfree_traj
####################
def sample_grasp_traj_fn(env, manipulator, body1, all_bodies):
robot = manipulator.GetRobot()
def sample_grasp_traj(pose, grasp): # Sample pregrasp config and motion plan that performs a grasp
enable_all(all_bodies, False)
body1.Enable(True)
set_pose(body1, pose.value)
manip_trans = manip_from_pose_grasp(pose.value, grasp.value)
grasp_conf = solve_inverse_kinematics(manipulator, manip_trans) # Grasp configuration
if grasp_conf is None:
return
if DISABLE_MOTIONS:
yield [(Conf(grasp_conf), Traj([]))]
return
set_manipulator_conf(manipulator, grasp_conf)
robot.Grab(body1)
pregrasp_trans = manip_trans.dot(trans_from_point(*APPROACH_VECTOR))
pregrasp_conf = solve_inverse_kinematics(manipulator, pregrasp_trans) # Pre-grasp configuration
if pregrasp_conf is None:
return
# Trajectory from grasp configuration to pregrasp
if has_mp():
path = mp_straight_line(robot, grasp_conf, pregrasp_conf)
else:
path = linear_motion_plan(robot, pregrasp_conf)
#grasp_traj = vector_traj_helper(env, robot, approach_vector)
#grasp_traj = workspace_traj_helper(base_manip, approach_vector)
robot.Release(body1)
if path is None:
return
grasp_traj = Traj(path)
grasp_traj.pose = pose
grasp_traj.grasp = grasp
yield [(Conf(pregrasp_conf), grasp_traj)]
return sample_grasp_traj
####################
def sample_free_motion_fn(manipulator, base_manip, all_bodies):
robot = manipulator.GetRobot()
def sample_free_motion(conf1, conf2): # Sample motion while not holding
if DISABLE_MOTIONS:
#traj = Traj([conf1.value, conf2.value])
traj = Traj([conf2.value])
traj.pose = None
traj.grasp = None
yield [(traj,)]
return
enable_all(all_bodies, False)
set_manipulator_conf(manipulator, conf1.value)
if has_mp():
path = mp_birrt(robot, conf1.value, conf2.value)
else:
#traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
path = manipulator_motion_plan(base_manip, manipulator, conf2.value, max_iterations=10)
if path is None:
return
traj = Traj(path)
traj.pose = None
traj.grasp = None
yield [(traj,)]
return sample_free_motion
####################
def sample_holding_motion_fn(manipulator, base_manip, body1, all_bodies):
robot = manipulator.GetRobot()
def sample_holding_motion(conf1, conf2, grasp): # Sample motion while holding
if DISABLE_MOTIONS:
#traj = Traj([conf1.value, conf2.value])
traj = Traj([conf2.value])
traj.pose = None
traj.grasp = grasp
yield [(traj,)]
return
enable_all(all_bodies, False)
body1.Enable(True)
set_manipulator_conf(manipulator, conf1.value)
manip_trans = manipulator.GetTransform()
set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.value))
robot.Grab(body1)
if has_mp():
path = mp_birrt(robot, conf1.value, conf2.value)
else:
#traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
path = manipulator_motion_plan(base_manip, manipulator, conf2.value, max_iterations=10)
robot.Release(body1)
if path is None:
return
traj = Traj(path)
traj.pose = None
traj.grasp = grasp
yield [(traj,)]
return sample_holding_motion
####################
def visualize_solution(env, problem, initial_conf, robot, manipulator, bodies, plan):
def _execute_traj(confs):
for j, conf in enumerate(confs):
set_manipulator_conf(manipulator, conf)
sleep(0.05)
#raw_input('%s/%s) Step?'%(j, len(confs)))
# Resets the initial state
set_manipulator_conf(manipulator, initial_conf.value)
for obj, pose in problem.initial_poses.iteritems():
set_pose(bodies[obj], pose.value)
raw_input('Start?')
for i, (action, args) in enumerate(plan):
#raw_input('\n%s/%s) Next?'%(i, len(plan)))
if action.name == 'move':
_, _, traj = args
_execute_traj(traj.value)
elif action.name == 'move_holding':
_, _, traj, _, _ = args
_execute_traj(traj.value)
elif action.name == 'pick':
obj, _, _, _, traj = args
_execute_traj(traj.value[::-1])
robot.Grab(bodies[obj])
_execute_traj(traj.value)
elif action.name == 'place':
obj, _, _, _, traj = args
_execute_traj(traj.value[::-1])
robot.Release(bodies[obj])
_execute_traj(traj.value)
else:
raise ValueError(action.name)
        env.UpdatePublishedBodies()
|
import dill
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from surprise import SVD, Reader, Dataset
from surprise.model_selection import GridSearchCV
from tensorflow.keras import layers, activations, models, optimizers, losses
from titlecase import titlecase
TFIDF_MATRIX_FILE = 'trained_models/recommendation/tfidf_matrix.pkl'
MOVIE_INDICES_FILE = 'trained_models/recommendation/movie_indices.pkl'
PREDICTED_RATING_SVD_MODEL_FILE = 'trained_models/recommendation/predicted_rating_svd.pkl'
QUANTILES_THRESHOLD = 0.95
PREDICTED_RATING_NN_WITH_EMBEDDING_MODEL = 'trained_models/recommendation/predicted_rating_nn_model'
PREDICTED_RATING_NN_WITH_EMBEDDING_RATING_SCALER_FILE = 'trained_models/recommendation/predicted_rating_nn_rating_scaler.pkl'
PREDICTED_RATING_NN_WITH_EMBEDDING_USER_ENCODER_FILE = 'trained_models/recommendation/predicted_rating_nn_user_encoder.pkl'
PREDICTED_RATING_NN_WITH_EMBEDDING_MOVIE_ENCODER_FILE = 'trained_models/recommendation/predicted_rating_nn_movie_encoder.pkl'
N_FACTORS = 10
# Demographic: trending based on popularity
def get_n_popular_movies(data, n):
return data.nlargest(n, 'popularity')[['id', 'original_title', 'genres', 'popularity', 'imdb_id']]
# Demographic: trending now based on IMDB weighted rating score
def get_n_trending_movies(data, n):
m = data['vote_count'].quantile(QUANTILES_THRESHOLD)
c = data['vote_average'].mean()
rating_movies = data.copy().loc[data['vote_count'] >= m]
rating_movies['rating_score'] = rating_movies.apply(lambda movie: calc_weighted_rating(movie, m, c), axis=1)
    # the dataset's most recent year is 2015, so "recent" here means release_year >= 2012 (the last three years)
recent_three_year_movies = rating_movies.loc[rating_movies['release_year'] >= 2012]
older_than_three_year_movies = rating_movies.loc[rating_movies['release_year'] < 2012]
mid = int(n / 2)
recent_three_year_movies = recent_three_year_movies.nlargest(mid, 'rating_score')
older_than_three_year_movies = older_than_three_year_movies.nlargest(n - mid, 'rating_score')
return pd.concat([recent_three_year_movies, older_than_three_year_movies])[
['id', 'original_title', 'genres', 'vote_count', 'vote_average', 'rating_score', 'imdb_id', 'release_year']]
# Demographic: trending based on IMDB weighted rating score
def get_n_rating_movies(data, n):
m = data['vote_count'].quantile(QUANTILES_THRESHOLD)
c = data['vote_average'].mean()
rating_movies = data.copy().loc[data['vote_count'] >= m]
rating_movies['rating_score'] = rating_movies.apply(lambda movie: calc_weighted_rating(movie, m, c), axis=1)
return rating_movies.nlargest(n, 'rating_score')[
['id', 'original_title', 'genres', 'vote_count', 'vote_average', 'rating_score', 'imdb_id']]
def calc_weighted_rating(movie, m, c):
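    # IMDB-style weighted rating: WR = (v*R + m*C) / (v + m), where v is the
    # movie's vote count, R its average vote, m the vote-count threshold (the
    # QUANTILES_THRESHOLD quantile) and C the mean vote across the dataset.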
v = movie['vote_count']
r = movie['vote_average']
return (v * r + m * c) / (v + m)
# Content based filtering: propose list of the most similar movies based on cosine similarity calculation
# between the words or text in vector form (use TF-IDF)
def calc_tfidf_matrix(data):
data['original_title'] = data['original_title'].str.strip()
data['overview'] = data['overview'].fillna('')
data['tagline'] = data['tagline'].fillna('')
# Merging original title, overview and tagline together
    data['description'] = data['original_title'] + ' ' + data['overview'] + ' ' + data['tagline']
tfidf = TfidfVectorizer(analyzer='word', stop_words='english')
tfidf_matrix = tfidf.fit_transform(data['description'])
# construct a reverse map of indices and movie original title
data['title'] = data['original_title'].str.lower()
movie_indices = pd.Series(data.index, index=data['title']).drop_duplicates()
save_obj(tfidf_matrix, TFIDF_MATRIX_FILE) # save tfidf matrix to file
save_obj(movie_indices, MOVIE_INDICES_FILE) # save movie indices to file
return
def get_n_similar_movies(movie, n):
tfidf_matrix = load_obj(TFIDF_MATRIX_FILE) # load tfidf matrix from file
movie_indices = load_obj(MOVIE_INDICES_FILE) # load movie indices from file
# calculate cosine similarity
cosine_similar = linear_kernel(tfidf_matrix, tfidf_matrix)
# Get the pairwise similarity scores of all movies with input movie
# And convert it into a list of tuples and sort by similarity score descending
similar_scores = list(enumerate(cosine_similar[movie_indices[movie.strip().lower()]]))
similar_scores.sort(key=lambda x: x[1], reverse=True)
# Get top n the movie indices exclude first movie (the input movie)
indices = [i[0] for i in similar_scores[1:(n + 1)]]
similar_movies = [titlecase(movie_indices.keys().values[i]) for i in indices]
return similar_movies
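# A self-contained toy sketch of the TF-IDF / cosine-similarity idea used above
# (hypothetical titles, independent of the movie dataset):
#
#     docs = ['space pirates in space', 'pirates of the sea', 'romantic comedy']
#     vectors = TfidfVectorizer(stop_words='english').fit_transform(docs)
#     sims = linear_kernel(vectors, vectors)  # cosine similarity: rows are L2-normalised
#     print(sims[0].argsort()[::-1][1])       # index of the document closest to docs[0]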
# Collaborative filtering: predict rating of user for a movie
# based on Matrix Factorization (user-rating information) calculation (use SVD)
def train_rating_model_with_svd(ratings):
reader = Reader()
data = Dataset.load_from_df(ratings[['userId', 'movieId', 'rating']], reader)
params = {'n_factors': np.arange(95, 100)}
gs = GridSearchCV(SVD, param_grid=params, measures=['rmse'], cv=5)
gs.fit(data)
svd = gs.best_estimator['rmse']
svd.fit(data.build_full_trainset())
save_obj(svd, PREDICTED_RATING_SVD_MODEL_FILE) # save model to file
return str(gs.best_params['rmse']), gs.best_score['rmse']
def predict_rating_with_svd(user_id, movie_id):
model = load_obj(PREDICTED_RATING_SVD_MODEL_FILE) # load model from file
return model.predict(user_id, movie_id).est
# Collaborative filtering: predict rating of user for a movie
# based on a neural collaborative filtering (use neural network with embedding layers)
def train_rating_model_with_neural_network(ratings):
ratings = ratings.drop(columns=['timestamp'])
num_users = len(ratings['userId'].unique()) # calc number of users
num_movies = len(ratings['movieId'].unique()) # calc number of movies
# normalize data
mm_scaler = MinMaxScaler()
ratings[['rating']] = mm_scaler.fit_transform(ratings[['rating']])
users = ratings[['userId']].values
movies = ratings[['movieId']].values
ratings = ratings[['rating']].values.reshape(-1)
# encode users
user_encoder = LabelEncoder()
users = user_encoder.fit_transform(users)
# encode movies
movie_encoder = LabelEncoder()
movies = movie_encoder.fit_transform(movies)
# define embedding layer for user
user_input = layers.Input(shape=(1,))
user_embed = layers.Embedding(num_users, N_FACTORS)(user_input)
user_vector = layers.Flatten()(user_embed)
# define embedding layer for movie
movie_input = layers.Input(shape=(1,))
movie_embed = layers.Embedding(num_movies, N_FACTORS)(movie_input)
movie_vector = layers.Flatten()(movie_embed)
# merge features
merge = layers.concatenate([user_vector, movie_vector])
layer = layers.Dropout(0.5)(merge)
# add fully connected layers with dropout
layer = layers.Dense(32, activation=activations.relu)(layer)
layer = layers.Dropout(0.5)(layer)
layer = layers.Dense(16, activation=activations.relu)(layer)
layer = layers.Dropout(0.5)(layer)
output = layers.Dense(1, activation=activations.sigmoid)(layer)
# create model
model = models.Model(inputs=[user_input, movie_input], outputs=output)
model.compile(optimizer=optimizers.Adam(), loss=losses.mean_squared_error)
# train model
history = model.fit([users, movies], ratings, validation_split=0.2, batch_size=32, epochs=20, verbose=1)
model.summary()
# save all to file
model.save(PREDICTED_RATING_NN_WITH_EMBEDDING_MODEL)
save_obj(mm_scaler, PREDICTED_RATING_NN_WITH_EMBEDDING_RATING_SCALER_FILE)
save_obj(user_encoder, PREDICTED_RATING_NN_WITH_EMBEDDING_USER_ENCODER_FILE)
save_obj(movie_encoder, PREDICTED_RATING_NN_WITH_EMBEDDING_MOVIE_ENCODER_FILE)
# visualization train loss / validate loss
visualize([{"train": history.history["loss"], "validate": history.history["val_loss"]}], ["Model Trained"],
["epoch"], ["loss"])
def predict_rating_with_nn(user_ids, movie_ids):
# load all from file
model = models.load_model(PREDICTED_RATING_NN_WITH_EMBEDDING_MODEL)
mm_scaler = load_obj(PREDICTED_RATING_NN_WITH_EMBEDDING_RATING_SCALER_FILE)
user_encoder = load_obj(PREDICTED_RATING_NN_WITH_EMBEDDING_USER_ENCODER_FILE)
movie_encoder = load_obj(PREDICTED_RATING_NN_WITH_EMBEDDING_MOVIE_ENCODER_FILE)
rating = model.predict([user_encoder.transform(user_ids), movie_encoder.transform(movie_ids)])
return mm_scaler.inverse_transform(rating).reshape(-1)
# Collaborative filtering: recommendation based on predicted rating of user
def get_n_recommended_movies_for_user(user_id, n, movies):
model = load_obj(PREDICTED_RATING_SVD_MODEL_FILE)
# calculate predicted rating of user for all movies
movies['predicted_rating'] = movies.apply(lambda x: model.predict(user_id, x['movieId']).est, axis=1)
return movies.nlargest(n, 'predicted_rating')[['movieId', 'title', 'genres', 'predicted_rating']]
def visualize(data, titles, xlabels, ylabels):
fig, axes = plt.subplots(len(titles), squeeze=False)
fig.suptitle('Visualization', fontsize=16)
for i in range(len(titles)):
axes[i, 0].set_title(titles[i])
axes[i, 0].set_xlabel(xlabels[i])
axes[i, 0].set_ylabel(ylabels[i])
for s in data[i].keys():
axes[i, 0].plot(data[i][s], label=s)
axes[i, 0].legend(loc="best")
axes[i, 0].grid()
plt.tight_layout()
plt.show()
def save_obj(obj, file_path):
with open(file_path, "wb") as f:
dill.dump(obj, f)
def load_obj(file_path):
with open(file_path, "rb") as f:
return dill.load(f)
# data_df = pd.read_csv('data/movies/tmdb_movies_data.csv')
# movies_df = pd.read_csv('data/movies/movies.csv')
# ratings_df = pd.read_csv('data/movies/ratings.csv')
# calc_tfidf_matrix(data_df)
# print(get_n_similar_movies(' jurassic world ', 10))
# print(train_rating_model_with_svd(ratings_df))
# print(predict_rating_with_svd(1, 47))
# print(get_n_recommended_movies_for_user(2, 5, movies_df))
# train_rating_model_with_neural_network(ratings_df)
# print(predict_rating_with_nn([1, 1], [1, 47]))
|
#! /usr/bin/env python
import sys, os
from socket import *
from select import select
import struct, zlib
import time
from common.msgstruct import *
from common.pixmap import decodepixmap
from common import hostchooser
import modes
from modes import KeyPressed, KeyReleased
import caching
# switch to udp_over_tcp if the udp socket didn't receive at least 60% of
# the packets sent by the server
UDP_EXPECTED_RATIO = 0.60
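# e.g. if a server ping reports 100 KiB sent over UDP but udpbytecounter has
# only seen 50 KiB, that 0.5 ratio is below UDP_EXPECTED_RATIO and msg_ping()
# counts the interval as bad; three bad intervals in a row trigger the switch.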
def loadpixmap(dpy, data, colorkey=None):
w, h, data = decodepixmap(data)
if colorkey is None:
colorkey = -1
elif colorkey < 0:
        r, g, b = struct.unpack("BBB", data[:3])
colorkey = <KEY>
return dpy.pixmap(w, h, data, colorkey)
class Icon:
alpha = 255
def __init__(self, playfield):
self.playfield = playfield
self.size = 0, 0
def __getattr__(self, attr):
if attr == 'pixmap':
self.pixmap = self.playfield.getpixmap(self.bmpcode)
if hasattr(self.playfield.dpy, 'getopticon'):
ico = self.playfield.dpy.getopticon(
self.pixmap, self.originalrect, self.alpha)
if ico is not None:
self.pixmap = ico
self.rect = None
return self.pixmap
elif attr in ('bmpcode', 'rect'):
raise KeyError, attr
elif attr == 'originalrect':
self.originalrect = self.rect
return self.originalrect
raise AttributeError, attr
def clear(self):
if self.__dict__.has_key('pixmap'):
del self.pixmap
class DataChunk(caching.Data):
SOURCEDIR = os.path.abspath(os.path.join(os.path.dirname(caching.__file__),
os.pardir))
CACHEDIR = os.path.join(SOURCEDIR, 'cache')
TOTAL = 0
def __init__(self, fileid):
caching.Data.__init__(self)
self.fileid = fileid
self.pending = []
self.progresshook = None
def server_md5(self, playfield, filename, position, length, checksum):
if not self.loadfrom(filename, position, length, checksum):
self.pending.append((0, position))
playfield.s.sendall(message(CMSG_DATA_REQUEST, self.fileid,
position, length))
def server_patch(self, position, data, lendata):
#print 'server_patch', self.fileid, position, len(data)
prev = DataChunk.TOTAL >> 10
DataChunk.TOTAL += lendata
total = DataChunk.TOTAL >> 10
if total != prev:
print "downloaded %dkb of data from server" % total
self.store(position, data)
try:
self.pending.remove((0, position))
except ValueError:
pass
else:
while self.pending and self.pending[0][0]:
callback = self.pending[0][1]
del self.pending[0]
callback(self)
def when_ready(self, callback):
if self.pending:
self.pending.append((1, callback))
else:
callback(self)
class Playfield:
TASKBAR_HEIGHT = 48
def __init__(self, s, sockaddr):
self.s = s
self.sockaddr = sockaddr
try:
self.s.setsockopt(SOL_IP, IP_TOS, 0x10) # IPTOS_LOWDELAY
except error, e:
print >> sys.stderr, "Cannot set IPTOS_LOWDELAY:", str(e)
try:
self.s.setsockopt(SOL_TCP, TCP_NODELAY, 1)
except error, e:
print >> sys.stderr, "Cannot set TCP_NODELAY:", str(e)
initialbuf = ""
while 1:
t = self.s.recv(200)
if not t and not hasattr(self.s, 'RECV_CAN_RETURN_EMPTY'):
                raise error, "connection closed"
initialbuf += t
if len(initialbuf) >= len(MSG_WELCOME):
head = initialbuf[:len(MSG_WELCOME)]
tail = initialbuf[len(MSG_WELCOME):]
if head != MSG_WELCOME:
raise error, "connected to something not a game server"
if '\n' in tail:
break
n = tail.index('\n')
line2 = tail[:n]
self.initialbuf = tail[n+1:]
self.gameident = line2.strip()
## self.datapath = None
## if self.gameident.endswith(']'):
## i = self.gameident.rfind('[')
## if i >= 0:
## self.gameident, self.datapath = (self.gameident[:i].strip(),
## self.gameident[i+1:-1])
print "connected to %r." % self.gameident
self.s.sendall(message(CMSG_PROTO_VERSION, 3))
def setup(self, mode, udp_over_tcp):
self.playing = {} # 0, 1, or 'l' for local
self.keys = {}
self.keycodes = {}
self.last_key_event = (None, None)
self.dpy = None
self.snd = None
self.pixmaps = {} # {bmpcode: dpy_pixmap}
self.bitmaps = {} # {bmpcode: (fileid_or_data, colorkey)}
self.icons = {}
self.sounds = {}
self.currentmusic = None
self.fileids = {}
self.sprites = []
self.playingsounds = {}
self.playericons = {}
self.screenmode = mode
self.initlevel = 0
if mode[-1].has_key('udp_over_tcp'):
udp_over_tcp = mode[-1]['udp_over_tcp']
self.trackcfgmtime = None
if mode[-1].has_key('cfgfile'):
self.trackcfgfile = mode[-1]['cfgfile']
else:
self.trackcfgfile = os.path.join(DataChunk.SOURCEDIR,
'http2', 'config.txt')
self.udpsock = None
self.udpsock_low = None
self.udpsock2 = None
self.accepted_broadcast = 0
self.tcpbytecounter = 0
self.udpbytecounter = 0
if udp_over_tcp == 1:
self.start_udp_over_tcp()
else:
self.pending_udp_data = None
if udp_over_tcp == 'auto':
self.udpsock_low = 0
self.dyndecompress = [[None, None, None, None] for i in range(8)]
self.dynnorepeat = None
def run(self, mode, udp_over_tcp='auto'):
self.setup(mode, udp_over_tcp)
try:
self.mainloop()
finally:
if self.dpy:
self.dpy.close()
try:
self.s.close()
except:
pass
def mainloop(self):
pss = hostchooser.serverside_ping()
self.initial_iwtd = [self.s, pss]
self.iwtd = self.initial_iwtd[:]
self.animdelay = 0.0
inbuf = self.process_inbuf(self.initialbuf)
self.initialbuf = ""
errors = 0
while 1:
if self.dpy:
self.processkeys()
iwtd, owtd, ewtd = select(self.iwtd, [], [], self.animdelay)
self.animdelay = 0.5
if self.dpy:
self.processkeys()
if self.s in iwtd:
inputdata = self.s.recv(0x6000)
self.tcpbytecounter += len(inputdata)
inbuf += inputdata
inbuf = self.process_inbuf(inbuf)
if self.dpy:
if self.udpsock in iwtd:
udpdata1 = None
while self.udpsock in iwtd:
try:
udpdata = self.udpsock.recv(65535)
except error, e:
print >> sys.stderr, e
errors += 1
if errors > 10:
raise
break
self.udpbytecounter += len(udpdata)
if len(udpdata) > 3 and '\x80' <= udpdata[0] < '\x90':
udpdata = self.dynamic_decompress(udpdata)
if udpdata is not None:
udpdata1 = udpdata
iwtd, owtd, ewtd = select(self.iwtd, [], [], 0)
if udpdata1 is not None:
self.update_sprites(udpdata1)
if self.udpsock2 in iwtd:
while self.udpsock2 in iwtd:
udpdata = self.udpsock2.recv(65535)
self.udpbytecounter += len(udpdata)
if udpdata == BROADCAST_MESSAGE:
if not self.accepted_broadcast:
self.s.sendall(message(CMSG_UDP_PORT, '*'))
self.accepted_broadcast = 1
#self.udpsock_low = None
udpdata = ''
iwtd, owtd, ewtd = select(self.iwtd, [], [], 0)
if udpdata and self.accepted_broadcast:
self.update_sprites(udpdata)
if self.pending_udp_data:
self.update_sprites(self.pending_udp_data)
self.pending_udp_data = ''
erasetb = self.taskbarmode and self.draw_taskbar()
d = self.dpy.flip()
if d:
self.animdelay = min(self.animdelay, d)
if self.snd:
d = self.snd.flop()
if d:
self.animdelay = min(self.animdelay, d)
if erasetb:
self.erase_taskbar(erasetb)
if pss in iwtd:
hostchooser.answer_ping(pss, self.gameident, self.sockaddr)
def process_inbuf(self, inbuf):
while inbuf:
values, inbuf = decodemessage(inbuf)
if not values:
break # incomplete message
fn = Playfield.MESSAGES.get(values[0], self.msg_unknown)
fn(self, *values[1:])
return inbuf
def dynamic_decompress(self, udpdata):
# Format of a UDP version 3 packet:
# header byte: 0x80 - 0x87 packet from thread 0 - 7
# or 0x88 - 0x8F reset packet from thread 0 - 7
# previous frame in same thread (1 byte)
# frame number (1 byte)
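        # e.g. a (hypothetical) packet '\x82' '\x10' '\x11' + zlib data means:
        # thread 2, not a reset, previous frame 0x10, this frame 0x11; a header
        # byte of '\x8A' would instead mean "reset packet for thread 2".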
thread = self.dyndecompress[ord(udpdata[0]) & 7]
# thread==[decompress, lastframenumber, recompressed, lastframedata]
prevframe = udpdata[1]
thisframe = udpdata[2]
#print '---'
#for t in self.dyndecompress:
# print repr(t)[:120]
#print
#print `udpdata[:3]`
if udpdata[0] >= '\x88':
# reset
d = zlib.decompressobj().decompress
if prevframe != thisframe: # if not global sync point
# sync point from a previous frame
# find all threads with the same prevframe
threads = [t for t in self.dyndecompress if prevframe == t[1]]
if not threads:
return None # lost
# find a thread with already-recompressed data
for t in threads:
if t[2]:
data = t[3]
break
else:
# recompress and cache the prevframe data
t = threads[0]
data = t[3]
co = zlib.compressobj(6)
data = co.compress(data) + co.flush(zlib.Z_SYNC_FLUSH)
t[2] = 1
t[3] = data
d(data) # use it to initialize the state of the decompressobj
#print d
thread[0] = d
elif prevframe != thread[1]:
#print 'lost'
return None # lost
else:
d = thread[0]
# go forward in thread
try:
framedata = d(udpdata[3:])
#print d
thread[1] = thisframe
thread[2] = 0
thread[3] = framedata
if thisframe == self.dynnorepeat:
return None
self.dynnorepeat = thisframe
return framedata
except zlib.error:
#print 'crash'
return None
def geticon(self, icocode):
try:
return self.icons[icocode]
except KeyError:
ico = self.icons[icocode] = Icon(self)
return ico
def getpixmap(self, bmpcode):
try:
return self.pixmaps[bmpcode]
except KeyError:
data, colorkey = self.bitmaps[bmpcode]
if type(data) is type(''):
data = zlib.decompress(data)
else:
if data.pending:
raise KeyError
data = data.read()
pixmap = loadpixmap(self.dpy, data, colorkey)
self.pixmaps[bmpcode] = pixmap
return pixmap
def update_sprites(self, udpdata):
sprites = self.sprites
unpack = struct.unpack
currentsounds = {}
base = 0
while udpdata[base+4:base+6] == '\xFF\xFF':
key, lvol, rvol = struct.unpack("!hBB", udpdata[base:base+4])
try:
snd = self.sounds[key]
except KeyError:
pass # ignore sounds with bad code (probably not defined yet)
else:
n = self.playingsounds.get(key)
if n:
currentsounds[key] = n-1
elif self.snd:
self.snd.play(snd,
lvol / 255.0,
rvol / 255.0)
currentsounds[key] = 4
base += 6
self.playingsounds = currentsounds
for j in range(len(sprites)):
if sprites[j][0] != udpdata[base:base+6]:
removes = sprites[j:]
del sprites[j:]
removes.reverse()
eraser = self.dpy.putppm
for reserved, eraseargs in removes:
eraser(*eraseargs)
break
base += 6
#print "%d sprites redrawn" % (len(udpdata)/6-j)
try:
overlayer = self.dpy.overlayppm
except AttributeError:
getter = self.dpy.getppm
setter = self.dpy.putppm
for j in range(base, len(udpdata)-5, 6):
info = udpdata[j:j+6]
x, y, icocode = unpack("!hhh", info[:6])
try:
ico = self.icons[icocode]
sprites.append((info, (x, y, getter((x, y) + ico.size))))
setter(x, y, ico.pixmap, ico.rect)
except KeyError:
#print "bad ico code", icocode
pass # ignore sprites with bad ico (probably not defined yet)
else:
for j in range(base, len(udpdata)-5, 6):
info = udpdata[j:j+6]
x, y, icocode = unpack("!hhh", info[:6])
try:
ico = self.icons[icocode]
overlay = overlayer(x, y, ico.pixmap, ico.rect, ico.alpha)
sprites.append((info, overlay))
except KeyError:
#print "bad ico code", icocode
pass # ignore sprites with bad ico (probably not defined yet)
t0, n = self.painttimes
n = n + 1
if n == 50:
t = time.time()
t, t0 = t-t0, t
if t:
print "%.2f images per second, %.1f kbytes per second" % (
float(n)/t,
float(self.tcpbytecounter+self.udpbytecounter)/1024/t)
self.tcpbytecounter = -self.udpbytecounter
n = 0
self.painttimes = t0, n
def get_taskbar(self):
y0 = self.height - self.TASKBAR_HEIGHT
iconlist = []
f = 1.5 * time.time()
f = f-int(f)
pi = self.playericons.items()
pi.sort()
xpos = 0
for id, ico in pi:
if self.playing.get(id) != 'l':
w, h = ico.size
xpos += int(w * 5 / 3)
if not self.playing.get(id):
y = self.height - h
if self.keydefinition and id == self.keydefinition[0]:
num, icons = self.keys[self.nextkeyname()]
ico = icons[int(f*len(icons))-1]
y = y0 + int((self.TASKBAR_HEIGHT-ico.size[1])/2)
self.animdelay = 0.04
iconlist.append((xpos-w, y, ico, id))
pi.reverse()
f = f * (1.0-f) * 4.0
xpos = self.width
for id, ico in pi:
if self.playing.get(id) == 'l':
w, h = ico.size
xpos -= int(w * 5 / 3)
dy = self.TASKBAR_HEIGHT - h - 1
y = self.height - h - int(dy*f)
iconlist.append((xpos, y, ico, id))
self.animdelay = 0.04
return y0, iconlist
def clic_taskbar(self, (cx,cy)):
y0, icons = self.get_taskbar()
if cy >= y0:
for x, y, ico, id in icons:
if x <= cx < x+ico.size[0]:
return id
return None
def draw_taskbar(self):
y0, icons = self.get_taskbar()
rect = (0, y0, self.width, self.TASKBAR_HEIGHT)
bkgnd = self.dpy.getppm(rect)
self.dpy.taskbar(rect)
for x, y, ico, id in icons:
try:
self.dpy.putppm(x, y, ico.pixmap, ico.rect)
except KeyError:
pass
return y0, bkgnd
def erase_taskbar(self, (y0, bkgnd)):
self.dpy.putppm(0, y0, bkgnd)
def nextkeyname(self):
pid, df = self.keydefinition
undef = [(num, keyname) for keyname, (num, icons) in self.keys.items()
if not df.has_key(keyname) and icons]
if undef:
num, keyname = min(undef)
return keyname
else:
return None
def startplaying(self):
args = ()
if hasattr(self.s, 'udp_over_udp_mixer'):
# for SocketOverUdp: reuse the UDP address
port = self.s.getsockname()[1]
self.udpsock_low = None
self.s.udp_over_udp_decoder = self.udp_over_udp_decoder
self.start_udp_over_tcp()
elif self.pending_udp_data is not None:
port = MSG_INLINE_FRAME
else:
if '*udpsock*' in PORTS:
self.udpsock, (host, port) = PORTS['*udpsock*']
args = (host,)
else:
self.udpsock = socket(AF_INET, SOCK_DGRAM)
self.udpsock.bind(('', PORTS.get('CLIENT', INADDR_ANY)))
host, port = self.udpsock.getsockname()
# Send a dummy UDP message to the server. Some NATs will
# then let through the UDP messages from the server.
self.udpsock.sendto('.', self.s.getpeername())
self.iwtd.append(self.udpsock)
self.initial_iwtd.append(self.udpsock)
if 'sendudpto' in PORTS:
args = (PORTS['sendudpto'],)
outbound = []
outbound.append(message(CMSG_UDP_PORT, port, *args))
if self.snd and self.snd.has_music:
outbound.append(message(CMSG_ENABLE_MUSIC, 1))
outbound.append(message(CMSG_PING))
self.s.sendall(''.join(outbound))
def start_udp_over_tcp(self):
self.pending_udp_data = ''
self.udp_over_tcp_decompress = zlib.decompressobj().decompress
self.udpsock_low = None
for name in ('udpsock', 'udpsock2'):
sock = getattr(self, name)
if sock is not None:
try:
self.iwtd.remove(sock)
except ValueError:
pass
try:
self.initial_iwtd.remove(sock)
except ValueError:
pass
sock.close()
setattr(self, name, None)
def udp_over_udp_decoder(self, udpdata):
if len(udpdata) > 3 and '\x80' <= udpdata[0] < '\x90':
data = self.dynamic_decompress(udpdata)
if data:
self.pending_udp_data = data
def processkeys(self):
keyevents = self.dpy.keyevents()
if keyevents:
now = time.time()
pending = {}
for keysym, event in keyevents:
pending[keysym] = event
for keysym, event in pending.items():
code = self.keycodes.get((keysym, event))
if code and self.playing.get(code[0]) == 'l':
if (code == self.last_key_event[0] and
now - self.last_key_event[1] < 0.77):
continue # don't send too much events for auto-repeat
self.last_key_event = code, now
self.s.sendall(code[1])
elif self.keydefinition:
self.define_key(keysym)
pointermotion = self.dpy.pointermotion()
if pointermotion:
x, y = pointermotion
self.settaskbar(y >= self.height - 2*self.TASKBAR_HEIGHT)
mouseevents = self.dpy.mouseevents()
if mouseevents:
self.settaskbar(1)
self.keydefinition = None
for clic in mouseevents:
clic_id = self.clic_taskbar(clic)
if clic_id is not None:
if self.playing.get(clic_id) == 'l':
self.s.sendall(message(CMSG_REMOVE_PLAYER, clic_id))
else:
self.keydefinition = clic_id, {}
if self.taskbartimeout is not None and time.time() > self.taskbartimeout:
self.settaskbar(0)
def settaskbar(self, nmode):
self.taskbartimeout = None
if self.taskbarfree:
self.taskbarmode = (nmode or
'l' not in self.playing.values() or
(self.keydefinition is not None))
if nmode:
self.taskbartimeout = time.time() + 5.0
if hasattr(self.dpy, 'settaskbar'):
self.dpy.settaskbar(self.taskbarmode)
def define_key(self, keysym):
clic_id, df = self.keydefinition
if keysym in df.values():
return
df[self.nextkeyname()] = keysym
if self.nextkeyname() is not None:
return
self.keydefinition = None
self.s.sendall(message(CMSG_ADD_PLAYER, clic_id))
for keyname, (num, icons) in self.keys.items():
if keyname[:1] == '-':
event = KeyReleased
keyname = keyname[1:]
else:
event = KeyPressed
if df.has_key(keyname):
keysym = df[keyname]
self.keycodes[keysym, event] = \
clic_id, message(CMSG_KEY, clic_id, num)
def msg_unknown(self, *rest):
print >> sys.stderr, "?"
def msg_player_join(self, id, local, *rest):
if local:
self.playing[id] = 'l'
self.settaskbar(0)
self.checkcfgfile(1)
else:
self.playing[id] = 1
def msg_player_kill(self, id, *rest):
self.playing[id] = 0
for key, (pid, msg) in self.keycodes.items():
if pid == id:
del self.keycodes[key]
def msg_broadcast_port(self, port):
if self.pending_udp_data is not None:
return
if self.udpsock2 is not None:
try:
self.iwtd.remove(self.udpsock2)
except ValueError:
pass
try:
self.initial_iwtd.remove(self.udpsock2)
except ValueError:
pass
self.udpsock2.close()
self.udpsock2 = None
self.accepted_broadcast = 0
try:
self.udpsock2 = socket(AF_INET, SOCK_DGRAM)
self.udpsock2.bind(('', port))
self.udpsock2.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
except error, e:
print "Cannot listen on the broadcast port %d" % port, str(e)
self.udpsock2 = None
else:
self.iwtd.append(self.udpsock2)
self.initial_iwtd.append(self.udpsock2)
def msg_def_playfield(self, width, height, backcolor=None,
gameident=None, *rest):
#if self.snd is not None:
# self.snd.close()
if self.dpy is not None:
# clear all pixmaps
for ico in self.icons.values():
ico.clear()
self.pixmaps.clear()
self.dpy.close()
del self.sprites[:]
self.width = width
self.height = height
if gameident:
self.gameident = gameident
self.dpy = modes.open_dpy(self.screenmode, width, height, self.gameident)
self.snd = self.snd or modes.open_snd(self.screenmode)
if self.snd:
self.s.sendall(message(CMSG_ENABLE_SOUND))
self.iwtd = self.dpy.selectlist() + self.initial_iwtd
self.dpy.clear() # backcolor is ignored
self.painttimes = (time.time(), 0)
self.s.sendall(message(CMSG_PING))
self.taskbarmode = 0
self.taskbarfree = 0
self.taskbartimeout = None
self.keydefinition = None
def msg_def_key(self, name, num, *icons):
self.keys[name] = num, [self.geticon(ico) for ico in icons]
def msg_def_icon(self, bmpcode, icocode, x, y, w, h, alpha=255, *rest):
## if h<0:
## try:
## bitmap, height = self.flippedbitmaps[bmpcode]
## except KeyError:
## bitmap, height = self.dpy.vflipppm(self.bitmaps[bmpcode])
## self.flippedbitmaps[bmpcode] = bitmap, height
## y = height - y
## h = - h
## else:
ico = self.geticon(icocode)
ico.bmpcode = bmpcode
ico.rect = x, y, w, h
ico.size = w, h
if alpha < 255:
ico.alpha = alpha
def msg_def_bitmap(self, bmpcode, data, colorkey=None, *rest):
if type(data) is not type(''):
data = self.fileids[data]
self.bitmaps[bmpcode] = data, colorkey
def msg_def_sample(self, smpcode, data, *rest):
def ready(f, self=self, smpcode=smpcode):
if self.snd:
self.sounds[smpcode] = self.snd.sound(f)
f.clear()
if type(data) is type(''):
data = zlib.decompress(data)
f = DataChunk(None)
f.store(0, data)
ready(f)
else:
f = self.fileids[data]
f.when_ready(ready)
def msg_patch_file(self, fileid, position, data, lendata=None, *rest):
if self.fileids.has_key(fileid):
f = self.fileids[fileid]
else:
f = self.fileids[fileid] = DataChunk(fileid)
f.server_patch(position, data, lendata or len(data))
def msg_zpatch_file(self, fileid, position, data, *rest):
data1 = zlib.decompress(data)
self.msg_patch_file(fileid, position, data1, len(data), *rest)
def msg_md5_file(self, fileid, filename, position, length, checksum, *rest):
if self.fileids.has_key(fileid):
f = self.fileids[fileid]
else:
f = self.fileids[fileid] = DataChunk(fileid)
f.server_md5(self, filename, position, length, checksum)
def msg_play_music(self, loop_from, *codes):
codes = [self.fileids[c] for c in codes]
self.currentmusic = loop_from, codes, list(codes)
self.activate_music()
def activate_music(self, f=None):
loop_from, codes, checkcodes = self.currentmusic
if checkcodes:
checkcodes.pop().when_ready(self.activate_music)
elif self.snd:
self.snd.play_musics(codes, loop_from)
def msg_fadeout(self, time, *rest):
if self.snd:
self.snd.fadeout(time)
def msg_player_icon(self, pid, icocode, *rest):
self.playericons[pid] = self.geticon(icocode)
def checkcfgfile(self, force=0):
if self.trackcfgfile:
try:
st = os.stat(self.trackcfgfile)
except OSError:
pass
else:
if force or (st.st_mtime != self.trackcfgmtime):
self.trackcfgmtime = st.st_mtime
try:
f = open(self.trackcfgfile, 'r')
data = f.read().strip()
f.close()
d = eval(data or '{}', {}, {})
except:
print >> sys.stderr, 'Invalid config file format'
else:
d = d.get(gethostname(), {})
namemsg = ''
for id, local in self.playing.items():
keyid = 'player%d' % id
if local == 'l' and d.has_key(keyid):
namemsg = namemsg + message(
CMSG_PLAYER_NAME, id, d[keyid])
if namemsg:
self.s.sendall(namemsg)
def msg_ping(self, *rest):
self.s.sendall(message(CMSG_PONG, *rest))
self.checkcfgfile()
if rest and self.udpsock_low is not None:
udpkbytes = rest[0]
if not udpkbytes:
return
#inp = self.udpbytecounter / (udpkbytes*1024.0)
#print "(%d%% packet loss)" % int(100*(1.0-inp))
if (udpkbytes<<10) * UDP_EXPECTED_RATIO > self.udpbytecounter:
# too many packets were dropped (including, maybe, all of them)
self.udpsock_low += 1
if self.udpsock_low >= 3 and self.initlevel >= 1:
# third time now -- that's too much
print "Note: routing UDP traffic over TCP",
inp = self.udpbytecounter / (udpkbytes*1024.0)
print "(%d%% packet loss)" % int(100*(1.0-inp))
self.start_udp_over_tcp()
self.s.sendall(message(CMSG_UDP_PORT, MSG_INLINE_FRAME))
else:
# enough packets received
self.udpsock_low = 0
def msg_pong(self, *rest):
if self.initlevel == 0:
self.startplaying()
self.initlevel = 1
elif self.initlevel == 1:
if self.snd and self.snd.has_music:
self.s.sendall(message(CMSG_ENABLE_MUSIC, 2))
self.initlevel = 2
if not self.taskbarfree and not self.taskbarmode:
self.taskbarfree = 1
self.settaskbar(1)
def msg_inline_frame(self, data, *rest):
if self.pending_udp_data is not None:
self.pending_udp_data = self.udp_over_tcp_decompress(data)
MESSAGES = {
MSG_BROADCAST_PORT:msg_broadcast_port,
MSG_DEF_PLAYFIELD: msg_def_playfield,
MSG_DEF_KEY : msg_def_key,
MSG_DEF_ICON : msg_def_icon,
MSG_DEF_BITMAP : msg_def_bitmap,
MSG_DEF_SAMPLE : msg_def_sample,
MSG_PLAY_MUSIC : msg_play_music,
MSG_FADEOUT : msg_fadeout,
MSG_PLAYER_JOIN : msg_player_join,
MSG_PLAYER_KILL : msg_player_kill,
MSG_PLAYER_ICON : msg_player_icon,
MSG_PING : msg_ping,
MSG_PONG : msg_pong,
MSG_INLINE_FRAME : msg_inline_frame,
MSG_PATCH_FILE : msg_patch_file,
MSG_ZPATCH_FILE : msg_zpatch_file,
MSG_MD5_FILE : msg_md5_file,
## MSG_LOAD_PREFIX : msg_load_prefix,
}
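# Dispatch table: each MSG_* code received from the server maps to the
# corresponding msg_* handler above, e.g. an incoming MSG_PING frame is routed
# to msg_ping(), which answers with CMSG_PONG.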
def run(s, sockaddr, *args, **kw):
Playfield(s, sockaddr).run(*args, **kw)
|
from collections import namedtuple
from datetime import timedelta
from unittest import mock
from pydrag import Artist
from pydrag import constants
from pydrag import Tag
from pydrag import Track
from pydrag import User
from pydrag.models.common import ListModel
from pytuber.core.models import ConfigManager
from pytuber.core.models import Provider
from pytuber.exceptions import NotFound
from pytuber.lastfm.models import PlaylistType
from pytuber.lastfm.services import LastService
from pytuber.storage import Registry
from tests.utils import TestCase
class LastServiceTests(TestCase):
def get_user(self):
return User(
playlists=1,
playcount=10000000,
gender="m",
name="rj",
url="",
country="greece",
image="",
age="33",
registered=1037793040,
)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(User, "get_loved_tracks")
@mock.patch.object(LastService, "get_user")
def test_sync_with_user_loved_tracks(self, get_user, loved_tracks, *args):
get_user.return_value = self.get_user()
loved_tracks.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(
type=PlaylistType.USER_LOVED_TRACKS.value, limit=10, username="foo"
)
self.assertEqual(["a", "b", "c"], actual)
get_user.assert_called_once_with("foo")
loved_tracks.assert_called_once_with(limit=10)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(User, "get_recent_tracks")
@mock.patch.object(LastService, "get_user")
def test_sync_with_user_recent_tracks(self, get_user, recent_tracks, *args):
get_user.return_value = self.get_user()
recent_tracks.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(
type=PlaylistType.USER_RECENT_TRACKS.value,
limit=10,
username="foo",
)
self.assertEqual(["a", "b", "c"], actual)
get_user.assert_called_once_with("foo")
recent_tracks.assert_called_once_with(limit=10)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(User, "get_top_tracks")
@mock.patch.object(LastService, "get_user")
def test_sync_with_user_top_tracks(self, get_user, top_tracks, *args):
get_user.return_value = self.get_user()
top_tracks.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(
type=PlaylistType.USER_TOP_TRACKS.value, limit=10, username="foo"
)
self.assertEqual(["a", "b", "c"], actual)
get_user.assert_called_once_with("foo")
top_tracks.assert_called_once_with(period=constants.Period.overall, limit=10)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(User, "get_friends")
@mock.patch.object(LastService, "get_user")
def test_sync_with_user_friends_tracks(self, get_user, friends, *args):
get_user.return_value = self.get_user()
friend = namedtuple("Friend", ["recent_track"])
friends.return_value = [
friend(recent_track=1),
friend(recent_track=2),
friend(recent_track=3),
]
actual = LastService.get_tracks(
type=PlaylistType.USER_FRIENDS_RECENT_TRACKS.value,
limit=10,
username="foo",
)
self.assertEqual([1, 2, 3], actual)
get_user.assert_called_once_with("foo")
friends.assert_called_once_with(limit=10, recent_tracks=True)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(Track, "get_top_tracks_chart")
def test_sync_with_chart(self, top_tracks_chart, *args):
top_tracks_chart.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(type=PlaylistType.CHART.value, limit=10)
self.assertEqual(["a", "b", "c"], actual)
top_tracks_chart.assert_called_once_with(limit=10)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(Track, "get_top_tracks_by_country")
def test_sync_with_country_chart(self, top_tracks_by_country, *args):
top_tracks_by_country.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(
type=PlaylistType.COUNTRY.value, limit=10, country="greece"
)
self.assertEqual(["a", "b", "c"], actual)
top_tracks_by_country.assert_called_once_with(limit=10, country="greece")
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(Tag, "get_top_tracks")
@mock.patch.object(LastService, "get_tag")
def test_sync_with_tag_chart(self, get_tag, get_top_tracks, *args):
get_tag.return_value = Tag(name="rock")
get_top_tracks.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(
type=PlaylistType.TAG.value, limit=10, tag="rock"
)
self.assertEqual(["a", "b", "c"], actual)
get_tag.assert_called_once_with("rock")
get_top_tracks.assert_called_once_with(limit=10)
@mock.patch.object(LastService, "assert_config")
@mock.patch.object(Artist, "get_top_tracks")
@mock.patch.object(LastService, "get_artist")
def test_sync_with_artist_chart(self, get_artist, get_top_tracks, *args):
get_artist.return_value = Artist(name="queen")
get_top_tracks.return_value = ListModel(["a", "b", "c"])
actual = LastService.get_tracks(
type=PlaylistType.ARTIST.value, limit=10, artist="queeen"
)
self.assertEqual(["a", "b", "c"], actual)
get_artist.assert_called_once_with("queeen")
get_top_tracks.assert_called_once_with(limit=10)
@mock.patch.object(LastService, "assert_config")
@mock.patch("pytuber.storage.time.time")
@mock.patch.object(Tag, "get_top_tags")
def test_get_tags(self, get_top_tags, time, assert_config):
time.return_value = 1
get_top_tags.side_effect = [
[Tag(name=i) for i in range(0, 250)],
[Tag(name=i) for i in range(250, 500)],
[Tag(name=i) for i in range(500, 750)],
[Tag(name=i) for i in range(750, 1000)],
]
names = [t.name for t in LastService.get_tags()]
self.assertEqual(list(range(0, 1000)), names)
get_top_tags.assert_has_calls(
[
mock.call(limit=250, page=1),
mock.call(limit=250, page=2),
mock.call(limit=250, page=3),
mock.call(limit=250, page=4),
]
)
tags, ttl = Registry.get("last.fm_tag_list")
self.assertEqual(1000, len(tags))
self.assertEqual({"name": 0}, tags[0])
self.assertEqual(timedelta(days=30, seconds=1).total_seconds(), ttl)
assert_config.assert_called_once()
@mock.patch.object(LastService, "assert_config")
@mock.patch("pytuber.storage.time.time")
@mock.patch.object(Artist, "find")
def test_get_artist(self, find, time, assert_config):
time.return_value = 1
find.return_value = Artist(name="Queen")
artist = LastService.get_artist("quueee")
self.assertEqual(Artist(name="Queen"), artist)
find.assert_called_once_with("quueee")
artist, ttl = Registry.get("last.fm_artist_quueee")
self.assertEqual({"name": "Queen"}, artist)
self.assertEqual(timedelta(days=30, seconds=1).total_seconds(), ttl)
assert_config.assert_called_once()
@mock.patch.object(LastService, "assert_config")
@mock.patch("pytuber.storage.time.time")
@mock.patch.object(User, "find")
def test_get_user(self, find, time, assert_config):
time.return_value = 1
my_user = self.get_user()
find.return_value = my_user
self.assertEqual(my_user, LastService.get_user("rj"))
find.assert_called_once_with("rj")
user, ttl = Registry.get("last.fm_user_rj")
self.assertEqual(self.get_user().to_dict(), user)
self.assertEqual(timedelta(hours=24, seconds=1).total_seconds(), ttl)
assert_config.assert_called_once()
@mock.patch("pytuber.lastfm.services.configure")
def test_assert_config(self, configure):
with self.assertRaises(NotFound):
LastService.assert_config()
ConfigManager.set({"provider": Provider.lastfm, "data": {"api_key": "aaa"}})
LastService.assert_config()
configure.assert_called_once_with(api_key="aaa")
|
<reponame>shanghua520/fuck-hexue-class-time<filename>main.py
import threading
import time
import requests
import json
mutex = threading.Lock()
logidall = []
def login(username, pwd, appId):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/login', {'username': username, "pwd": pwd}).text
logininfo = json.loads(reps)
return logininfo
def getuserinfo(auth_fix):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/GetUserInfo', {'auth_fix': auth_fix}).text
userinfo = json.loads(reps)
userinfodata = userinfo['data']
# print(userinfodata['Name'], userinfodata['ClassName'], userinfodata['MajorName'])
def myscore(auth_fix):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/new/MyScore', {'auth_fix': auth_fix}).text
scoreinfo = json.loads(reps)
return scoreinfo
def MyCourse(auth_fix):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/new/MyCourse', {'auth_fix': auth_fix}).text
Courseinfo = json.loads(reps)
return Courseinfo
def GetCourseInfo(auth_fix, courseOpenId):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/GetCourseInfo',
{'auth_fix': auth_fix, 'courseOpenId': courseOpenId}).text
CourseInfo = json.loads(reps)
return CourseInfo
def GetCourseeProcess(auth_fix, courseOpenId):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/GetCourseeProcess',
{'auth_fix': auth_fix, 'courseOpenId': courseOpenId}).text
CourseeProcess = json.loads(reps)
return CourseeProcess
def GetCellInfo(auth_fix, cellid, isIOS):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/GetCellInfo',
{'auth_fix': auth_fix, 'cellid': cellid, 'isIOS': isIOS}).text
CellInfo = json.loads(reps)
return CellInfo
def UpdateLogInfo(auth_fix, videoEndTime, logId, CellLogId):
reps = requests.post(r'http://api.hnscen.cn/mobile/api/UpdateLogInfo',
{'auth_fix': auth_fix, 'videoEndTime': videoEndTime, 'cellLogId': CellLogId,
'LogId': logId}).text
return reps
def addke(auth, CourseeProcess):
state = True
state1 = False
if len(logidall) <= 10:
for i in CourseeProcess['data']:
for j in i['topics']:
for k in j['cells']:
if k['Name'] == logidall[-1]['Process'][1]:
state1 = True
continue
if state1:
try:
CellInfo = GetCellInfo(auth, k['Id'], False)
logId = CellInfo['logId']
dis = {'Process': [k['Id'], k['Name'], k['Process']], 'logID': logId}
logidall.append(dis)
if len(logidall) >= 10:
return
except Exception:
return
def threaduplogidall(auth, Courseinfo, index):
while True:
if len(logidall) == 0:
break
CourseeProcess = GetCourseeProcess(auth, Courseinfo['course']['CourseOpenId'])
if len(logidall) <= 10:
addke(auth, CourseeProcess)
# print(CourseeProcess['data'][index]['topics'])
for i in CourseeProcess['data']:
for j in i['topics']:
for k in j['cells']:
for g in logidall:
if k['Type'] == 1 and k['Name'] == g['Process'][1]:
g['Process'][2] = k['Process']
time.sleep(40)
if __name__ == "__main__":
# Get the information returned by login
# username, password
logininfo = login('username', 'password', None)
time.sleep(0.1)
# auth seems to be important; basically every API call below uses it
auth = logininfo['auth']
getuserinfo(auth)
# Get all courses
Course = MyCourse(auth)
time.sleep(0.1)
print('Course list-----------------------')
for index, i in enumerate(Course['course']):
print(index, ': course name: ' + i['name'] + ' progress: ' + str(i['process']) + '%')
print('Please select a course:')
index = input()
# Get the courseOpenId of the selected course; it is needed to fetch everything about that course
Courseinfo = GetCourseInfo(auth, Course['course'][int(index)]['courseOpenId'])
time.sleep(0.1)
# Use the courseOpenId to fetch all the lessons of the selected course
CourseeProcess = GetCourseeProcess(auth, Courseinfo['course']['CourseOpenId'])
print('Module list-----------------------')
for index, i in enumerate(CourseeProcess['data']):
print(index, 'Module name: ' + i['name'])
print('Please select a module:')
index = int(input())
for a in range(index, len(CourseeProcess['data'])):
# we need this one
CourseeProces = CourseeProcess['data'][a]['topics']
# all the lessons to be watched are in this list
for i in CourseeProces:
for j in i['cells']:
if j['Type'] == 2 or j['Type'] == 3 or j['Process'] == 100:
continue
CourseeID = j['Id']
print('j', j)
try:
CellInfo = GetCellInfo(auth, CourseeID, False)
logId = CellInfo['logId']
dis = {'Process': [j['Id'], j['Name'], j['Process']], 'logID': logId}
logidall.append(dis)
except Exception:
print("one Error")
pass
t1 = threading.Thread(target=threaduplogidall, args=(auth, Courseinfo, int(index)))
t1.start()
a = 0
while True:
# print(logidall)
for index, i in enumerate(logidall):
if i['Process'][2] == 100:
del logidall[index]
continue
logId = i['logID']
# print(i['Process'][1], i['Process'][2], '%')
UpdateLogInfo(auth, 1, logId, logId)
time.sleep(5)
if len(logidall) == 0:
print("已经刷完了♪(^∇^*)")
break
|
<gh_stars>0
"""Tools to extract and analyze data from GOES-R."""
import datetime as dt
import itertools
from multiprocessing import get_context
import netCDF4 as nc
import numpy as np
from pathlib import Path
import s3fs
def download_goes_hotspot_characterization(folder, start, end, satellite="G17", full_disk=False):
"""Download hotspot characterization data from Amazon AWS S3.
Queries the appropriate S3 folders for file names. If they
already exist in the specified folder, they are not
downloaded again.
Arguments:
folder is the location to store the files
start is the start time
end is the end time and must be after start
satellite must be "G17" or "G16" to choose which satellite to use.
full_disk means to use the full disk instead of the conus imagery.
Returns: A list of file names on the local machine for your processing.
"""
if full_disk:
product = 'ABI-L2-FDCF'
else:
product = 'ABI-L2-FDCC'
return download_goes_data(folder, start, end, product, satellite)
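# A minimal usage sketch (folder path and time range are illustrative
# assumptions, not taken from this project):
#
#   import datetime as dt
#   start = dt.datetime(2021, 8, 1, 0, tzinfo=dt.timezone.utc)
#   end = dt.datetime(2021, 8, 1, 6, tzinfo=dt.timezone.utc)
#   files = download_goes_hotspot_characterization("goes_data", start, end, satellite="G17")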
def download_goes_data(folder, start, end, product, satellite="G17"):
"""Download GOES data from Amazon AWS S3.
First checks the local archive in 'folder' and checks for at least
11 files for a date and hour. If there are not enough files, then
it queries the appropriate S3 folders for file names. If they
already exist in the specified folder, they are not
downloaded again.
Arguments:
folder is the location to store the files
start is the start time
end is the end time and must be after start
satellite must be "G17" or "G16" to choose which satellite to use.
Returns: A list of file names on the local machine for your processing.
"""
assert isinstance(start, dt.datetime)
assert isinstance(end, dt.datetime)
assert end > start
assert satellite == "G17" or satellite == "G16"
if not isinstance(folder, Path):
folder = Path(folder)
assert folder.is_dir()
start, bucket = _validate_satellite_dates(satellite, start, end)
if start is None:
return []
# Get a list of files we already have downloaded.
current_files = tuple(
f for f in folder.iterdir()
if "ABI-L2" in f.name and product in f.name and satellite in f.name and f.suffix == ".nc"
)
# Files older than this are too old to be missing, and must be
# permanently missing. So we shouldn't check for them again, just
# remember that they are missing so we can skip them.
too_old_to_be_missing = dt.datetime.now(dt.timezone.utc) - dt.timedelta(days=1)
# The list of hours with missing data.
missing_data_path = folder / "missing_data.txt"
if missing_data_path.exists():
with open(missing_data_path, "r") as mdf:
missing_data = list(l.strip() for l in mdf if l.strip() != "")
else:
missing_data = []
current_time = start
result_list = []
while current_time < end:
# Check to see how many matching files we have
time_prefix = current_time.strftime("_s%Y%j%H")
missing_key = "{}{}_{}".format(satellite, time_prefix, product)
local_files_this_hour = tuple(f for f in current_files if time_prefix in f.name)
result_list.extend(local_files_this_hour)
# Should be 12 per hour for CONUS
if "FDCC" in missing_key and len(local_files_this_hour) >= 12:
pass
# Should be 6 per hour for Full Disk
elif "FDCF" in missing_key and len(local_files_this_hour) >= 6:
pass
elif missing_key not in missing_data:
result_list.extend(
_download_files(
current_time, bucket, product, folder, too_old_to_be_missing, missing_data,
missing_key
)
)
# Move ahead an hour
current_time += dt.timedelta(hours=1)
# Remember the missing!
with open(missing_data_path, "w") as mdf:
for line in missing_data:
mdf.write(line)
mdf.write("\n")
return result_list
def _download_files(
current_time, s3_bucket, product, target_dir, too_old_to_be_missing, missing_data, missing_key
):
"""Download the files for the hour given by current_time.
The remote directory is built from current_time, s3_bucket, and product.
target_dir is the directory on the local file system to store downloaded
data.
too_old_to_be_missing and missing data keep track of files that are
missing and very unlikely to ever be updated.
Returns a generator that yields the file name on the local file system of
any downloaded files. If the target file was already downloaded, it just
yields the local file name without redownloading it.
"""
# Don't try to download things that don't exist yet.
if current_time > dt.datetime.now(dt.timezone.utc):
return None
time_path = current_time.strftime("%Y/%j/%H")
remote_dir = "{}/{}/{}".format(s3_bucket, product, time_path)
# Use the anonymous credentials to access public data
fs = s3fs.S3FileSystem(anon=True)
remote_files = list(fs.ls(remote_dir))
local_files = (f.split('/')[-1] for f in remote_files)
local_files = (target_dir / f for f in local_files)
files = tuple(zip(remote_files, local_files))
# If there's some missing data, remember!
if len(files) < 12 and current_time < too_old_to_be_missing:
missing_data.append(missing_key)
for remote, local in files:
if not local.exists() or not local.is_file():
print("Downloading", local)
fs.get(remote, str(local))
yield local
return None
def _validate_satellite_dates(satellite, start, end):
"""Validate the start and end times for the satellite.
Uses the known operational dates of the satellites to
adjust the start date if needed. It also selects the
Amazon S3 bucket to use.
Returns: a tuple of (start, S3 bucket). If the start
and end times are invalid, it returns (None, None).
"""
GOES_16_OPERATIONAL = dt.datetime(2017, 12, 18, 17, 30, tzinfo=dt.timezone.utc)
GOES_17_OPERATIONAL = dt.datetime(2019, 2, 12, 18, tzinfo=dt.timezone.utc)
# Satellite specific checks and setup.
if satellite == "G17":
if end < GOES_17_OPERATIONAL:
return (None, None)
if start < GOES_17_OPERATIONAL:
start = GOES_17_OPERATIONAL
bucket = 's3://noaa-goes17'
elif satellite == "G16":
if end < GOES_16_OPERATIONAL:
return (None, None)
if start < GOES_16_OPERATIONAL:
start = GOES_16_OPERATIONAL
bucket = 's3://noaa-goes16'
return (start, bucket)
class BoundingBox:
"""Simple spatial AND temporal boundaries for satellite data."""
def __init__(self, southwest_corner, northeast_corner, start, end, name):
"""Create a simple bounding box.
southwest_corner is a (lat,lon) tuple of the southwest corner of
area of interest.
northeast_corner is a (lat,lon) tuple of the northeast corner of
area of interest.
"""
assert isinstance(start, dt.datetime)
assert isinstance(end, dt.datetime)
assert start < end
self.min_lat, self.min_lon = southwest_corner
self.max_lat, self.max_lon = northeast_corner
self.start, self.end = start, end
self.name = name
return
def sw_corner(self):
"""Get the southwest corner as a tuple (lat, lon)."""
return (self.min_lat, self.min_lon)
def ne_corner(self):
"""Get the northeast corner as a tuple (lat, lon)."""
return (self.max_lat, self.max_lon)
def corners(self):
"""Get a tuple of the corners, each themselves a tuple."""
return (self.sw_corner(), self.ne_corner())
def total_fire_power_time_series(files, bounding_box):
"""Create time series of total fire power.
Arguments:
files is a list of NetCDF4 files with fire power data.
Either the paths or opened nc.Dataset's can be
passed in.
bounding_box is the bounding box to gather data for.
Returns: A dictionary where valid time is the key and
the value is tuple with the fire power and original
file name.
"""
assert isinstance(bounding_box, BoundingBox)
bb = bounding_box
results = {}
vals = map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))
vals = (val for val in vals if val is not None)
for time, val, fname in vals:
results[time] = (val, fname)
return results
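# A minimal usage sketch (coordinates, times and name are illustrative
# assumptions):
#
#   bb = BoundingBox((37.0, -122.0), (39.0, -120.0), start, end, name="example")
#   series = total_fire_power_time_series(files, bb)
#   for valid_time, (power_gw, fname) in sorted(series.items()):
#       print(valid_time, power_gw)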
def total_fire_power_time_series_par(files, bounding_box):
"""Create time series of total fire power.
Arguments:
files is a list of NetCDF4 files with fire power data.
Either the paths or opened nc.Dataset's can be
passed in.
bounding_box is the bounding box to gather data for.
Returns: A dictionary where valid time is the key and
the value is tuple with the fire power and original
file name.
"""
assert isinstance(bounding_box, BoundingBox)
bb = bounding_box
results = {}
with get_context('spawn').Pool() as pool:
vals = pool.map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))
vals = (val for val in vals if val is not None)
for time, val, fname in vals:
results[time] = (val, fname)
return results
def _process_single_fire_power_time_series(tuple_arg):
nc_file, bb = tuple_arg
if isinstance(nc_file, nc.Dataset):
nc_data = nc_file
fname = nc_file.filepath()
# Owner opened, they take responsibility for closing.
needs_close = False
else:
fname = str(nc_file)
nc_data = nc.Dataset(nc_file)
needs_close = True
try:
time = get_valid_time(nc_data)
if time >= bb.start and time <= bb.end:
total_power = get_total_fire_power(nc_data, bb)
return (time, total_power, fname)
except Exception as e:
if isinstance(nc_file, nc.Dataset):
msg = nc_file.filepath()
else:
msg = nc_file
print("Error, skipping {} for error {}".format(msg, e))
return None
finally:
if needs_close:
nc_data.close()
return
def is_valid_netcdf_file(nc_data):
"""Various QC checks on the data in the file."""
fname = Path(nc_data.filepath()).name
start_str = fname.split("_")[3][1:-1]
start_fname = dt.datetime.strptime(
start_str + " UTC",
"%Y%j%H%M%S %Z",
)
start_fname = start_fname.replace(tzinfo=dt.timezone.utc)
end_str = fname.split("_")[4][1:-1]
end_fname = dt.datetime.strptime(end_str + " UTC", "%Y%j%H%M%S %Z")
end_fname = end_fname.replace(tzinfo=dt.timezone.utc)
avg_fname = start_fname + (end_fname - start_fname) / 2
vtime = get_valid_time(nc_data)
if vtime is None:
return False
diff = (avg_fname - vtime).total_seconds()
if diff > 60:
return False
return True
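# For reference, GOES ABI file names look roughly like
# "OR_ABI-L2-FDCC-M6_G17_s20211930001176_e20211930003549_c20211930004089.nc"
# (an assumption about the naming scheme, not stated in this file): fields 3
# and 4 carry the scan start and end times, and the [1:-1] slices above drop
# the leading 's'/'e' marker and the trailing tenths-of-a-second digit.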
def get_valid_time(nc_dataset):
"""Extract the valid time.
This is the average of the starting and ending times of
the scan.
Arguments:
nc_dataset is a dataset returned by nc.Dataset(filename).
It is assumed that these are fire files. Usually
they have ABI-L2-FDCC in the file name.
Returns: the valid time as a datetime.datetime object.
"""
try:
time = nc_dataset.variables['time_bounds'][:]
time = sum(time) / len(time)
time = _SATELLITE_EPOCH + dt.timedelta(seconds=time)
return time
except Exception:
return None
# EPOCH - satellite data stored in NetCDF files uses this datetime as
# the epoch. Time values in the files are in seconds since this time.
_SATELLITE_EPOCH = dt.datetime(2000, 1, 1, 12, 0, 0, tzinfo=dt.timezone.utc)
def get_total_fire_power(nc_dataset, bounding_box):
"""Extract the total fire power in the area of interest.
Arguments:
nc_dataset is a dataset returned by nc.Dataset(filename).
It is assumed that these are fire files. Usually
they have ABI-L2-FDCC in the file name.
bounding_box is the area from which to extract data.
Returns: The fire power in gigawatts.
"""
idxs = _get_grid_cell_indexes(
nc_dataset.variables['goes_imager_projection'], nc_dataset.variables['x'],
nc_dataset.variables['y'], bounding_box
)
powers = list(nc_dataset.variables['Power'][:].flatten()[idxs])
powers = (x for x in powers if x != 'masked')
total_power = sum(powers) / 1000.0 # This makes it Gigawatts
return total_power
def _get_grid_cell_indexes(proj, xs, ys, bounding_box):
"""Get the indexes of the desired pixels in a satellite image.
I found this algorithm at
https://makersportal.com/blog/2018/11/25/goes-r-satellite-latitude-and-longitude-grid-projection-algorithm
After implementing it I checked the ranges of lat-lon coords
produced by using it against the values reported in the
NetCDF4 files.
Arguments:
proj is the projection from a GOES-R NetCDF4 file.
xs is a 1D array of from the NetCDF4 file with the x-coordinates.
ys is a 1D array of from the NetCDF4 file with the y-coordinates.
bounding_box is the area we need to get the indexes for.
Returns: A list of indexes into a flattened array of the values
from a satellite image.
"""
# Unpack values from the projection
eq_rad = proj.semi_major_axis
polar_rad = proj.semi_minor_axis
h = proj.perspective_point_height + eq_rad
lon0 = proj.longitude_of_projection_origin
# Unpack values from the area we want to grab the data
min_lat, min_lon = bounding_box.sw_corner()
max_lat, max_lon = bounding_box.ne_corner()
with np.errstate(invalid='ignore'):
# Calculate the lat and lon grids
xs, ys = np.meshgrid(xs, ys)
a_vals = np.power(np.sin(xs), 2.0) + \
np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \
eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))
b_vals = -2 * h * np.cos(xs) * np.cos(ys)
c_val = h * h - eq_rad * eq_rad
rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)
sx = rs * np.cos(xs) * np.cos(ys)
sy = -rs * np.sin(xs)
sz = rs * np.cos(xs) * np.sin(ys)
lats = np.arctan((eq_rad *eq_rad * sz) \
/ (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))
lats = np.degrees(lats)
lons = np.radians(lon0) - np.arctan(sy / (h - sx))
lons = np.degrees(lons)
# Flatten the arrays so we get a 1D list of indexes
lats = lats.flatten()
lons = lons.flatten()
# Filter out values not in our bounding box
lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]
lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]
idxs = list(set(lons).intersection(set(lats)))
return idxs
|
<reponame>rsmith-nl/deploy
#!/usr/bin/env python
# file: deploy.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2018 <NAME> <<EMAIL>>.
# SPDX-License-Identifier: MIT
# Created: 2014-03-09T17:08:09+01:00
# Last modified: 2020-10-27T18:16:10+0100
"""
Script for deploying files.
It can check for differences, show diffs and install files. It will only work if
a file named 'filelist.<host>.<name>' or 'filelist.<name>' is present, where <host>
is the host name without domain, and <name> is the login name of the user. If both
are present the first form is processed last.
"""
from difflib import unified_diff
from enum import IntEnum
from hashlib import sha256
from shutil import copyfile
import argparse
import os
import pwd
import stat
import subprocess
import sys
__version__ = "2020.09.20"
def check(src, perm, dest, cmds, comp, verbose=False):
"""
Report if src and dest are different.
Arguments
src: Location of the source file.
perm: Permissions of the destination file (ignored).
dest: Location of the destination file.
cmds: Post-install commands (ignored).
comp: Cmp enum
verbose: Report if files are the same.
"""
if comp == Cmp.differ:
ansiprint(f"The file '{src}' differs from '{dest}'.", fg=Color.red, i=True)
elif comp == Cmp.nodest:
ansiprint(
f"The destination file '{dest}' does not exist",
fg=Color.black,
bg=Color.red,
)
elif comp == Cmp.nosrc:
ansiprint(
f"The source file '{src}' does not exist.", fg=Color.black, bg=Color.red
)
elif comp == Cmp.same and verbose:
ansiprint(f"The files '{src}' and '{dest}' are the same.", fg=Color.green)
def status(src, perm, dest, cmds, comp, _):
"""
Report the status for all files.
Equivalent to ‘check’ with the verbose option.
"""
check(src, perm, dest, cmds, comp, verbose=True)
def diff(src, perm, dest, cmds, comp, verbose=False):
"""
Print the difference between src and dest.
Arguments
src: Location of the source file.
perm: Permissions of the destination file (ignored).
dest: Location of the destination file.
cmds: Post-install commands (ignored).
comp: Cmp enum
verbose: Report on successful installs (ignored).
"""
if comp != Cmp.differ:
return
with open(src) as s, open(dest) as d:
srcl, destl = list(s), list(d)
out = unified_diff(destl, srcl, dest, src)
colordiff(out)
def install(src, perm, dest, cmds, comp, verbose=False):
"""
Install src into dest and execute post-install commands.
Arguments
src: Location of the source file.
perm: Permissions of the destination file.
dest: Location of the destination file.
cmds: Post-install commands.
comp: Cmp enum
verbose: Report on successful installs.
"""
if comp == Cmp.nosrc:
ansiprint(
f"The source file '{src}' does not exist.", fg=Color.black, bg=Color.red
)
elif comp == Cmp.same:
return
try:
if os.path.exists(dest):
os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR)
copyfile(src, dest)
os.chmod(dest, perm)
if cmds and subprocess.call(cmds) != 0:
ansiprint(f"Post-install commands for {dest} failed.", fg=Color.red)
except Exception as e:
ansiprint(f"Installing '{src}' as '{dest}' failed: {e}", fg=Color.red)
return
ansiprint(f"File '{src}' was successfully installed as '{dest}'.", fg=Color.green)
cmdset = {"check": check, "status": status, "diff": diff, "install": install}
class Color(IntEnum):
"""Standard ANSI colors."""
black = 0
red = 1
green = 2
yellow = 3
blue = 4
magenta = 5
cyan = 6
white = 7
class Cmp(IntEnum):
"""File comparison result."""
differ = 0 # source and destination are different
same = 1 # source and destination are identical
nodest = 2 # destination doesn't exist
nosrc = 3 # source doesn't exist
def main(argv):
"""
Entry point for the deploy script.
Arguments:
argv: All command line arguments save the name of the script.
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="also report if files are the same",
default=False,
)
parser.add_argument("-V", "--version", action="version", version=__version__)
parser.add_argument("command", choices=cmdset.keys())
args = parser.parse_args(argv)
fn = cmdset[args.command]
try:
install_data = parsefilelist(args.verbose)
except Exception as e:
ansiprint(str(e), fg=Color.red)
parser.print_help()
sys.exit(1)
for src, perm, dest, cmds in install_data:
cv = compare(src, dest)
fn(src, perm, dest, cmds, cv, args.verbose)
def parsefilelist(verbose):
"""
Parse a install file list.
The install file list should have the name 'filelist.<user>' or 'filelist.<hostname>.<user>',
where the hostname is *without* the domain. Both are tried, in the order given above.
Returns:
A list of (src, perm, dest, commands) tuples.
"""
user = pwd.getpwuid(os.getuid()).pw_name
hostname = os.environ["HOST"].split(".")[0]
filenames = [f"filelist.{user}", f"filelist.{hostname}.{user}"]
installs = []
for filename in filenames:
try:
with open(filename, "r") as infile:
for ln in infile:
if ln.startswith("#") or ln.isspace():
continue
try:
src, perm, dest, *cmds = ln.strip().split()
except ValueError:
ansiprint(f"Invalid line in {filename}: '{ln}'", fg=Color.red)
continue
installs.append((src, int(perm, base=8), dest, cmds))
except FileNotFoundError:
if verbose:
ansiprint(
f"Command file '{filename}' not found, skipping.", fg=Color.cyan
)
return installs
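# A hedged example of lines this parser accepts (paths and commands are
# illustrative, not from the original project):
#   dot.vimrc 0644 /home/user/.vimrc
#   sshd_config 0600 /etc/ssh/sshd_config service sshd restart
# Columns are: source file, octal permissions, destination, then any optional
# post-install command words.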
def compare(src, dest):
"""
Compare two files.
Arguments
src: Path of the source file.
dest: Path of the destination file.
Returns:
Cmp enum
"""
xsrc, xdest = os.path.exists(src), os.path.exists(dest)
if not xsrc:
return Cmp.nosrc
if not xdest:
return Cmp.nodest
with open(src, "rb") as s:
csrc = sha256(s.read()).digest()
if xdest:
with open(dest, "rb") as d:
cdest = sha256(d.read()).digest()
else:
cdest = b""
if csrc == cdest:
return Cmp.same
return Cmp.differ
def ansiprint(s, fg="", bg="", i=False):
"""
Print a colored text with ansi escape sequences.
Arguments
fg: Optional foreground color.
bg: Optional background color.
i: Boolean to indicate intense colors (default False)
"""
esc = "\033[{:d}{}m"
iv = ""
if i:
iv = ";1"
if fg != "":
fg = esc.format(30 + fg, iv)
if bg != "":
bg = esc.format(40 + bg, iv)
print("".join([fg, bg, s, esc.format(0, "")]))
def colordiff(txt):
"""
Print a colored diff.
Arguments:
txt: diff list or generator to print
"""
for line in txt:
line = line.rstrip()
if line.startswith(("+++ ", "--- ")):
ansiprint(line, fg=Color.yellow, i=True)
continue
if line.startswith("+"):
ansiprint(line, fg=Color.green, i=True)
continue
if line.startswith("-"):
ansiprint(line, fg=Color.red, i=True)
continue
if line.startswith("@@"):
ansiprint(line, fg=Color.magenta, i=True)
continue
print(line)
if __name__ == "__main__":
main(sys.argv[1:])
|
# coding: utf-8
# Dictionary from: https://github.com/mahavivo/english-wordlists/edit/master/CET4+6_edited.txt
import numpy as np
class AutoCheck(object):
def __init__(self, word_file='words.txt'):
self.word_file = word_file
self.word_list = self.read_words(word_file=word_file)
print(len(self.word_list))
def read_words(self, word_file):
word_list = []
with open(word_file, 'r') as f:
for word in f.readlines():
word_list.append(word.strip())
return word_list
def jaccard_sim(self, str_a ,str_b):
'''
Jaccard similarity coefficient:
computes the similarity of str_a and str_b as len(sa & sb) / len(sa | sb)
'''
seta = set(str_a)
setb = set(str_b)
sa_sb = 1.0 * len(seta & setb) / len(seta | setb)
return sa_sb
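# Worked example: jaccard_sim("check", "chick") compares the character sets
# {'c','h','e','k'} and {'c','h','i','k'}: the intersection has 3 characters
# and the union has 5, so the similarity is 3/5 = 0.6.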
def cos_sim(self, str_a, str_b):
seta = set(str_a)
setb = set(str_b)
all_chars = seta | setb
cnt_a = {}
cnt_b = {}
for c in str_a:
if c not in cnt_a:
cnt_a[c] = 0
cnt_a[c] += 1
for c in str_b:
if c not in cnt_b:
cnt_b[c] = 0
cnt_b[c] += 1
a = []
b = []
for c in all_chars:
k = 0
if c in cnt_a.keys():
k = cnt_a[c]
a.append(k)
k = 0
if c in cnt_b.keys():
k = cnt_b[c]
b.append(k)
a = np.array(a)
b = np.array(b)
#return {"文本的余弦相似度:":np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))}
return np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))
def min_edit_distance(self, str_a, str_b):
'''
Minimum edit distance, with only three operations: substitution, insertion, deletion
'''
lensum = float(len(str_a) + len(str_b))
if len(str_a) > len(str_b): # make str_a the shorter string
str_a,str_b = str_b,str_a
distances = range(len(str_a) + 1) # initial distances for the empty prefix
for index2,char2 in enumerate(str_b): #str_b > str_a
newDistances = [index2+1] # start a new row of distances
for index1,char1 in enumerate(str_a):
if char1 == char2: # characters match: no edit at index1, keep the previous diagonal distance
newDistances.append(distances[index1])
else: # take the cheapest of the three edits
newDistances.append(1 + min((distances[index1], # deletion
distances[index1+1], # insertion
newDistances[-1]))) # substitution
distances = newDistances # update the row of minimum edit distances
mindist = distances[-1]
ratio = (lensum - mindist)/lensum
#return {'distance':mindist, 'ratio':ratio}
return ratio
# https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
def levenshtein_distance(self, str1, str2, damerau=True):
'''
Edit distance (Levenshtein distance), used to measure text similarity
'''
m = len(str1)
n = len(str2)
lensum = float(m + n)
d = [[0 for _ in range(n+1)] for _ in range(m+1)]
# the first row/column hold the cost of building each prefix from an empty string
for i in range(m+1):
d[i][0] = i
for j in range(n+1):
d[0][j] = j
for i in range(1, m+1):
for j in range(1, n+1):
cost = 0 if str1[i-1] == str2[j-1] else 1
d[i][j] = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+cost)
if damerau:
if i > 1 and j > 1 and str1[i-1] == str2[j-2] and str1[i-2] == str2[j-1]:
d[i][j] = min(d[i][j], d[i-2][j-2]+1)
ldist = d[m][n]
ratio = (lensum - ldist) / lensum
#return {'distance':ldist, 'ratio':ratio}
return ratio
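# Worked example: with damerau=True, "chcek" -> "check" is a single
# transposition, so the distance is 1 and the returned ratio is
# (5 + 5 - 1) / (5 + 5) = 0.9.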
# the query may contain spaces; split on them
def auto_check(self, ori_query):
# print(ori_query)
query_list = ori_query.split(" ")
hint_query = ""
for i, query in enumerate(query_list):
check_word = ""
ratio = 0.0
for word in self.word_list:
if ratio < self.levenshtein_distance(query, word):
check_word = word
ratio = self.levenshtein_distance(query, word)
# on a tie, fall back to cosine similarity
elif ratio == self.levenshtein_distance(query, word):
cos_sim_1 = self.cos_sim(query, word)
cos_sim_2 = self.cos_sim(query, check_word)
# print(query, check_word, word, cos_sim_1, cos_sim_2)
if cos_sim_1 > cos_sim_2:
check_word = word
if i == 0:
hint_query += check_word
else:
hint_query += " " + check_word
# print(query, check_word, i)
print("ori_query", ori_query)
print("hint_query", hint_query)
return hint_query
def test_AutoCheck():
auto_check = AutoCheck(word_file="./static/autocheck/words.txt")
# print(auto_check.cos_sim("chcek", "check"))
# print(auto_check.cos_sim("chcek", "cheek"))
# print(auto_check.min_edit_distance("chcek", "check"))
# print(auto_check.min_edit_distance("chcek", "cheek"))
query = "tests"
auto_check.auto_check(ori_query=query)
if __name__ == "__main__":
test_AutoCheck() |
from PyQt4 import QtCore
from gui import Ui_MainWindow
from gui_components import BaseGuiComponent
from gui_components import SelectFile
from gui_components import FileInfo
from gui_components import Convert
from gui_components import Player
from gui_components import Cut
from gui_components import FftAnalysis
class AppUi(Ui_MainWindow):
def __init__(self, connector):
super(AppUi, self).__init__()
self.connector = connector
self.select_file = SelectFile(self, connector)
self.file_info = FileInfo(self, connector)
self.convert = Convert(self, connector)
self.player = Player(self, connector)
self.cut = Cut(self, connector)
self.fft_analysis = FftAnalysis(self, connector)
def setupUi(self, MainWindow):
super(AppUi, self).setupUi(MainWindow)
self.select_file.add_element('centralWidget', self.centralWidget)
self.select_file.add_element('selected_file', self.selected_file)
self.select_file.add_element('select_file_btn', self.select_file_btn)
self.select_file.add_element('invalid_file_label', self.invalid_file_label)
self.select_file.setup_ui()
self.file_info.add_element('info_file_name_value', self.info_file_name_value)
self.file_info.add_element('info_sampling_frequency_value', self.info_sampling_frequency_value)
self.file_info.add_element('info_bitrate_value', self.info_bitrate_value)
self.file_info.add_element('info_duration_value', self.info_duration_value)
self.file_info.add_element('info_channels_value', self.info_channels_value)
self.file_info.setup_ui()
self.convert.add_element('convert_target_ext_combo_box', self.convert_target_ext_combo_box)
self.convert.add_element('convert_btn', self.convert_btn)
self.convert.add_element('convert_status_label', self.convert_status_label)
self.convert.setup_ui()
self.player.add_element('play_btn', self.play_btn)
self.player.add_element('pause_btn', self.pause_btn)
self.player.add_element('stop_btn', self.stop_btn)
self.player.add_element('player_slider', self.player_slider)
self.player.setup_ui()
self.cut.add_element('cut_audio_duration_label', self.cut_audio_duration_label)
self.cut.add_element('cut_audio_duration_value', self.cut_audio_duration_value)
self.cut.add_element('cut_start_from_label', self.cut_start_from_label)
self.cut.add_element('cut_start_from_edit', self.cut_start_from_edit)
self.cut.add_element('cut_length_label', self.cut_length_label)
self.cut.add_element('cut_length_edit', self.cut_length_edit)
self.cut.add_element('cut_invalid_values_label', self.cut_invalid_values_label)
self.cut.add_element('cut_btn', self.cut_btn)
self.cut.add_element('cut_status_label', self.cut_status_label)
self.cut.setup_ui()
self.fft_analysis.add_element('fft_audio_duration_label', self.fft_audio_duration_label)
self.fft_analysis.add_element('fft_audio_duration_value', self.fft_audio_duration_value)
self.fft_analysis.add_element('fft_start_from_label', self.fft_start_from_label)
self.fft_analysis.add_element('fft_start_from_edit', self.fft_start_from_edit)
self.fft_analysis.add_element('fft_length_label', self.fft_length_label)
self.fft_analysis.add_element('fft_length_edit', self.fft_length_edit)
self.fft_analysis.add_element('fft_invalid_values_label', self.fft_invalid_values_label)
self.fft_analysis.add_element('fft_window_size_label', self.fft_window_size_label)
self.fft_analysis.add_element('fft_window_size_edit', self.fft_window_size_edit)
self.fft_analysis.add_element('fft_overlap_size_label', self.fft_overlap_size_label)
self.fft_analysis.add_element('fft_overlap_size_edit', self.fft_overlap_size_edit)
self.fft_analysis.add_element('fft_overlap_error_label', self.fft_overlap_error_label)
self.fft_analysis.add_element('fft_analysis_btn', self.fft_analysis_btn)
self.fft_analysis.add_element('fft_analysis_status_label', self.fft_analysis_status_label)
self.fft_analysis.setup_ui()
self.disable_all()
def disable_all(self):
self.select_file.disable_all()
self.file_info.disable_all()
self.convert.disable_all()
self.player.disable_all()
self.cut.disable_all()
self.fft_analysis.disable_all()
def enable_all(self):
self.select_file.enable_all()
self.file_info.enable_all()
self.convert.enable_all()
self.player.enable_all()
self.cut.enable_all()
self.fft_analysis.enable_all()
def handle_invalid_file(self):
self.select_file.handle_invalid_file()
self.disable_all()
def show_selected_file_path(self, path):
self.select_file.show_selected_file_path(path)
self.enable_all()
|
<filename>heart_failure_app.py
import streamlit as st
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
st.write("""
# HEART FAILURE PREDICTION APP
This app predicts the likelihood of a person having an **Heart Attack** .
""")
st.sidebar.header('User Medical Records')
st.sidebar.write('please enter your credentials here')
st.header('**Upload a File, or Use the Sliders and Selectboxes on the Left.**')
st.header('**Notice!**')
st.write("""Uploaded File Must be in CSV format comprising of 12 Columns,
with Column Names in the Following order.
1. age
2. anaemia
3. creatinine_phosphokinase
4. diabetes
5. ejection_fraction
6. high_blood_pressure
7. platelets
8. serum_creatinine
9. serum_sodium
10.sex
11.smoking
12.time""")
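# A hedged example of the expected header row (column names only, in order):
# age,anaemia,creatinine_phosphokinase,diabetes,ejection_fraction,high_blood_pressure,platelets,serum_creatinine,serum_sodium,sex,smoking,time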
uploaded_file = st.sidebar.file_uploader("Upload your input health CSV file", type = ['csv'])
try:
input_df = pd.read_csv(uploaded_file)
input_df['sex'] = np.where(input_df['sex'] == 1, 'Male','Female')
except BaseException:
def user_input_features():
age = st.sidebar.slider('What is your Age?',20,100,50)
anaemia = st.sidebar.selectbox('Do you Have Anaemia?',(True,False))
creatinine_phosphokinase = st.sidebar.slider('What is the level of Creatinine_Phosphokinase(CP) in your body?',20,8000,3000)
diabetes = st.sidebar.selectbox('Do you have Diabetes?',(True,False))
ejection_fraction = st.sidebar.slider('What is your Ejection_ Fraction?',0,150,75)
high_blood_pressure = st.sidebar.selectbox('Are you Hypertensive?',(True,False))
platelets = st.sidebar.slider('What is your Blood Platelets count?',15000,900000,15000)
serum_creatinine = st.sidebar.slider('What is the amount of Serum_creatinine in your bloodstream?',0.5,10.0,0.5)
serum_sodium = st.sidebar.slider('What is the level of Serum_Sodium in your Body?',50,200,50)
sex = st.sidebar.selectbox('What is your Sex?',('Male','Female'))
smoking = st.sidebar.selectbox('Do you Smoke?',(True,False))
time = st.sidebar.slider('How many times have you gone for an appointment at the Hospital?',0,400,20)
data = {'age': age,'anaemia':anaemia,'creatinine_phosphokinase':creatinine_phosphokinase,
'diabetes':diabetes,'ejection_fraction':ejection_fraction,
'high_blood_pressure':high_blood_pressure,'platelets':platelets,
'serum_creatinine':serum_creatinine,'serum_sodium':serum_sodium,
'sex':sex,'smoking':smoking,'time':time}
features = pd.DataFrame(data,index=[0])
return features
input_df = user_input_features()
heart_raw = pd.read_csv('heart_failure_clinical_records_dataset.csv')
heart_raw['sex'] = np.where(heart_raw['sex'] == 1, 'Male','Female')
heart = heart_raw.drop(columns = ['DEATH_EVENT'])
data = pd.concat([input_df,heart], axis = 0)
df = data.copy()
df1 = data.copy()
def set_cpk(row):
if row["creatinine_phosphokinase"] >=10 and row["creatinine_phosphokinase"] <= 120:
return "Normal"
else:
return "High"
df = df.assign(cp_desc = df.apply(set_cpk, axis = 1))
def set_eject_fract(row):
if row["ejection_fraction"] <= 35:
return "Low"
elif row["ejection_fraction"] > 35 and row["ejection_fraction"] <= 49:
return "Below_Normal"
elif row["ejection_fraction"] > 50 and row["ejection_fraction"] <= 75:
return "Normal"
else:
return "High"
df['ejection_fraction_desc'] = df.apply(set_eject_fract, axis =1)
def set_platelets(row):
if row["sex"] == 'Female': #females
if row["platelets"] < 157000:
return "Low"
elif row["platelets"] >=157000 and row["platelets"] <= 371000:
return "Normal"
else:
return "High"
elif row["sex"] == 'Male': #males
if row["platelets"] < 135000:
return "Low"
if row["platelets"] >= 135000 and row["platelets"] <= 317000:
return "Normal"
else:
return "High"
df['platelets_desc'] = df.apply(set_platelets, axis = 1)
def set_sodium(row):
if row["serum_sodium"] < 135:
return "Low"
elif row["serum_sodium"] >=135 and row["serum_sodium"] <= 145:
return "Normal"
else:
return "High"
df['sodium_desc'] = df.apply(set_sodium, axis =1)
def set_creatinine(row):
if row["sex"] == 'Female': #females
if row["serum_creatinine"] >=0.5 and row["serum_creatinine"] <= 1.1:
return "Normal"
else:
return "High"
elif row["sex"] == 'Male': #males
if row["serum_creatinine"] >=0.6 and row["serum_creatinine"] <= 1.2:
return "Normal"
else:
return "High"
df['serum_creatinine_desc'] = df.apply(set_creatinine, axis = 1)
df2 = df1.copy()
df1 = pd.get_dummies(df1,columns = ['sex'], drop_first = True)
df2 = pd.get_dummies(df2,columns = ['sex'], drop_first = True)
st.subheader('User Medical Profile')
if df is not None:
st.write(input_df[:len(input_df)])
else:
st.write('This is the raw input df')
st.write(input_df[:len(input_df)])
col = ['age','creatinine_phosphokinase','ejection_fraction',
'platelets','serum_creatinine','serum_sodium','time',
'anaemia','diabetes','high_blood_pressure','smoking',
'sex_Male']
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import MinMaxScaler
col_trans = ColumnTransformer(remainder='passthrough',
transformers = [('scaler',MinMaxScaler(),
[0,2,4,6,7,8,10])])
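# Indexes [0, 2, 4, 6, 7, 8, 10] are the continuous columns after dummy
# encoding (age, creatinine_phosphokinase, ejection_fraction, platelets,
# serum_creatinine, serum_sodium, time); the remaining boolean columns and
# sex_Male pass through unchanged.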
trans = col_trans.fit_transform(df1)
trans = col_trans.transform(df2)
try:
trans = pd.DataFrame(trans,columns = col)
except ValueError:
st.header('**The data you entered is invalid!**')
st.header("""It either contains wrongly spelt and/or arranged column headers,"""
"""or more than seven columns.""")
df_ = trans[:len(input_df)]
st.subheader('Medical Profile Description')
if uploaded_file is not None:
st.write(df.iloc[:len(input_df),12:])
else:
st.write('These are the scaled input features of the user')
st.write(df.iloc[:len(input_df),12:])
load_clf = pickle.load(open('model.pkl', 'rb'))
try:
prediction = load_clf.predict(df_)
prediction_proba = load_clf.predict_proba(df_)
st.subheader('DIAGNOSIS')
for i in range(len(prediction)):
if prediction[i] > 0:
st.write(prediction[i],'-->This Patient is at Risk of Suffering a Heart Attack')
else:
st.write(prediction[i],'-->This Patient is in a stable Health Condition')
st.subheader('Probability')
st.write(prediction_proba)
except ValueError:
st.header("Invalid data was supplied to the predictor") |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 20:53:21 2020
@author: asherhensley
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import yulesimon as ys
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import numpy as np
import dash_table
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'background': '#000000',
'text': '#4ae2ed'
}
fig1 = make_subplots()
# fig1.update_layout(
# autosize=False,
# height=400,
# width=600,
# showlegend=False,
# #margin=dict(l=0,r=0,b=50,t=50),
# )
fig2 = make_subplots()
# fig2.update_layout(
# autosize=False,
# height=400,
# width=600,
# showlegend=False,
# #margin=dict(l=0,r=0,b=50,t=50),
# )
fig3 = make_subplots()
colors = {
'background': '#000000',
'text': '#7FDBFF'
}
df = pd.DataFrame(data={
"Key Statistics":[6],
"Values":[4]})
app.layout = html.Div(children=[
html.H1(children='CIRCLON-8', style={'textAlign':'left'}),
html.Div(children=[
'Ticker: ',
dcc.Input(id='Ticker',value='MSFT',type='text', size='50'),
html.Button('Search',id='Search',n_clicks=0)]
),
html.Br(),
html.H6(id='Status',children='Ready', style={'textAlign':'left'}),
# dash_table.DataTable(
# id='table',
# columns=[{"name": "Key Statistics", "id": "Key Statistics"},
# {"name": "Values", "id": "Values"}],
# data=df.to_dict('records')
# ),
dcc.Tabs(id="tabs", value='tab-1', children=[
dcc.Tab(label='Prices/Returns',
children=[dcc.Graph(id='Figure1',figure=fig1)]),
dcc.Tab(label='Volatility Profile',
children=[dcc.Graph(id='Figure2',figure=fig2)]),
dcc.Tab(label='Modeling Analysis',
children=[dcc.Graph(id='Figure3',figure=fig3)]),
]),
html.Div(id='tabs-content')
])
@app.callback(
Output(component_id='Status', component_property='children'),
Input(component_id='Search', component_property='n_clicks')
)
def set_status(n_clicks):
status = 'Searching...'
if n_clicks==0:
status = 'Initializing...'
return status
@app.callback(
Output(component_id='Figure1', component_property='figure'),
Output(component_id='Figure2', component_property='figure'),
Output(component_id='Figure3', component_property='figure'),
Output(component_id='Status', component_property='children'),
Input(component_id='Ticker', component_property='value'),
Input(component_id='Search', component_property='n_clicks')
)
def update_figure(ticker_in, n_clicks):
ctx = dash.callback_context
if not ctx.triggered:
ticker = 'MSFT'
else:
callback_id = ctx.triggered[0]['prop_id'].split('.')[0]
if callback_id=='Search':
ticker = ticker_in
else:
ticker = None
if ticker==None:
raise PreventUpdate
else:
# Run Model
closing_prices, log_returns, dates = ys.GetYahooFeed(ticker,5)
Chain = ys.TimeSeries(log_returns)
nsteps = 200
burnin = nsteps/2.0
downsample = 2
history = Chain.step(nsteps)
sigma, sample_size = ys.ExpectedValue(history.std_deviation, burnin, downsample)
mu, sample_size = ys.ExpectedValue(history.mean, burnin, downsample)
z = np.arange(-0.2,0.2,0.001)
yulesimon_PDF = ys.MixtureModel(z,mu/100,sigma/100)
H,b = np.histogram(log_returns,200)
delta = b[1]-b[0]
bctr = b[1:]-delta/2.0
empirical_PDF = H/(sum(H)*delta)
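# H/(sum(H)*delta) rescales the histogram counts into a density that
# integrates to (approximately) 1 over the log-return axis.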
gaussian_PDF = ys.Gaussian(z,np.mean(log_returns),1/np.var(log_returns))
# Update Prices/Returns
fig1 = make_subplots(rows=2,cols=1,shared_xaxes=True,vertical_spacing=0.05)
fig1.add_trace(go.Scatter(x=dates[1:],y=closing_prices[1:],
fill='tozeroy',
line_color='#0000ff',
fillcolor='#7474f7'), row=1,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=mu/100+2*sigma/100,
fill='tozeroy',
fillcolor='#ffb0b0',
mode='none'), row=2,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=mu/100-2*sigma/100,
fill='tozeroy',
fillcolor='#ffb0b0',
mode='none'), row=2,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=log_returns,
line_color='#ff0000'), row=2,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=mu,
line_color='#000000'), row=2,col=1)
#fig1.add_trace(go.Scatter(x=dates[1:],y=mu*0,line=dict(dash='dash'),
# line_color='#000000'), row=2,col=1)
fig1.update_layout(
showlegend=False,
height=700
)
fig1.update_yaxes(title_text='Daily Close',row=1,col=1)
fig1.update_yaxes(title_text='Daily Log-Return',row=2,col=1)
# Update Volatility Profile
fig2 = make_subplots(rows=1,cols=2,
shared_xaxes=True,
subplot_titles=("Linear Scale","Log Scale"))
fig2.add_trace(go.Scatter(x=bctr,y=empirical_PDF,mode='markers',marker_color='#ff0000'),row=1,col=1)
#fig2.add_trace(go.Scatter(x=z,y=gaussian_PDF,line_color='#edc24a',),row=1,col=1)
fig2.add_trace(go.Scatter(x=z,y=yulesimon_PDF,line_color='#0000ff',),row=1,col=1)
fig2.add_trace(go.Scatter(x=bctr,y=empirical_PDF,mode='markers',marker_color='#ff0000'),row=1,col=2)
#fig2.add_trace(go.Scatter(x=z,y=gaussian_PDF,line_color='#edc24a',),row=1,col=2)
fig2.add_trace(go.Scatter(x=z,y=yulesimon_PDF,line_color='#0000ff',),row=1,col=2)
fig2.update_xaxes(title_text='Log Returns',row=1,col=1)
fig2.update_yaxes(title_text='Probability Density',row=1,col=1)
fig2.update_xaxes(title_text='Log Returns',row=1,col=2)
fig2.update_yaxes(title_text='Probability Density',type="log",row=1,col=2)
fig2.update_layout(showlegend=False)
# Update Modeling Analysis Tab
fig3 = make_subplots(rows=1,cols=2)
fig3.add_trace(go.Scatter(y=history.log_likelihood,line_color='#0000ff',),row=1,col=1)
fig3.add_trace(go.Scatter(y=history.pvalue,line_color='#ff0000',),row=1,col=2)
fig3.update_xaxes(title_text='Iteration',row=1,col=1)
fig3.update_yaxes(title_text='Log-Likelihood',row=1,col=1)
fig3.update_xaxes(title_text='Iteration',row=1,col=2)
fig3.update_yaxes(title_text='p-Value',type="log",row=1,col=2)
fig3.update_layout(showlegend=False)
return fig1, fig2, fig3, 'Ready'
if __name__ == '__main__':
app.run_server(debug=True)
|
<reponame>legitbee/pulumi-ovh
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DbaasLogsInputArgs', 'DbaasLogsInput']
@pulumi.input_type
class DbaasLogsInputArgs:
def __init__(__self__, *,
configuration: pulumi.Input['DbaasLogsInputConfigurationArgs'],
description: pulumi.Input[str],
engine_id: pulumi.Input[str],
service_name: pulumi.Input[str],
stream_id: pulumi.Input[str],
title: pulumi.Input[str],
allowed_networks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
exposed_port: Optional[pulumi.Input[str]] = None,
nb_instance: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a DbaasLogsInput resource.
:param pulumi.Input['DbaasLogsInputConfigurationArgs'] configuration: Input configuration
:param pulumi.Input[str] description: Input description
:param pulumi.Input[str] engine_id: Input engine ID
:param pulumi.Input[str] stream_id: Associated Graylog stream
:param pulumi.Input[str] title: Input title
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_networks: IP blocks
:param pulumi.Input[str] exposed_port: Port
:param pulumi.Input[int] nb_instance: Number of instance running
"""
pulumi.set(__self__, "configuration", configuration)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "engine_id", engine_id)
pulumi.set(__self__, "service_name", service_name)
pulumi.set(__self__, "stream_id", stream_id)
pulumi.set(__self__, "title", title)
if allowed_networks is not None:
pulumi.set(__self__, "allowed_networks", allowed_networks)
if exposed_port is not None:
pulumi.set(__self__, "exposed_port", exposed_port)
if nb_instance is not None:
pulumi.set(__self__, "nb_instance", nb_instance)
@property
@pulumi.getter
def configuration(self) -> pulumi.Input['DbaasLogsInputConfigurationArgs']:
"""
Input configuration
"""
return pulumi.get(self, "configuration")
@configuration.setter
def configuration(self, value: pulumi.Input['DbaasLogsInputConfigurationArgs']):
pulumi.set(self, "configuration", value)
@property
@pulumi.getter
def description(self) -> pulumi.Input[str]:
"""
Input description
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: pulumi.Input[str]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="engineId")
def engine_id(self) -> pulumi.Input[str]:
"""
Input engine ID
"""
return pulumi.get(self, "engine_id")
@engine_id.setter
def engine_id(self, value: pulumi.Input[str]):
pulumi.set(self, "engine_id", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="streamId")
def stream_id(self) -> pulumi.Input[str]:
"""
Associated Graylog stream
"""
return pulumi.get(self, "stream_id")
@stream_id.setter
def stream_id(self, value: pulumi.Input[str]):
pulumi.set(self, "stream_id", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
"""
Input title
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter(name="allowedNetworks")
def allowed_networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IP blocks
"""
return pulumi.get(self, "allowed_networks")
@allowed_networks.setter
def allowed_networks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_networks", value)
@property
@pulumi.getter(name="exposedPort")
def exposed_port(self) -> Optional[pulumi.Input[str]]:
"""
Port
"""
return pulumi.get(self, "exposed_port")
@exposed_port.setter
def exposed_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exposed_port", value)
@property
@pulumi.getter(name="nbInstance")
def nb_instance(self) -> Optional[pulumi.Input[int]]:
"""
        Number of instances running
"""
return pulumi.get(self, "nb_instance")
@nb_instance.setter
def nb_instance(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "nb_instance", value)
@pulumi.input_type
class _DbaasLogsInputState:
def __init__(__self__, *,
allowed_networks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
configuration: Optional[pulumi.Input['DbaasLogsInputConfigurationArgs']] = None,
created_at: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_id: Optional[pulumi.Input[str]] = None,
exposed_port: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
input_id: Optional[pulumi.Input[str]] = None,
is_restart_required: Optional[pulumi.Input[bool]] = None,
nb_instance: Optional[pulumi.Input[int]] = None,
public_address: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
ssl_certificate: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
stream_id: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
updated_at: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DbaasLogsInput resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_networks: IP blocks
:param pulumi.Input['DbaasLogsInputConfigurationArgs'] configuration: Input configuration
:param pulumi.Input[str] created_at: Input creation
:param pulumi.Input[str] description: Input description
:param pulumi.Input[str] engine_id: Input engine ID
:param pulumi.Input[str] exposed_port: Port
:param pulumi.Input[str] hostname: Hostname
:param pulumi.Input[str] input_id: Input ID
        :param pulumi.Input[bool] is_restart_required: Indicates whether the input needs to be restarted
        :param pulumi.Input[int] nb_instance: Number of instances running
:param pulumi.Input[str] public_address: Input IP address
:param pulumi.Input[str] ssl_certificate: Input SSL certificate
:param pulumi.Input[str] status: init: configuration required, pending: ready to start, running: available
:param pulumi.Input[str] stream_id: Associated Graylog stream
:param pulumi.Input[str] title: Input title
:param pulumi.Input[str] updated_at: Input last update
"""
if allowed_networks is not None:
pulumi.set(__self__, "allowed_networks", allowed_networks)
if configuration is not None:
pulumi.set(__self__, "configuration", configuration)
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if description is not None:
pulumi.set(__self__, "description", description)
if engine_id is not None:
pulumi.set(__self__, "engine_id", engine_id)
if exposed_port is not None:
pulumi.set(__self__, "exposed_port", exposed_port)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if input_id is not None:
pulumi.set(__self__, "input_id", input_id)
if is_restart_required is not None:
pulumi.set(__self__, "is_restart_required", is_restart_required)
if nb_instance is not None:
pulumi.set(__self__, "nb_instance", nb_instance)
if public_address is not None:
pulumi.set(__self__, "public_address", public_address)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if ssl_certificate is not None:
pulumi.set(__self__, "ssl_certificate", ssl_certificate)
if status is not None:
pulumi.set(__self__, "status", status)
if stream_id is not None:
pulumi.set(__self__, "stream_id", stream_id)
if title is not None:
pulumi.set(__self__, "title", title)
if updated_at is not None:
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="allowedNetworks")
def allowed_networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IP blocks
"""
return pulumi.get(self, "allowed_networks")
@allowed_networks.setter
def allowed_networks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_networks", value)
@property
@pulumi.getter
def configuration(self) -> Optional[pulumi.Input['DbaasLogsInputConfigurationArgs']]:
"""
Input configuration
"""
return pulumi.get(self, "configuration")
@configuration.setter
def configuration(self, value: Optional[pulumi.Input['DbaasLogsInputConfigurationArgs']]):
pulumi.set(self, "configuration", value)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
Input creation
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Input description
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="engineId")
def engine_id(self) -> Optional[pulumi.Input[str]]:
"""
Input engine ID
"""
return pulumi.get(self, "engine_id")
@engine_id.setter
def engine_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine_id", value)
@property
@pulumi.getter(name="exposedPort")
def exposed_port(self) -> Optional[pulumi.Input[str]]:
"""
Port
"""
return pulumi.get(self, "exposed_port")
@exposed_port.setter
def exposed_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exposed_port", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
Hostname
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="inputId")
def input_id(self) -> Optional[pulumi.Input[str]]:
"""
Input ID
"""
return pulumi.get(self, "input_id")
@input_id.setter
def input_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "input_id", value)
@property
@pulumi.getter(name="isRestartRequired")
def is_restart_required(self) -> Optional[pulumi.Input[bool]]:
"""
        Indicates whether the input needs to be restarted
"""
return pulumi.get(self, "is_restart_required")
@is_restart_required.setter
def is_restart_required(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_restart_required", value)
@property
@pulumi.getter(name="nbInstance")
def nb_instance(self) -> Optional[pulumi.Input[int]]:
"""
        Number of instances running
"""
return pulumi.get(self, "nb_instance")
@nb_instance.setter
def nb_instance(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "nb_instance", value)
@property
@pulumi.getter(name="publicAddress")
def public_address(self) -> Optional[pulumi.Input[str]]:
"""
Input IP address
"""
return pulumi.get(self, "public_address")
@public_address.setter
def public_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_address", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="sslCertificate")
def ssl_certificate(self) -> Optional[pulumi.Input[str]]:
"""
Input SSL certificate
"""
return pulumi.get(self, "ssl_certificate")
@ssl_certificate.setter
def ssl_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_certificate", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
init: configuration required, pending: ready to start, running: available
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="streamId")
def stream_id(self) -> Optional[pulumi.Input[str]]:
"""
Associated Graylog stream
"""
return pulumi.get(self, "stream_id")
@stream_id.setter
def stream_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_id", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
Input title
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[pulumi.Input[str]]:
"""
Input last update
"""
return pulumi.get(self, "updated_at")
@updated_at.setter
def updated_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated_at", value)
class DbaasLogsInput(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_networks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
configuration: Optional[pulumi.Input[pulumi.InputType['DbaasLogsInputConfigurationArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_id: Optional[pulumi.Input[str]] = None,
exposed_port: Optional[pulumi.Input[str]] = None,
nb_instance: Optional[pulumi.Input[int]] = None,
service_name: Optional[pulumi.Input[str]] = None,
stream_id: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a DbaasLogsInput resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_networks: IP blocks
:param pulumi.Input[pulumi.InputType['DbaasLogsInputConfigurationArgs']] configuration: Input configuration
:param pulumi.Input[str] description: Input description
:param pulumi.Input[str] engine_id: Input engine ID
:param pulumi.Input[str] exposed_port: Port
        :param pulumi.Input[int] nb_instance: Number of instances running
:param pulumi.Input[str] stream_id: Associated Graylog stream
:param pulumi.Input[str] title: Input title
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DbaasLogsInputArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a DbaasLogsInput resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param DbaasLogsInputArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DbaasLogsInputArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_networks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
configuration: Optional[pulumi.Input[pulumi.InputType['DbaasLogsInputConfigurationArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_id: Optional[pulumi.Input[str]] = None,
exposed_port: Optional[pulumi.Input[str]] = None,
nb_instance: Optional[pulumi.Input[int]] = None,
service_name: Optional[pulumi.Input[str]] = None,
stream_id: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DbaasLogsInputArgs.__new__(DbaasLogsInputArgs)
__props__.__dict__["allowed_networks"] = allowed_networks
if configuration is None and not opts.urn:
raise TypeError("Missing required property 'configuration'")
__props__.__dict__["configuration"] = configuration
if description is None and not opts.urn:
raise TypeError("Missing required property 'description'")
__props__.__dict__["description"] = description
if engine_id is None and not opts.urn:
raise TypeError("Missing required property 'engine_id'")
__props__.__dict__["engine_id"] = engine_id
__props__.__dict__["exposed_port"] = exposed_port
__props__.__dict__["nb_instance"] = nb_instance
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
if stream_id is None and not opts.urn:
raise TypeError("Missing required property 'stream_id'")
__props__.__dict__["stream_id"] = stream_id
if title is None and not opts.urn:
raise TypeError("Missing required property 'title'")
__props__.__dict__["title"] = title
__props__.__dict__["created_at"] = None
__props__.__dict__["hostname"] = None
__props__.__dict__["input_id"] = None
__props__.__dict__["is_restart_required"] = None
__props__.__dict__["public_address"] = None
__props__.__dict__["ssl_certificate"] = None
__props__.__dict__["status"] = None
__props__.__dict__["updated_at"] = None
super(DbaasLogsInput, __self__).__init__(
'ovh:index/dbaasLogsInput:DbaasLogsInput',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allowed_networks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
configuration: Optional[pulumi.Input[pulumi.InputType['DbaasLogsInputConfigurationArgs']]] = None,
created_at: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_id: Optional[pulumi.Input[str]] = None,
exposed_port: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
input_id: Optional[pulumi.Input[str]] = None,
is_restart_required: Optional[pulumi.Input[bool]] = None,
nb_instance: Optional[pulumi.Input[int]] = None,
public_address: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
ssl_certificate: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
stream_id: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
updated_at: Optional[pulumi.Input[str]] = None) -> 'DbaasLogsInput':
"""
Get an existing DbaasLogsInput resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_networks: IP blocks
:param pulumi.Input[pulumi.InputType['DbaasLogsInputConfigurationArgs']] configuration: Input configuration
:param pulumi.Input[str] created_at: Input creation
:param pulumi.Input[str] description: Input description
:param pulumi.Input[str] engine_id: Input engine ID
:param pulumi.Input[str] exposed_port: Port
:param pulumi.Input[str] hostname: Hostname
:param pulumi.Input[str] input_id: Input ID
        :param pulumi.Input[bool] is_restart_required: Indicates whether the input needs to be restarted
        :param pulumi.Input[int] nb_instance: Number of instances running
:param pulumi.Input[str] public_address: Input IP address
:param pulumi.Input[str] ssl_certificate: Input SSL certificate
:param pulumi.Input[str] status: init: configuration required, pending: ready to start, running: available
:param pulumi.Input[str] stream_id: Associated Graylog stream
:param pulumi.Input[str] title: Input title
:param pulumi.Input[str] updated_at: Input last update
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DbaasLogsInputState.__new__(_DbaasLogsInputState)
__props__.__dict__["allowed_networks"] = allowed_networks
__props__.__dict__["configuration"] = configuration
__props__.__dict__["created_at"] = created_at
__props__.__dict__["description"] = description
__props__.__dict__["engine_id"] = engine_id
__props__.__dict__["exposed_port"] = exposed_port
__props__.__dict__["hostname"] = hostname
__props__.__dict__["input_id"] = input_id
__props__.__dict__["is_restart_required"] = is_restart_required
__props__.__dict__["nb_instance"] = nb_instance
__props__.__dict__["public_address"] = public_address
__props__.__dict__["service_name"] = service_name
__props__.__dict__["ssl_certificate"] = ssl_certificate
__props__.__dict__["status"] = status
__props__.__dict__["stream_id"] = stream_id
__props__.__dict__["title"] = title
__props__.__dict__["updated_at"] = updated_at
return DbaasLogsInput(resource_name, opts=opts, __props__=__props__)
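    # illustrative sketch (not part of the generated SDK): importing the state of an
    # already-provisioned input by its provider ID; the resource name and ID literal
    # below are placeholders
    #
    #     existing = DbaasLogsInput.get("imported-input", id="xxxxxxxx")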
@property
@pulumi.getter(name="allowedNetworks")
def allowed_networks(self) -> pulumi.Output[Sequence[str]]:
"""
IP blocks
"""
return pulumi.get(self, "allowed_networks")
@property
@pulumi.getter
def configuration(self) -> pulumi.Output['outputs.DbaasLogsInputConfiguration']:
"""
Input configuration
"""
return pulumi.get(self, "configuration")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Input creation
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Input description
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="engineId")
def engine_id(self) -> pulumi.Output[str]:
"""
Input engine ID
"""
return pulumi.get(self, "engine_id")
@property
@pulumi.getter(name="exposedPort")
def exposed_port(self) -> pulumi.Output[str]:
"""
Port
"""
return pulumi.get(self, "exposed_port")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[str]:
"""
Hostname
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter(name="inputId")
def input_id(self) -> pulumi.Output[str]:
"""
Input ID
"""
return pulumi.get(self, "input_id")
@property
@pulumi.getter(name="isRestartRequired")
def is_restart_required(self) -> pulumi.Output[bool]:
"""
        Indicates whether the input needs to be restarted
"""
return pulumi.get(self, "is_restart_required")
@property
@pulumi.getter(name="nbInstance")
def nb_instance(self) -> pulumi.Output[int]:
"""
        Number of instances running
"""
return pulumi.get(self, "nb_instance")
@property
@pulumi.getter(name="publicAddress")
def public_address(self) -> pulumi.Output[str]:
"""
Input IP address
"""
return pulumi.get(self, "public_address")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "service_name")
@property
@pulumi.getter(name="sslCertificate")
def ssl_certificate(self) -> pulumi.Output[str]:
"""
Input SSL certificate
"""
return pulumi.get(self, "ssl_certificate")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
init: configuration required, pending: ready to start, running: available
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="streamId")
def stream_id(self) -> pulumi.Output[str]:
"""
Associated Graylog stream
"""
return pulumi.get(self, "stream_id")
@property
@pulumi.getter
def title(self) -> pulumi.Output[str]:
"""
Input title
"""
return pulumi.get(self, "title")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
Input last update
"""
return pulumi.get(self, "updated_at")
'''
explicit control evaluator
build a machine that can evaluate any scheme program
we skip the parsing part; instead we reuse the parser in sicp414_evaluator to generate expressions
we feed our machine with expressions and resolved distances
we allocate a special register "dist" to hold the resolved distances
this register stays constant after initialization
we also have two more temporary registers "unev2" and "unev3",
mainly because we use python lists instead of scheme linked lists,
and traversing a python list needs more local variables
our ec evaluator is written in machine language code, see ec_eval_code_list
we also skip parsing, so our code is written as instruction structures
we lack the mechanism to write the evaluator in a modular way in machine language,
unless we generate the code using python, but that would make the code hard to read
we use a lot of high level operations, making the code less similar to assembly
we should consider them specialized hardware
but some are just selectors, which we could implement with python getattr
for environment related operations, we tend to use simple ones like env_set, instead of pure_eval_set
this is to be consistent with our compiler, which should not use any operation involving expressions
error reporting is achieved by special return values
for operations that can fail, instead of purely returning the result, we return pair(error, result)
then we extract "error" via car, and test/branch on it
to print an error message, we need a token to get its position in the source code
we acquire the token from the expression itself using get_var_name_token and get_paren_token
then we concatenate the token and "error", put the result in a special register err, and goto the error-handler label
notice we only use the "err" register to store the final concatenated error message as input to error-handler
we don't store intermediate un-tokened messages there, nor any other information
in the recursion test, factorial iteration uses a constant max stack depth (16)
this shows our tail recursion support is correctly implemented
to support that we need to ensure the last operation in a procedure/primitive call is the recursive expression evaluation
no restore or assignment should come after that
from the results we can see that running factorial on the ec evaluator needs 100x more instructions
than the handcrafted machine in sicp524_monitor; the excessive work includes:
the operator and every operand is an expression, requiring evaluation; in sicp524_monitor they are just constants;
all operands need to form a list and be put into argl; in sicp524_monitor they are fed directly;
stack operations, and error checking for arity and primitives, also consume lots of instructions
'''
from typing import Any, Callable, List, Tuple, Union
from sicp414_evaluator import AndExpr, BooleanVal, CallExpr, DefineProcExpr, DefineVarExpr, Environment, Expression, \
GenericExpr, GenericVal, IfExpr, NotExpr, NumberVal, OrExpr, PairVal, PrimVal, ProcPlainVal, ProcVal, \
SchemeEnvError, SchemePanic, SchemePrimError, SchemeRuntimeError, SchemeVal, SequenceExpr, SetExpr, \
StringVal, SymbolExpr, SymbolVal, Token, UndefVal, env_define, env_extend, install_is_equal_rules, \
install_parse_expr_rules, install_primitives, install_stringify_expr_rules, install_stringify_value_rules, \
is_truthy, make_global_env, parse_expr, parse_tokens, pure_eval_boolean, pure_eval_define_proc_plain_value, \
pure_eval_lambda_plain, pure_eval_nil, pure_eval_number, pure_eval_quote, pure_eval_string, \
pure_get_proc_arguments, pure_get_proc_parameters, scan_source, scheme_flush, scheme_panic, stringify_expr, stringify_token, stringify_value
from sicp416_resolver import ResDistancesType, env_lookup_at, env_set_at, install_resolver_rules, resolve_expr
from sicp523_simulator import AssignMstmt, BranchMstmt, ConstMxpr, GotoMstmt, InstMstmt, LabelMstmt, LabelMxpr, Mstmt, OpMxpr, \
PerformMstmt, RegInstPtr, RegMxpr, RestoreMstmt, SaveMstmt, TestMstmt, get_operations, init_machine_pc, \
install_assemble_mstmt_rules, install_assemble_mxpr_rules, install_operations, \
make_machine, make_run_machine, update_operations
from sicp524_monitor import MachineStatistic, TraceState, install_stringify_mstmt_rules, install_stringify_mxpr_rules, \
monitor_statistics, stringify_mstmt, trace_machine
# fmt: off
ec_eval_code_list: List[Mstmt] = [
LabelMstmt('main'),
AssignMstmt('continue', LabelMxpr('done')),
LabelMstmt('eval-dispatch'),
# unev stores the type name of expr
AssignMstmt('unev', OpMxpr('get_expr_type', [RegMxpr('expr')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('SequenceExpr'))])),
BranchMstmt(LabelMxpr('ev-sequence')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('SymbolExpr'))])),
BranchMstmt(LabelMxpr('ev-symbol')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('StringExpr'))])),
BranchMstmt(LabelMxpr('ev-string')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('NumberExpr'))])),
BranchMstmt(LabelMxpr('ev-number')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('BooleanExpr'))])),
BranchMstmt(LabelMxpr('ev-boolean')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('NilExpr'))])),
BranchMstmt(LabelMxpr('ev-nil')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('QuoteExpr'))])),
BranchMstmt(LabelMxpr('ev-quote')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('LambdaExpr'))])),
BranchMstmt(LabelMxpr('ev-lambda')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('CallExpr'))])),
BranchMstmt(LabelMxpr('ev-call')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('IfExpr'))])),
BranchMstmt(LabelMxpr('ev-if')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('SetExpr'))])),
BranchMstmt(LabelMxpr('ev-set')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('DefineVarExpr'))])),
BranchMstmt(LabelMxpr('ev-define-var')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('DefineProcExpr'))])),
BranchMstmt(LabelMxpr('ev-define-proc')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('AndExpr'))])),
BranchMstmt(LabelMxpr('ev-and')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('OrExpr'))])),
BranchMstmt(LabelMxpr('ev-or')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('NotExpr'))])),
BranchMstmt(LabelMxpr('ev-not')),
    # build an error message from the expression type, put it in err, then goto error-handler
AssignMstmt('err', OpMxpr('ec_eval_expr_invalid', [RegMxpr('unev')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-string'),
AssignMstmt('val', OpMxpr('pure_eval_string', [RegMxpr('expr')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-number'),
AssignMstmt('val', OpMxpr('pure_eval_number', [RegMxpr('expr')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-boolean'),
AssignMstmt('val', OpMxpr('pure_eval_boolean', [RegMxpr('expr')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-nil'),
AssignMstmt('val', OpMxpr('pure_eval_nil', [])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-quote'),
AssignMstmt('val', OpMxpr('pure_eval_quote', [RegMxpr('expr')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-lambda'),
AssignMstmt('val', OpMxpr('pure_eval_lambda_plain', [RegMxpr('expr'), RegMxpr('env')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-symbol'),
AssignMstmt('unev', OpMxpr('get_var_name', [RegMxpr('expr')])),
# unev2 = distance
AssignMstmt('unev2', OpMxpr('get_distance', [RegMxpr('dist'), RegMxpr('expr')])),
# val = pair(error, result)
AssignMstmt('val', OpMxpr('ec_env_lookup_at', [RegMxpr('env'), RegMxpr('unev2'), RegMxpr('unev')])),
# unev = error
AssignMstmt('unev', OpMxpr('car', [RegMxpr('val')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(UndefVal())])),
BranchMstmt(LabelMxpr('ev-symbol-no-error')),
# has error, first val = token, then val = concatenated_message
AssignMstmt('val', OpMxpr('get_var_name_token', [RegMxpr('expr')])),
AssignMstmt('err', OpMxpr('concat_token_message', [RegMxpr('val'), RegMxpr('unev')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-symbol-no-error'),
# val = result
AssignMstmt('val', OpMxpr('cdr', [RegMxpr('val')])),
GotoMstmt(RegMxpr('continue')),
# we need three registers for contents, n=len(contents), i
# we use unev, unev2, unev3
LabelMstmt('ev-sequence'),
AssignMstmt('unev', OpMxpr('get_expr_contents', [RegMxpr('expr')])),
AssignMstmt('unev2', OpMxpr('get_exprs_len', [RegMxpr('unev')])),
TestMstmt(OpMxpr('>', [RegMxpr('unev2'), ConstMxpr(NumberVal(0))])),
BranchMstmt(LabelMxpr('ev-sequence-non-empty')),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-sequence-non-empty'),
SaveMstmt('continue'),
# now unev2 = len-1
AssignMstmt('unev2', OpMxpr('-', [RegMxpr('unev2'), ConstMxpr(NumberVal(1))])),
# init unev3 = 0
AssignMstmt('unev3', ConstMxpr(NumberVal(0))),
LabelMstmt('ev-sequence-fronts'),
TestMstmt(OpMxpr('=', [RegMxpr('unev3'), RegMxpr('unev2')])),
BranchMstmt(LabelMxpr('ev-sequence-last')),
SaveMstmt('unev'),
SaveMstmt('unev2'),
SaveMstmt('unev3'),
SaveMstmt('env'),
AssignMstmt('expr', OpMxpr('get_expr_at', [RegMxpr('unev'), RegMxpr('unev3')])),
AssignMstmt('continue', LabelMxpr('ev-sequence-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-sequence-ret'),
RestoreMstmt('env'),
RestoreMstmt('unev3'),
RestoreMstmt('unev2'),
RestoreMstmt('unev'),
AssignMstmt('unev3', OpMxpr('+', [RegMxpr('unev3'), ConstMxpr(NumberVal(1))])),
GotoMstmt(LabelMxpr('ev-sequence-fronts')),
# support for tail recursion: ensure goto eval-dispatch is the last instruction
# to do that, can't save/restore for env/unev/unev2/unev3
# and should restore continue before call rather than after
LabelMstmt('ev-sequence-last'),
AssignMstmt('expr', OpMxpr('get_expr_at', [RegMxpr('unev'), RegMxpr('unev3')])),
RestoreMstmt('continue'),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-call'),
SaveMstmt('continue'),
SaveMstmt('expr'),
SaveMstmt('env'),
AssignMstmt('unev', OpMxpr('get_call_operands', [RegMxpr('expr')])),
SaveMstmt('unev'),
# getting operator
AssignMstmt('expr', OpMxpr('get_call_operator', [RegMxpr('expr')])),
AssignMstmt('continue', LabelMxpr('ev-call-operands')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-call-operands'),
# getting operands
# we still do save/restore for the last operand
    # this loses a little performance, but it does not break tail recursion
AssignMstmt('proc', RegMxpr('val')),
# each time we must create a new empty list
# therefore we must call op init_val_list
# we cannot assign from a const [], otherwise that [] will be mutated and reused
    # an alternative is to never mutate the list and instead have every append create a new one
AssignMstmt('argl', OpMxpr('init_val_list', [])),
RestoreMstmt('unev'),
RestoreMstmt('env'),
AssignMstmt('unev2', OpMxpr('get_exprs_len', [RegMxpr('unev')])),
AssignMstmt('unev3', ConstMxpr(NumberVal(0))),
SaveMstmt('proc'),
LabelMstmt('ev-call-operand-start'),
TestMstmt(OpMxpr('=', [RegMxpr('unev3'), RegMxpr('unev2')])),
BranchMstmt(LabelMxpr('ev-call-call')),
SaveMstmt('env'),
SaveMstmt('unev'),
SaveMstmt('unev2'),
SaveMstmt('unev3'),
SaveMstmt('argl'),
AssignMstmt('expr', OpMxpr('get_expr_at', [RegMxpr('unev'), RegMxpr('unev3')])),
AssignMstmt('continue', LabelMxpr('ev-call-operand-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-call-operand-ret'),
RestoreMstmt('argl'),
RestoreMstmt('unev3'),
RestoreMstmt('unev2'),
RestoreMstmt('unev'),
RestoreMstmt('env'),
# the evil list mutation, because of this argl must be recreated from op in every call
PerformMstmt(OpMxpr('append_val_list', [RegMxpr('argl'), RegMxpr('val')])),
AssignMstmt('unev3', OpMxpr('+', [RegMxpr('unev3'), ConstMxpr(NumberVal(1))])),
GotoMstmt(LabelMxpr('ev-call-operand-start')),
# calling body, need proc, and argl is already correct
LabelMstmt('ev-call-call'),
RestoreMstmt('proc'),
RestoreMstmt('expr'),
RestoreMstmt('continue'),
AssignMstmt('unev', OpMxpr('get_val_type', [RegMxpr('proc')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('ProcPlainVal'))])),
BranchMstmt(LabelMxpr('ev-call-proc-plain')),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(StringVal('PrimVal'))])),
BranchMstmt(LabelMxpr('ev-call-prim')),
GotoMstmt(LabelMxpr('ev-call-invalid')),
LabelMstmt('ev-call-invalid'),
AssignMstmt('unev', OpMxpr('get_paren_token', [RegMxpr('expr')])),
AssignMstmt('val', OpMxpr('ec_eval_call_invalid', [RegMxpr('proc')])),
AssignMstmt('err', OpMxpr('concat_token_message', [RegMxpr('unev'), RegMxpr('val')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-call-proc-plain'),
AssignMstmt('val', OpMxpr('ec_check_proc_arity', [RegMxpr('proc'), RegMxpr('argl')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('val'), ConstMxpr(UndefVal())])),
BranchMstmt(LabelMxpr('ev-call-proc-plain-arity-ok')),
AssignMstmt('unev', OpMxpr('get_paren_token', [RegMxpr('expr')])),
AssignMstmt('err', OpMxpr('concat_token_message', [RegMxpr('unev'), RegMxpr('val')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-call-proc-plain-arity-ok'),
AssignMstmt('env', OpMxpr('get_proc_env', [RegMxpr('proc')])),
AssignMstmt('unev', OpMxpr('get_call_parameters', [RegMxpr('proc')])),
AssignMstmt('unev2', OpMxpr('get_call_arguments', [RegMxpr('proc'), RegMxpr('argl')])),
AssignMstmt('env', OpMxpr('ec_env_extend', [RegMxpr('env'), RegMxpr('unev'), RegMxpr('unev2')])),
AssignMstmt('expr', OpMxpr('get_proc_plain_body', [RegMxpr('proc')])),
GotoMstmt(LabelMxpr('ev-sequence')),
LabelMstmt('ev-call-prim'),
AssignMstmt('val', OpMxpr('ec_check_prim_arity', [RegMxpr('proc'), RegMxpr('argl')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('val'), ConstMxpr(UndefVal())])),
BranchMstmt(LabelMxpr('ev-call-prim-arity-ok')),
AssignMstmt('unev', OpMxpr('get_paren_token', [RegMxpr('expr')])),
AssignMstmt('err', OpMxpr('concat_token_message', [RegMxpr('unev'), RegMxpr('val')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-call-prim-arity-ok'),
    AssignMstmt('unev', OpMxpr('call_prim', [RegMxpr('proc'), RegMxpr('argl')])),
    AssignMstmt('val', OpMxpr('car', [RegMxpr('unev')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('val'), ConstMxpr(UndefVal())])),
BranchMstmt(LabelMxpr('ev-call-prim-call-ok')),
AssignMstmt('unev', OpMxpr('get_paren_token', [RegMxpr('expr')])),
AssignMstmt('err', OpMxpr('concat_token_message', [RegMxpr('unev'), RegMxpr('val')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-call-prim-call-ok'),
    AssignMstmt('val', OpMxpr('cdr', [RegMxpr('unev')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-if'),
SaveMstmt('continue'),
SaveMstmt('expr'),
SaveMstmt('env'),
AssignMstmt('expr', OpMxpr('get_if_predicate', [RegMxpr('expr')])),
AssignMstmt('continue', LabelMxpr('ev-if-predicate-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-if-predicate-ret'),
RestoreMstmt('env'),
RestoreMstmt('expr'),
RestoreMstmt('continue'),
    # although we could directly assign val to flag, we still follow the convention of using test
TestMstmt(OpMxpr('true?', [RegMxpr('val')])),
BranchMstmt(LabelMxpr('ev-if-then')),
TestMstmt(OpMxpr('has_if_else', [RegMxpr('expr')])),
BranchMstmt(LabelMxpr('ev-if-else')),
# no else
AssignMstmt('val', ConstMxpr(UndefVal())),
GotoMstmt(RegMxpr('continue')),
# tail recursion supported
LabelMstmt('ev-if-then'),
AssignMstmt('expr', OpMxpr('get_if_then', [RegMxpr('expr')])),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-if-else'),
AssignMstmt('expr', OpMxpr('get_if_else', [RegMxpr('expr')])),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-set'),
SaveMstmt('expr'),
SaveMstmt('env'),
SaveMstmt('continue'),
AssignMstmt('expr', OpMxpr('get_var_init', [RegMxpr('expr')])),
AssignMstmt('continue', LabelMxpr('ev-set-init-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
    # now val = initializer; it stays there until the end
LabelMstmt('ev-set-init-ret'),
RestoreMstmt('continue'),
RestoreMstmt('env'),
RestoreMstmt('expr'),
# unev = name
AssignMstmt('unev', OpMxpr('get_var_name', [RegMxpr('expr')])),
# unev2 = distance
AssignMstmt('unev2', OpMxpr('get_distance', [RegMxpr('dist'), RegMxpr('expr')])),
# unev = error
AssignMstmt('unev', OpMxpr('ec_env_set_at', [RegMxpr('env'), RegMxpr('unev2'), RegMxpr('unev'), RegMxpr('val')])),
TestMstmt(OpMxpr('equal?', [RegMxpr('unev'), ConstMxpr(UndefVal())])),
BranchMstmt(LabelMxpr('ev-set-ok')),
    # unev2 = name token
AssignMstmt('unev2', OpMxpr('get_var_name_token', [RegMxpr('expr')])),
AssignMstmt('err', OpMxpr('concat_token_message', [RegMxpr('unev2'), RegMxpr('unev')])),
GotoMstmt(LabelMxpr('error-handler')),
LabelMstmt('ev-set-ok'),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-define-var'),
SaveMstmt('expr'),
SaveMstmt('env'),
SaveMstmt('continue'),
AssignMstmt('expr', OpMxpr('get_var_init', [RegMxpr('expr')])),
AssignMstmt('continue', LabelMxpr('ev-define-var-init-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-define-var-init-ret'),
RestoreMstmt('continue'),
RestoreMstmt('env'),
RestoreMstmt('expr'),
AssignMstmt('unev', OpMxpr('get_var_name', [RegMxpr('expr')])),
PerformMstmt(OpMxpr('ec_env_define', [RegMxpr('env'), RegMxpr('unev'), RegMxpr('val')])),
AssignMstmt('val', OpMxpr('get_var_symbol', [RegMxpr('expr')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-define-proc'),
AssignMstmt('val', OpMxpr('ec_define_proc_plain_val', [RegMxpr('expr'), RegMxpr('env')])),
AssignMstmt('unev', OpMxpr('get_var_name', [RegMxpr('expr')])),
PerformMstmt(OpMxpr('ec_env_define', [RegMxpr('env'), RegMxpr('unev'), RegMxpr('val')])),
AssignMstmt('val', OpMxpr('get_var_symbol', [RegMxpr('expr')])),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-and'),
AssignMstmt('unev', OpMxpr('get_expr_contents', [RegMxpr('expr')])),
AssignMstmt('unev2', OpMxpr('get_exprs_len', [RegMxpr('unev')])),
SaveMstmt('continue'),
# init unev3 = 0
AssignMstmt('unev3', ConstMxpr(NumberVal(0))),
LabelMstmt('ev-and-loop'),
TestMstmt(OpMxpr('=', [RegMxpr('unev3'), RegMxpr('unev2')])),
BranchMstmt(LabelMxpr('ev-and-finish')),
SaveMstmt('unev'),
SaveMstmt('unev2'),
SaveMstmt('unev3'),
SaveMstmt('env'),
AssignMstmt('expr', OpMxpr('get_expr_at', [RegMxpr('unev'), RegMxpr('unev3')])),
AssignMstmt('continue', LabelMxpr('ev-and-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-and-ret'),
RestoreMstmt('env'),
RestoreMstmt('unev3'),
RestoreMstmt('unev2'),
RestoreMstmt('unev'),
AssignMstmt('unev3', OpMxpr('+', [RegMxpr('unev3'), ConstMxpr(NumberVal(1))])),
TestMstmt(OpMxpr('true?', [RegMxpr('val')])),
BranchMstmt(LabelMxpr('ev-and-loop')),
    # no support for tail recursion, because we don't know where to return early (at the first falsy expression)
LabelMstmt('ev-and-finish'),
RestoreMstmt('continue'),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-or'),
AssignMstmt('unev', OpMxpr('get_expr_contents', [RegMxpr('expr')])),
AssignMstmt('unev2', OpMxpr('get_exprs_len', [RegMxpr('unev')])),
SaveMstmt('continue'),
# init unev3 = 0
AssignMstmt('unev3', ConstMxpr(NumberVal(0))),
LabelMstmt('ev-or-loop'),
TestMstmt(OpMxpr('=', [RegMxpr('unev3'), RegMxpr('unev2')])),
BranchMstmt(LabelMxpr('ev-or-finish')),
SaveMstmt('unev'),
SaveMstmt('unev2'),
SaveMstmt('unev3'),
SaveMstmt('env'),
AssignMstmt('expr', OpMxpr('get_expr_at', [RegMxpr('unev'), RegMxpr('unev3')])),
AssignMstmt('continue', LabelMxpr('ev-or-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-or-ret'),
RestoreMstmt('env'),
RestoreMstmt('unev3'),
RestoreMstmt('unev2'),
RestoreMstmt('unev'),
AssignMstmt('unev3', OpMxpr('+', [RegMxpr('unev3'), ConstMxpr(NumberVal(1))])),
TestMstmt(OpMxpr('true?', [RegMxpr('val')])),
    # the only difference from and: the branch goes to finish rather than back to loop
BranchMstmt(LabelMxpr('ev-or-finish')),
GotoMstmt(LabelMxpr('ev-or-loop')),
LabelMstmt('ev-or-finish'),
RestoreMstmt('continue'),
GotoMstmt(RegMxpr('continue')),
LabelMstmt('ev-not'),
SaveMstmt('continue'),
AssignMstmt('expr', OpMxpr('get_expr_content', [RegMxpr('expr')])),
AssignMstmt('continue', LabelMxpr('ev-not-ret')),
GotoMstmt(LabelMxpr('eval-dispatch')),
LabelMstmt('ev-not-ret'),
AssignMstmt('val', OpMxpr('boolean_not', [RegMxpr('val')])),
RestoreMstmt('continue'),
GotoMstmt(RegMxpr('continue')),
    # the handling of all errors is the same
LabelMstmt('error-handler'),
    # just goto_panic, assuming the error message is in err
PerformMstmt(OpMxpr('goto_panic', [RegMxpr('err')])),
    # the following goto is not really necessary, since goto_panic exits execution
GotoMstmt(LabelMxpr('done')),
LabelMstmt('done')
]
# fmt: on
'''
we assume all data to be processed is of the following types:
Expression, List[Expression], Environment, SchemeVal, List[SchemeVal], Token, Dict[Union[SetExpr,SymbolExpr],int], RegInstPtr
we try to exclude pure integers and strings
only these types will appear in instructions and in operations' input/output
'''
'''stringify instructions'''
def stringify_inst_data(data: Any) -> str:
if isinstance(data, Token):
return '<token=%s>' % stringify_token(data)
elif isinstance(data, Environment):
return '<environment>'
elif isinstance(data, list):
return '[%s]' % (', '.join([stringify_inst_data(d) for d in data]))
elif isinstance(data, Expression):
return '<expression=%s>' % stringify_expr(data)
elif isinstance(data, SchemeVal):
return stringify_value(data)
elif isinstance(data, RegInstPtr):
return '<ptr=%d>' % data.index
else:
return '<invalid>'
'''
additional operations
we try to make their input/output only of the following types
'''
def concat_token_message(token: Token, message: StringVal):
rt_err = SchemeRuntimeError(token, message.value)
return StringVal(str(rt_err))
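# illustration (added for clarity): for an undefined symbol x on line 1, concatenating its
# token with the message "symbol undefined" yields a string like
# "runtime error at SYMBOL:x in line 1: symbol undefined", the format asserted in test_error below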
def get_distance(distances: ResDistancesType, expr: Union[SymbolExpr, SetExpr]):
return NumberVal(distances[expr])
def ec_env_lookup_at(env: Environment, distance: NumberVal, name: StringVal):
'''return error and result'''
try:
return PairVal(UndefVal(), env_lookup_at(env, int(distance.value), name.value))
except SchemeEnvError:
return PairVal(StringVal('symbol undefined'), UndefVal())
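# illustration (added for clarity): the machine code consumes this pair(error, result)
# convention as in ev-symbol above: val = ec_env_lookup_at(...); unev = car(val);
# if unev equals UndefVal() the lookup succeeded and cdr(val) is the result,
# otherwise unev holds the error message, which is tokened and sent to error-handler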
def ec_eval_expr_invalid(expr_type: StringVal):
message = 'expression type undefined: %s' % expr_type.value
return StringVal(message)
def ec_eval_call_invalid(operator: SchemeVal):
return StringVal('cannot call %s value' % type(operator).__name__)
def _ec_check_arity(name: str, pos_arity: int, has_rest: bool, arg_count: int):
if has_rest:
if arg_count < pos_arity:
return '%s expect at least %d arguments, only get %d' % (name, pos_arity, arg_count)
else:
if arg_count != pos_arity:
return '%s expect exactly %d arguments, but get %d' % (name, pos_arity, arg_count)
return ''
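# illustration (added for clarity), derived from the format strings above:
#   _ec_check_arity('f', 2, True, 1)  -> 'f expect at least 2 arguments, only get 1'
#   _ec_check_arity('g', 2, False, 3) -> 'g expect exactly 2 arguments, but get 3'
#   an empty string means the arity check passed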
def ec_check_prim_arity(operator: PrimVal, operands: List[SchemeVal]):
message = _ec_check_arity(
operator.name, operator.pos_arity, operator.has_rest, len(operands))
return UndefVal() if message == '' else StringVal(message)
def ec_check_proc_arity(operator: ProcVal, operands: List[SchemeVal]):
message = _ec_check_arity(operator.name, len(
operator.pos_paras), operator.rest_para is not None, len(operands))
return UndefVal() if message == '' else StringVal(message)
def get_expr_type(expr: GenericExpr):
return StringVal(type(expr).__name__)
def get_expr_contents(expr: Union[SequenceExpr, AndExpr, OrExpr]):
return expr.contents
def get_expr_content(expr: NotExpr):
return expr.content
def get_exprs_len(exprs: List[GenericExpr]):
return NumberVal(len(exprs))
def get_expr_at(exprs: List[GenericExpr], index: NumberVal):
return exprs[int(index.value)]
def get_paren_token(expr: CallExpr):
return expr.paren
def get_call_operator(expr: CallExpr):
return expr.operator
def get_call_operands(expr: CallExpr):
return expr.operands
def get_proc_plain_body(proc: ProcPlainVal):
return proc.body
def ec_define_proc_plain_val(expr: DefineProcExpr, env: Environment):
return pure_eval_define_proc_plain_value(expr.name.literal, expr.pos_paras, expr.rest_para, expr.body, env)
def init_val_list():
ls: List[SchemeVal] = []
return ls
def append_val_list(vals: List[SchemeVal], val: SchemeVal):
vals.append(val)
def get_val_type(expr: GenericVal):
return StringVal(type(expr).__name__)
def call_prim(operator: PrimVal, operands: List[SchemeVal]):
try:
return PairVal(UndefVal(), operator.body(*operands))
except SchemePrimError as err:
return PairVal(StringVal(err.message), UndefVal())
def get_if_predicate(expr: IfExpr):
return expr.pred
def get_if_then(expr: IfExpr):
return expr.then_branch
def get_if_else(expr: IfExpr):
return expr.else_branch
def has_if_else(expr: IfExpr):
return BooleanVal(expr.else_branch is not None)
def get_var_name(expr: Union[SymbolExpr, SetExpr, DefineVarExpr, DefineProcExpr]):
return StringVal(expr.name.literal)
def get_var_name_token(expr: Union[SymbolExpr, SetExpr, DefineVarExpr, DefineProcExpr]):
return expr.name
def get_var_symbol(expr: Union[SymbolExpr, SetExpr, DefineVarExpr, DefineProcExpr]):
return SymbolVal(expr.name.literal)
def get_var_init(expr: Union[SetExpr, DefineVarExpr]):
return expr.initializer
def ec_env_set_at(env: Environment, distance: NumberVal, name: StringVal, initializer: SchemeVal):
try:
env_set_at(env, int(distance.value), name.value, initializer)
return UndefVal()
except SchemeEnvError:
return StringVal('symbol undefined')
def ec_env_define(env: Environment, name: StringVal, initializer: SchemeVal):
env_define(env, name.value, initializer)
def goto_panic(message: StringVal):
scheme_panic(message.value)
def boolean_not(val: SchemeVal):
return BooleanVal(False if is_truthy(val) else True)
def boolean_true(val: SchemeVal):
return BooleanVal(is_truthy(val))
def get_proc_env(val: ProcPlainVal):
return val.env
def get_call_parameters(operator: ProcVal):
parameters = pure_get_proc_parameters(operator)
return [StringVal(s) for s in parameters]
def get_call_arguments(operator: ProcVal, operands: List[SchemeVal]):
arguments = pure_get_proc_arguments(operator, operands)
return arguments
def ec_env_extend(env: Environment, parameters: List[StringVal], arguments: List[SchemeVal]):
return env_extend(env, [s.value for s in parameters], arguments)
def install_operations_ec():
ops = {
'pure_eval_string': pure_eval_string,
'pure_eval_number': pure_eval_number,
'pure_eval_boolean': pure_eval_boolean,
'pure_eval_nil': pure_eval_nil,
'pure_eval_quote': pure_eval_quote,
'pure_eval_lambda_plain': pure_eval_lambda_plain,
'goto_panic': goto_panic,
'get_var_name_token': get_var_name_token,
'concat_token_message': concat_token_message,
'get_expr_type': get_expr_type,
'get_expr_contents': get_expr_contents,
'get_expr_content': get_expr_content,
'get_exprs_len': get_exprs_len,
'get_expr_at': get_expr_at,
'get_paren_token': get_paren_token,
'get_call_operator': get_call_operator,
'get_call_operands': get_call_operands,
'get_proc_plain_body': get_proc_plain_body,
'get_proc_env': get_proc_env,
'get_call_parameters': get_call_parameters,
'get_call_arguments': get_call_arguments,
'init_val_list': init_val_list,
'append_val_list': append_val_list,
'get_val_type': get_val_type,
'call_prim': call_prim,
'get_if_predicate': get_if_predicate,
'get_if_then': get_if_then,
'get_if_else': get_if_else,
'has_if_else': has_if_else,
'get_var_name': get_var_name,
'get_var_symbol': get_var_symbol,
'get_var_init': get_var_init,
'ec_env_set_at': ec_env_set_at,
'ec_env_define': ec_env_define,
'ec_env_lookup_at': ec_env_lookup_at,
'ec_env_extend': ec_env_extend,
'get_distance': get_distance,
'ec_define_proc_plain_val': ec_define_proc_plain_val,
'ec_eval_expr_invalid': ec_eval_expr_invalid,
'ec_eval_call_invalid': ec_eval_call_invalid,
'ec_check_prim_arity': ec_check_prim_arity,
'ec_check_proc_arity': ec_check_proc_arity,
'boolean_not': boolean_not,
'true?': boolean_true
}
update_operations(ops)
'''
we have two more tmp registers: unev2 and unev3
we also have a dist register to hold resolution distances
and an err register to hold the error message
'''
ec_eval_regs = {
'val': None,
'expr': None,
'env': None,
'unev': None,
'unev2': None,
'unev3': None,
'dist': None,
'err': None,
'proc': None,
'argl': None,
'continue': None
}
def prepare_source(source: str):
tokens = scan_source(source)
combos = parse_tokens(tokens)
expr = parse_expr(combos)
distances = resolve_expr(expr)
return expr, distances
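# A minimal driver sketch (added for illustration, not used by the tests below): evaluate a
# single source string on the ec evaluator machine and return the content of the "val"
# register. It mirrors the machine setup in test_one, minus tracing, and assumes
# install_rules() has already been called.
def run_ec_eval_source(source: str) -> SchemeVal:
    expr, distances = prepare_source(source.strip())
    machine = make_machine(ec_eval_regs, get_operations(), ec_eval_code_list)
    machine.state.regs.update({'expr': expr, 'env': make_global_env(), 'dist': distances})
    execute_machine = make_run_machine(lambda _: False)
    init_machine_pc(machine)
    execute_machine(machine)
    return machine.state.regs['val']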
def test_one(source: str, **kargs: str):
source = source.strip()
print('* source: %s' % source)
expr, distances = prepare_source(source)
try:
try:
# build machine
ops = get_operations()
glbenv = make_global_env()
machine = make_machine(ec_eval_regs, ops, ec_eval_code_list)
machine.state.regs.update(
{'expr': expr, 'env': glbenv, 'dist': distances})
execute_machine = make_run_machine(lambda _: False)
# trace
tstate = TraceState()
trace_machine(machine.instructions, machine.state,
stringify_inst_data, tstate)
# result
init_machine_pc(machine)
execute_machine(machine)
result = machine.state.regs['val']
result_str = stringify_value(result)
output_str = scheme_flush()
if len(output_str):
print('* output: %s' % output_str)
if 'output' in kargs:
assert output_str == kargs['output']
print('* result: %s' % result_str)
if 'result' in kargs:
assert result_str == kargs['result']
except SchemePanic as err:
# any kind of panic
print('* panic: %s' % err.message)
assert err.message == kargs['panic']
except Exception as err:
# print current instruction and regs
print('\n'.join(tstate.outputs[-200:]))
raise err
print('----------')
def test_one_recursion(source_tmpl: str, name: str, nrng: Tuple[int, int], get_val: Callable[[int], int]):
print('%s (%d, %d)' % (name, nrng[0], nrng[1]))
source_tmpl = source_tmpl.strip()
print(source_tmpl)
for nval in range(*nrng):
# source
source = source_tmpl % nval
expr, distances = prepare_source(source)
try:
# build machine
ops = get_operations()
glbenv = make_global_env()
machine = make_machine(ec_eval_regs, ops, ec_eval_code_list)
machine.state.regs.update(
{'expr': expr, 'env': glbenv, 'dist': distances})
execute_machine = make_run_machine(lambda _: False)
# statistics
statistics = MachineStatistic()
monitor_statistics(machine.instructions, machine.state, statistics)
# result
init_machine_pc(machine)
execute_machine(machine)
res = machine.state.regs['val']
res_str = stringify_value(res)
assert res_str == str(get_val(nval))
print('n = %d, val = %s, total_insts = %d, stack_ops = %d, stack_depth = %d' %
(nval, res_str, statistics.total_insts, statistics.stack_ops, statistics.stack_depth))
except SchemePanic as err:
# any kind of panic
print('* panic: %s' % err.message)
assert False
print('----------')
def test_error():
test_one(
'x',
panic='runtime error at SYMBOL:x in line 1: symbol undefined'
)
test_one(
'''
(define (f a b . c) c)
(f 1)
''',
panic='runtime error at LEFT_PAREN in line 2: f expect at least 2 arguments, only get 1'
)
test_one(
'''
(define f "not_an_op")
(f 1 2)
''',
panic='runtime error at LEFT_PAREN in line 2: cannot call StringVal value'
)
test_one(
'(+ "1" "2")',
panic='runtime error at LEFT_PAREN in line 1: <lambda> requires both operators to be numbers, now StringVal and StringVal'
)
def test_expr():
test_one(
'''
(define x 1)
2
"string"
()
#f
x
''',
result='1'
)
test_one(
'((lambda (x) (+ x 1)) 2)',
result='3',
)
test_one(
'''
(define (f x) (+ x 1))
(f 2)
''',
result='3',
)
test_one(
'(if #t (if 3 4) 2)',
result='4',
)
test_one(
'(and (+ 1 2) (or (not #t) (list 3 4)))',
result='(3 4)',
)
test_one(
'''
(define a 1)
(define (incr)
(set! a (+ a 1)))
(incr)
(incr)
''',
result='3'
)
test_one(
'''
(define a '(2 3 4))
(define b (cons 1 a))
(display (car b))
(newline)
(display (cdr b))
(newline)
(display (cdr (cdr b)))
(length b)
''',
output='1\n(2 3 4)\n(3 4)',
result='4'
)
def test_resolve():
    # use before initialization in a different scope passes resolution
test_one(
'''
(define (f)
(define (g) x)
(define x 1)
(g))
(f)
''',
result='1'
)
# local variable shadows outer definitions
test_one(
'''
(define x 1)
(define (f)
(define x 2)
x)
(f)
''',
result='2'
)
# mutual recursion ok, even in local scope
test_one(
'''
(define (f)
(define (even n) (if (= n 0) #t (odd (- n 1))))
(define (odd n) (if (= n 0) #f (even (- n 1))))
(even 5))
(f)
''',
result='#f'
)
def factorial(n: int):
product = 1
for i in range(1, n+1):
product *= i
return product
def test_recursion():
# recursion
test_one_recursion(
'''
(define (factorial n)
(if (= n 1)
1
(* n (factorial (- n 1)))))
(factorial %d)
''',
name='factorial-recur',
nrng=(1, 10),
get_val=factorial
)
# iteration, should use constant stack depth
test_one_recursion(
'''
(define (factorial n)
(define (fact-iter product counter)
(if (> counter n)
product
(fact-iter (* counter product)
(+ counter 1))))
(fact-iter 1 1))
(factorial %d)
''',
name='factorial-iter',
nrng=(1, 10),
get_val=factorial
)
def install_rules():
install_parse_expr_rules()
install_stringify_expr_rules()
install_stringify_value_rules()
install_is_equal_rules()
install_resolver_rules()
install_primitives()
install_assemble_mxpr_rules()
install_assemble_mstmt_rules()
install_stringify_mxpr_rules()
install_stringify_mstmt_rules()
install_operations()
install_operations_ec()
def print_code_list(code_list: List[Mstmt]):
index = 0
for code in code_list:
if isinstance(code, InstMstmt):
print('@ pc = %d: %s' %
(index, stringify_mstmt(code, stringify_inst_data)))
index += 1
else:
print(stringify_mstmt(code, stringify_inst_data))
def print_ec_eval_code_list():
print('ec_eval_code_list:')
print_code_list(ec_eval_code_list)
print('----------')
def test():
test_error()
test_expr()
test_resolve()
test_recursion()
if __name__ == '__main__':
install_rules()
print_ec_eval_code_list()
test()
from polymuse import dataset, dataset2 as d2, constant, enc_deco
from keras.utils import Sequence
import numpy, random, traceback, sys
from sklearn.model_selection import train_test_split
"""
It generates the note data batch-wise
Returns:
    NoteDataGenerator -- generator class for notes used while training
"""
class NoteDataGenerator(Sequence):
def __init__(self, trk, seq_names, batch_size, ip_memory, enc = True, steps_per_epoch = 0, norm = True):
        self.seq_names = numpy.array(seq_names) # list of midi files available
        self.batch_size = batch_size # batch size used while training, i.e. no. of instances at a time
        self.sFlat = None # stores the sFlat representation of the midi file
        self.top = 0 # increases with every file
        self.trk = trk # which track (lead : 0, chorus : 1, drum : 2)
        self.DEPTH = constant.depths_of_3tracks[trk] # depth parameter used while making the sFlat
        self.iter = 0 # increases by the batch size every batch
self.ip_memory = ip_memory
self.norm = norm
self.flat_shape = None
e = 32 if enc else 32
self.shape = (batch_size, ip_memory, self.DEPTH)
self.oshape = (batch_size, ip_memory, self.DEPTH * e)
self.steps_per_epoch = steps_per_epoch
self.steps = 0
self.enc = enc # if to encode the sFlat to octave encoding
if steps_per_epoch == 0:
self.calc_steps_per_epoch()
self.top = 0
# print("steps per epochs : ", self.steps_per_epoch)
self.__read__()
# self.on_epoch_end()
def calc_steps_per_epoch(self):
# i = 0
# print("sequences : ", self.seq_names)
for mid in self.seq_names:
try :
ns = dataset.to_note_sequence(mid)
except:
continue
ns = dataset.merge_to_3tracks(ns)
ar = dataset.ns_to_tarray(ns, resolution=64)
if ar.shape[0] < 3: continue
self.sFlat = dataset.ns_tarray_to_sFlat(t_arr= ar[ self.trk: self.trk + 1 ], DEPTH= self.DEPTH)
self.steps_per_epoch += ((self.sFlat.shape[1] // self.batch_size)) - 1
def __next_file__(self):
self.top += 1
if self.top == len(self.seq_names) + 1 : return False
# print("Top : ", self.top)
return self.seq_names[self.top - 1]
def __read__(self):
ns = None
        if self.steps_per_epoch == 0: raise FileNotFoundError("None of the MIDI files in the given dataset_path : " + str(self.seq_names) + " is readable")
while not self.__exit__():
try :
filec = self.__next_file__()
ns = dataset.to_note_sequence(filec)
break
except:
continue
if not ns :return False
ns = dataset.merge_to_3tracks(ns)
ar = dataset.ns_to_tarray(ns, resolution=64)
# print("AR shape : ", ar.shape, self.trk)
if ar.shape[0] <= self.trk : return self.__read__()
# print("AGAIN AR shape : ", ar.shape)
self.sFlat = dataset.ns_tarray_to_sFlat(t_arr= ar[ self.trk: self.trk + 1 ], DEPTH= self.DEPTH)
self.sFlat = self.sFlat[:, : (self.sFlat.shape[1] // self.batch_size) * self.batch_size + 1]
self.steps = self.sFlat.shape[1] // self.batch_size - 1
self.iter = 0
        # normalization of note values (x / 128) is applied per batch in __getitem__, so sFlat is kept raw here
self.flat_shape = self.sFlat.shape
return True
def __len__(self):
return len(self.seq_names)
def __exit__(self):
if self.top == len(self.seq_names) : return True
return False
def on_epoch_end(self):
self.top = 0
def __getitem__(self, idx):
if self.steps <= 0: self.__read__()
enc = self.sFlat[:, self.iter : self.iter + self.batch_size + self.ip_memory]
# print("sFlat shape : ", enc.shape)
x, y = dataset.prepare_sFlat_data(enc, enc_shape= (self.DEPTH,), ip_memory=self.ip_memory, depth= self.DEPTH)
# print("Prepare shape : ", x.shape, y.shape)
# if self.enc: enc = enc_deco.sFlat_to_octave(self.sFlat[:, self.iter : self.iter + self.batch_size + self.ip_memory]) #Improving started
# shape = enc.shape[-2: ] if self.enc else tuple()
x, y = x / 128, enc_deco.sFlat_to_octave(y)
# print("Prepare Encoded shape : ", x.shape, y.shape)
# print(shape, enc.shape)
# print(x.shape, y.shape, '----> x, y', self.flat_shape)
x, y = numpy.reshape(x, x.shape[1:3] + (-1, )), numpy.reshape(y, y.shape[1:2] + (-1, )) #reshaping to fit as rnn input
# print("Prepare shape reshsape : ", x.shape, y.shape)
self.iter += self.batch_size
self.steps -= 1
# print("steps : ", self.steps)
# print(x.shape, y.shape, '----> x, y')
return x, y
def __str__(self):
return '{\n\ttrk : ' + str(self.trk) + "\n\tseq_name : " + str(self.seq_names) + "\n\tbatch_size : " + str(self.batch_size) + \
"\n\tshape : " + str(self.shape) + '\n\tsFlat_shape : ' + str(self.flat_shape) + '\n\tsteps_per_epochs : ' + str(self.steps_per_epoch) + \
'\n\titer : ' + str(self.iter) +'\n\tEND\n}'
"""
It generates the note data batch wise
Returns:
    NoteDataGenerator -- generator class for note data used while training
"""
class NoteTimeDataGenerator(Sequence):
def __init__(self, trk, seq_names, batch_size, ip_memory, enc = True, steps_per_epoch = 0, norm = True, bits = 8):
        self.seq_names = numpy.array(seq_names)  # list of midi files available
        self.batch_size = batch_size  # batch size used while training, i.e. number of instances at a time
        self.sFlat = None  # stores the sFlat representation of the midi file
        self.time = None
        self.top = 0  # increases with every file
        self.trk = trk  # which track (lead: 0, chorus: 1, drum: 2)
        self.DEPTH = constant.depths_of_3tracks[trk]  # depth parameter used while building the sFlat
        self.iter = 0  # increases by batch_size every batch
self.ip_memory = ip_memory
self.norm = norm
        self.quanta = 2  # [0, 1, 2, ..., 32] out of 32; 32 means a whole note
self.iftime = False
self.flat_shape = None
self.bits = bits
        e = 32 if enc else 32  # width of the octave encoding (note: both branches are currently 32)
self.shape = (batch_size, ip_memory, self.DEPTH * bits)
self.oshape = (batch_size, ip_memory, self.DEPTH * e)
self.steps_per_epoch = steps_per_epoch
self.steps = 0
        self.enc = enc  # whether to encode the sFlat into the octave encoding
if steps_per_epoch == 0:
self.calc_steps_per_epoch()
self.top = 0
self.__read__()
# self.on_epoch_end()
def calc_steps_per_epoch(self):
for mid in self.seq_names:
try :
ns = dataset.to_note_sequence(mid)
except:
continue
ns = dataset.merge_to_3tracks(ns)
ar = dataset.ns_to_tarray(ns, resolution=64)
if ar.shape[0] < 3: continue
self.sFlat = d2.ns_tarray_to_sFlatroll(tarray= ar[ self.trk: self.trk + 1 ], quanta = self.quanta, depth= self.DEPTH)
self.steps_per_epoch += ((self.sFlat.shape[1] // self.batch_size)) - 20
def __next_file__(self):
self.top += 1
if self.top == len(self.seq_names) + 1 : return False
return self.seq_names[self.top - 1]
def __read__(self):
ns = None
        if self.steps_per_epoch == 0: raise FileNotFoundError("None of the MIDI files in the given dataset_path are readable: " + str(self.seq_names))
while not self.__exit__():
try :
filec = self.__next_file__()
ns = dataset.to_note_sequence(filec)
break
except:
continue
if not ns :return False
ns = dataset.merge_to_3tracks(ns)
ar = dataset.ns_to_tarray(ns, resolution=64)
if ar.shape[0] <= self.trk : return self.__read__()
# print("Updated the successfully")
self.sFlat = d2.ns_tarray_to_sFlatroll(tarray= ar[ self.trk: self.trk + 1 ], quanta= self.quanta ,depth= self.DEPTH)
self.sFlat = self.sFlat[:, : (self.sFlat.shape[1] // self.batch_size) * self.batch_size + 1]
self.time = dataset.ns_tarray_to_time(t_arr= ar[ self.trk: self.trk + 1 ])
self.steps = self.sFlat.shape[1] // self.batch_size - 1
self.iter = 0
        # the sFlat roll is kept as-is here; the octave/binary encoding is applied in __getitem__
self.flat_shape = self.sFlat.shape
# print("Updated the successfully")
return True
def __len__(self):
return len(self.seq_names)
def __exit__(self):
if self.top == len(self.seq_names) : return True
return False
def on_epoch_end(self):
self.top = 0
def __getitem__(self, idx):
if self.steps <= 0: self.__read__()
if idx == 0:
# print("Reading the file ....", self.trk)
            self.__read__()  # read a new file so that the first sample and internal state are initialised
enc = self.sFlat[:, self.iter : self.iter + self.batch_size + self.ip_memory]
x, y = dataset.prepare_sFlat_data(enc, enc_shape= (self.DEPTH + 1,), ip_memory=self.ip_memory, depth= self.DEPTH )
x, y = enc_deco.binary(x, self.bits) , enc_deco.sFlat_to_octave(y)
x, y = numpy.reshape(x, x.shape[1:3] + (-1, )), numpy.reshape(y, y.shape[1:2] + (-1, )) #reshaping to fit as rnn input
# print("x.shape, y.shape : ", x.shape, y.shape )
self.iter += self.batch_size
self.steps -= 1
return x, y
def __str__(self):
return '{\n\ttrk : ' + str(self.trk) + "\n\tseq_name : " + str(self.seq_names) + "\n\tbatch_size : " + str(self.batch_size) + \
"\n\tshape : " + str(self.shape) + '\n\tsFlat_shape : ' + str(self.flat_shape) + '\n\tsteps_per_epochs : ' + str(self.steps_per_epoch) + \
'\n\titer : ' + str(self.iter) +'\n\tEND\n}'
class DataGenerator_3Tracks(Sequence):
def __init__(self, seq_names, batch_size, ip_memory, enc = True, steps_per_epoch = 0, norm = True, bits = 8, test_size = 0.2):
self.ftrain, self.fval = train_test_split(seq_names, test_size= test_size)
print("len : train, val : ", len(self.ftrain), len(self.fval))
        self.train = sFlatDataGenerator_3Tracks(self.ftrain, batch_size, ip_memory, enc, steps_per_epoch, norm, bits)
self.val = sFlatDataGenerator_3Tracks(self.fval, batch_size, ip_memory, enc, steps_per_epoch, norm, bits)
self.ip_memory = ip_memory
self.batch_size = batch_size
self.bits = bits
    def __len__(self):
        return len(self.ftrain) + len(self.fval)  # total number of MIDI files across the train/val splits
class sFlatDataGenerator_3Tracks(Sequence):
def __init__(self, seq_names, batch_size, ip_memory, enc = True, steps_per_epoch = 0, norm = True, bits = 8):
self.ip_memory = ip_memory
self.batch_size = batch_size
self.bits = bits
self.lead = NoteTimeDataGenerator(0, seq_names, batch_size, ip_memory, enc, steps_per_epoch, norm)
self.chorus = NoteTimeDataGenerator(1, seq_names, batch_size, ip_memory, enc, steps_per_epoch, norm)
self.drum = NoteTimeDataGenerator(2, seq_names, batch_size, ip_memory, enc, steps_per_epoch, norm)
self.iter = -1
self.steps_per_epoch = min([self.lead.steps_per_epoch, self.chorus.steps_per_epoch, self.drum.steps_per_epoch])
def __len__(self):
return self.steps_per_epoch
def on_epoch_end(self):
self.lead.on_epoch_end()
self.chorus.on_epoch_end()
self.drum.on_epoch_end()
self.lead.top = 0
self.chorus.top = 0
self.drum.top = 0
self.iter = -1
def __getitem__(self, idx):
self.iter += 1
x0, y0 = self.lead.__getitem__(self.iter)
x1, y1 = self.chorus.__getitem__(self.iter)
x2, y2 = self.drum.__getitem__(self.iter)
return [x0, x1, x2], [y0, y1, y2]
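# Usage sketch (hypothetical): the three-track generator yields ([x_lead, x_chorus, x_drum],
# [y_lead, y_chorus, y_drum]) per batch, so it can feed a Keras model with three inputs/outputs:
#
#     gen3 = sFlatDataGenerator_3Tracks(midi_files, batch_size=32, ip_memory=32)
#     model.fit_generator(gen3, steps_per_epoch=gen3.steps_per_epoch, epochs=5)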
def note_data(f, trk = 0, idx = None, ip_memory = 32, batch_size= 32, DEPTH = 1, all_ = False, randm = True):
    # the following reads the file into the sFlat representation
ns = dataset.to_note_sequence(f)
ar = dataset.ns_to_tarray(ns, resolution= 64)
sFlat = dataset.ns_tarray_to_sFlat(t_arr= ar[trk: trk + 1 ], DEPTH= DEPTH)
MX = (sFlat.shape[1] - ip_memory - batch_size)
if MX < 0: MX = 1
idx = idx if idx else random.randint(0, MX) # get index which slice of ip_memory you want
if idx > MX: raise Exception("Index out of bound err : Not in midi file") # if index is greater than MX, out of file
x, y = dataset.prepare_sFlat_data(sFlat[:, idx : idx + batch_size + ip_memory], ip_memory=ip_memory, depth= DEPTH)
y = enc_deco.sFlat_to_octave(y) # Improving started
if all_ : return x[0], y[0]
    rx = random.randint(0, x.shape[1] - 1)  # pick a random slice index (randint is inclusive at both ends)
    print("random init : ", rx)
    if randm: return x[0, rx], y[0, rx]
return x[0, 0], y[0, 0]
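# Illustrative call (hypothetical file path): returns one (input, target) pair from the lead track.
#     x, y = note_data('example.mid', trk=0, ip_memory=32, batch_size=32, DEPTH=1)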
"""
Time data generator for batch_training
Returns:
[type] -- [description]
"""
class TimeDataGenerator(Sequence):
def __init__(self, trk, seq_names, batch_size, ip_memory):
        self.seq_names = numpy.array(seq_names)  # list of midi files available
        self.batch_size = batch_size  # batch size used while training, i.e. number of instances at a time
        self.sFlat = None  # stores the time instances for the sFlat representation of the midi file
        self.top = 0  # increases with every file
        self.trk = trk  # which track (lead: 0, chorus: 1, drum: 2)
        self.DEPTH = constant.depths_of_3tracks[trk]  # depth parameter; __getitem__ passes it to prepare_sFlat_data
        self.iter = 0  # increases by batch_size every batch
self.ip_memory = ip_memory
self.flat_shape = None
self.shape = (batch_size, ip_memory, 64)
self.steps_per_epoch = 0
self.steps = 0
self.calc_steps_per_epoch()
self.top = 0
self.__read__()
# self.on_epoch_end()
def calc_steps_per_epoch(self):
for mid in self.seq_names:
filec = self.__next_file__()
try :
ns = dataset.to_note_sequence(filec)
except: continue
ar = dataset.ns_to_tarray(ns, resolution=64)
self.sFlat = dataset.ns_tarray_to_time(t_arr= ar[ self.trk: self.trk + 1 ])
self.steps_per_epoch += ((self.sFlat.shape[1] // self.batch_size) * self.batch_size + 1) // self.batch_size - 1
def __next_file__(self):
self.top += 1
if self.top == len(self.seq_names) : return False
return self.seq_names[self.top - 1]
def __read__(self):
while not self.__exit__():
try :
filec = self.__next_file__()
ns = dataset.to_note_sequence(filec)
break
except: continue
ar = dataset.ns_to_tarray(ns, resolution=64)
self.sFlat = dataset.ns_tarray_to_time(t_arr= ar[ self.trk: self.trk + 1 ])
self.sFlat = self.sFlat[:, : (self.sFlat.shape[1] // self.batch_size) * self.batch_size + 1]
self.steps = self.sFlat.shape[1] // self.batch_size - 1
self.iter = 0
self.flat_shape = self.sFlat.shape
return True
def __len__(self):
return len(self.seq_names)
def __exit__(self):
if self.top == len(self.seq_names) : return True
return False
def on_epoch_end(self):
self.top = 0
def __getitem__(self, idx):
if self.steps <= 0: self.__read__()
enc = enc_deco.tm_to_enc_tm(self.sFlat[:, self.iter : self.iter + self.batch_size + self.ip_memory]) #None
x, y = dataset.prepare_sFlat_data(enc, enc_shape= enc.shape[-2: ], ip_memory=self.ip_memory, depth= self.DEPTH)
print(x.shape, y.shape, '----> x, y', self.flat_shape)
# x, y = numpy.reshape(x, x.shape[1:3] + (-1, )), numpy.reshape(y, y.shape[1:2] + (-1, )) #reshaping to fit as rnn input
self.iter += self.batch_size
self.steps -= 1
# print("steps : ", self.steps)
# print(x.shape, y.shape, '----> x, y')
return x, y
def __str__(self):
return '{\n\ttrk : ' + str(self.trk) + "\n\tseq_name : " + str(self.seq_names) + "\n\tbatch_size : " + str(self.batch_size) + \
"\n\tshape : " + str(self.shape) + '\n\tsFlat_shape : ' + str(self.flat_shape) + '\n\tsteps_per_epochs : ' + str(self.steps_per_epoch) + \
'\n\titer : ' + str(self.iter) +'\n\tEND\n}'
|
<gh_stars>1-10
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
class TestMatchMatrixTensorOp(OpTest):
def setUp(self):
self.init_op_type()
self.set_data()
self.compute()
def init_op_type(self):
self.op_type = "match_matrix_tensor"
def set_data(self):
ix, iy, h, dim_t = [5, 8, 3, 4]
x_lod = [[1, 2, 2]]
y_lod = [[3, 1, 4]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
def init_data(self, ix, x_lod, iy, y_lod, h, dim_t):
x_data = np.random.random((ix, h)).astype('float32')
y_data = np.random.random((iy, h)).astype('float32')
w_data = np.random.random((h, dim_t, h)).astype('float32')
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod), 'W': w_data}
self.attrs = {'dim_t': dim_t}
def compute(self):
x_data, x_lod = self.inputs['X']
y_data, y_lod = self.inputs['Y']
# [k, dim_t, k] -> [dim_t, k, k]
w_data = self.inputs['W'].transpose(1, 0, 2)
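        # Shape note (illustrative): for each LoD segment below, tmp_sub = dot(x_sub (x_len, h),
        # w_data (dim_t, h, h)) has shape (x_len, dim_t, h); out_sub becomes (dim_t, x_len, y_len)
        # after the dot with y_sub.T and the transpose, and is then flattened column-wise.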
out = np.zeros((0, 1), dtype=x_data.dtype)
# for x*w
tmp = np.zeros((0, 1), dtype=x_data.dtype)
out_lod = [[]]
tmp_lod = [[]]
x_offset, y_offset = 0, 0
for idx in range(len(x_lod[0])):
x_len = x_lod[0][idx]
y_len = y_lod[0][idx]
x_sub = x_data[x_offset:(x_offset + x_len), :]
y_sub = y_data[y_offset:(y_offset + y_len), :]
tmp_sub = np.dot(x_sub, w_data)
tmp = np.vstack((tmp, tmp_sub.reshape(tmp_sub.size, 1)))
out_sub = np.dot(tmp_sub, y_sub.T).transpose(1, 0, 2)
out_lod[0].append(out_sub.size)
out = np.vstack((out, out_sub.reshape(out_sub.size, 1)))
x_offset += x_len
y_offset += y_len
self.outputs = {'Out': (out, out_lod), 'Tmp': tmp}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)
class TestMatchMatrixTensorOpCase1(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [5, 8, 16, 4]
x_lod = [[5]]
y_lod = [[8]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
class TestMatchMatrixTensorOpCase2(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [7, 8, 1, 4]
x_lod = [[2, 3, 2]]
y_lod = [[3, 1, 4]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
class TestMatchMatrixTensorOpCase3(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [5, 9, 32, 1]
x_lod = [[1, 2, 2]]
y_lod = [[3, 2, 4]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
class TestMatchMatrixTensorOpCase4(TestMatchMatrixTensorOp):
def set_data(self):
ix, iy, h, dim_t = [8, 12, 16, 5]
x_lod = [[1, 2, 3, 1, 1]]
y_lod = [[3, 2, 4, 1, 2]]
self.init_data(ix, x_lod, iy, y_lod, h, dim_t)
def test_api(self):
x_lod_tensor = fluid.layers.data(name='x', shape=[10], lod_level=1)
y_lod_tensor = fluid.layers.data(name='y', shape=[10], lod_level=1)
out, out_tmp = fluid.contrib.match_matrix_tensor(
x=x_lod_tensor, y=y_lod_tensor, channel_num=3)
place = fluid.CPUPlace()
x_data = np.random.rand(7, 10).astype('float32')
y_data = np.random.rand(9, 10).astype('float32')
x = fluid.create_lod_tensor(x_data, [[2, 5]], place)
y = fluid.create_lod_tensor(y_data, [[3, 6]], place)
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
ret = exe.run(feed={'x': x,
'y': y},
fetch_list=[out],
return_numpy=False)
if __name__ == '__main__':
unittest.main()
|
<filename>ETCetera/util/parsing/parser_nonlinear_systems.py
"""
Created on Sat May 16 14:53:58 2020
@author: gmaddodi
"""
import re
import sys
import ETCetera.util.parsing.syntax_checker as sc
import sympy as sp
from ETCetera.exceptions.parser_exceptions.general_parser_exception import EmptyValueException, \
MultipleValuesFoundException, NotPositiveRealNumberException, IncorrectSyntaxException, GenericParsingException
from ETCetera.exceptions.parser_exceptions.symbolic_expression_exceptions import \
IncorrectNumOfSymbolicExpressionException
def parse_nonlinear(line):
"""
The function takes a string representing the key-value pair of non-linear control system input, checks which input
datastructure it is and extracts the value. Returns the value in appropriate form if checks are passed, else raises
an exception.
Parameters:
----------
line : string
        A string of key and value corresponding to an attribute of the non-linear input data structure, e.g. 'Deg. of Homogeneity : 2'.
Returns:
-------
An appropriate data structure (list, string, dictionary).
Exceptions:
----------
IncorrectSyntaxException, EmptyValueException, IncorrectNumOfSymbolicExpressionException,
NotPositiveRealNumberException.
"""
# If the key is 'dynamics'
if line.split(':')[0].strip() == 'Dynamics':
dynamics = []
allowed_chars = list(['x', 'u', 'e', 'd', 'w']) # Defined allowed characters in symbolic expression
try:
value = line.split(':')[1].strip() # Check value exists
except IndexError:
raise EmptyValueException
sc.check_symbols_in_exprs(allowed_chars, value) # Get the symbols from expression
for expr in line.split(':')[1].strip().split(', '): # Split the symbolic expressions delimited by ','
dynamics.append(sc.check_symbolic_expr(expr)) # Verify the symbolic expression and append
return dynamics
# If the line is 'controller'
elif line.split(':')[0].strip() == 'Controller':
controller = []
allowed_chars = list(['x', 'w', 'e']) # Defined allowed characters in symbolic expression
try:
value = line.split(':')[1].strip()
except IndexError:
raise EmptyValueException
sc.check_symbols_in_exprs(allowed_chars, value) # Get the symbols from expression
for expr in line.split(':')[1].strip().split(', '): # Split the symbolic expressions delimited by ','
controller.append(sc.check_symbolic_expr(expr)) # Verify the symbolic expression and append
return controller
# If the line is 'triggering_condition'
elif line.split(':')[0].strip() == 'Triggering Condition':
triggering_condition = 0
allowed_chars = list(['x', 'e', 'w']) # Defined allowed characters in symbolic expression
try:
num_exprs = len(line.split(':')[1].strip().split(', '))
except IndexError:
raise EmptyValueException
if num_exprs == 1: # There should be only one expression
sc.check_symbols_in_exprs(allowed_chars, line.split(':')[1].strip()) # Get the symbols from expression
triggering_condition = sc.check_symbolic_expr(line.split(':')[1].strip()) # Verify the symbolic expression and append
return triggering_condition
else:
raise IncorrectNumOfSymbolicExpressionException(num_exprs, 1)
# If the key is 'hyperbox_states'
elif line.split(':')[0].strip() == 'Hyperbox States':
hyperbox_states = []
try:
hyperbox_states_vectors = line.split(':')[1].strip().split(', ')
except IndexError:
raise IncorrectSyntaxException
for item in hyperbox_states_vectors: # Split the vectors delimited by ','
list_of_values = sc.check_keyvalue_syntax(' ', '\[(.*)\]', item) # Check the vector syntax
sc.check_if_numerical_values(list_of_values) # Check that values are all real numbers
hyperbox_states.append([float(i) for i in list_of_values]) # Convert list into vector and append
return hyperbox_states
elif line.split(':')[0].strip() == 'Grid Points Per Dimension':
item = line.split(':')[1].strip()
list_of_values = sc.check_keyvalue_syntax(' ', '\[(.*)\]', item) # Check the vector syntax
sc.check_if_numerical_values(list_of_values) # Check that values are all real numbers
grid_points = [int(i) for i in list_of_values] # Convert list into vector and append
return grid_points
# If the line is 'hyperbox_disturbances'
elif line.split(':')[0].strip() == 'Hyperbox Disturbances':
hyperbox_disturbances = []
try:
hyperbox_disturbances_vectors = line.split(':')[1].strip().split(', ')
except IndexError:
raise IncorrectSyntaxException
len_vectors = len(hyperbox_disturbances_vectors)
for item in hyperbox_disturbances_vectors: # Split the vectors delimited by ','
list_of_values = sc.check_keyvalue_syntax(' ', '\[(.*)\]', item) # Check the vector syntax
if len(list_of_values) == 0 and len_vectors > 1:
                raise GenericParsingException('No other vectors can be specified when an empty vector is defined. '
                                              'Syntax Error on line ')
elif len(list_of_values) == 0 and len_vectors == 1:
pass
else:
sc.check_if_numerical_values(list_of_values) # Check the values are real numbers
hyperbox_disturbances.append([float(i) for i in list_of_values]) # Convert list into vector and append
return hyperbox_disturbances
# If the line is 'deg_of_homogeneity'
elif line.split(':')[0].strip() == 'Deg. of Homogeneity':
try:
value = line.split(':')[1]
except IndexError:
raise EmptyValueException
if len(value.strip().split(',')) != 1: # There should be only one value
raise MultipleValuesFoundException
sc.check_if_numerical_values(value.strip().split(' ')) # Check if float value
        if float(value.strip()) < 0:  # value should be a non-negative real number
raise NotPositiveRealNumberException
return float(value.strip())
# If the line is 'solver_options'
elif line.split(':')[0].strip() == 'Solver Options':
solver_options = dict()
try:
# Check if no values specified as this data structure can be empty,
if len(list(filter(None, line.split(':')[1].strip().split(', ')))) == 0:
return solver_options
for item in line.split(':')[1].strip().split(', '):
# re.search('[a-z_]+=[a-z0-9.]+', item).group(0)
re.search('[a-zA-Z_]+=(\[([a-zA-Z0-9. ])*]|[a-zA-Z0-9.]+)', item).group(0)
key = item.split('=')[0]
value = item.split('=')[1]
if key == 'precision_deltas':
solver_options.update({key: float(value)})
elif key == 'timeout_deltas':
solver_options.update({key: int(value)})
elif key == 'partition_method':
solver_options.update({key: str(value)})
elif key == 'manifolds_times':
values = re.search('\[([0-9. ]*)]', value).group(1)
values = values.strip().split(' ')
values = list(filter(None, values))
if len(values) > 0:
solver_options.update({key: [float(v) for v in values]})
elif key == 'angles_discretization':
values = re.search('\[([0-9 ]*)]', value).group(1)
values = values.strip().split(' ')
values = list(filter(None, values))
if len(values) > 0:
solver_options.update({key: [int(v) for v in values]})
# elif key == 'nr_cones_small_angles':
# values = re.search('\[(.*)]', value).group(1)
# values = values.strip().split(' ')
# values = list(filter(None, values))
# if len(values) > 0:
# sc.check_if_numerical_values(values)
# solver_options.update({key: [int(i) for i in values]})
# elif key == 'nr_cones_big_angle':
# solver_options.update({key: int(value)})
# elif key == 'state_space_limits':
# hyperbox_states = []
# try:
# hyperbox_states_vectors = line.split(':')[1].strip().split(', ')
# except IndexError:
# raise IncorrectSyntaxException
# for item in hyperbox_states_vectors: # Split the vectors delimited by ','
# list_of_values = sc.check_keyvalue_syntax(' ', '\[(.*)\]', item) # Check the vector syntax
# sc.check_if_numerical_values(list_of_values) # Check that values are all real numbers
# hyperbox_states.append(
# [float(i) for i in list_of_values]) # Convert list into vector and append
# solver_options.update({key: hyperbox_states})
# elif key == 'grid_points_'
elif key == 'heartbeat':
solver_options.update({key: float(value)})
elif key == 'precision_timing_bounds':
solver_options.update({key: float(value)})
elif key == 'precision_transitions':
solver_options.update({key: float(value)})
elif key == 'timeout_timing_bounds':
solver_options.update({key: int(value)})
elif key == 'timeout_transitions':
solver_options.update({key: int(value)})
elif key == 'order_approx':
assert int(value) > 0
solver_options.update({key: int(value)})
else:
continue
# elif key == 'gridstep':
# assert int(value) > 1, "Gridstep should be greater than 1."
# solver_options.update({key: int(value)})
#
# elif key == 'opt_method':
# assert value in ['revised simplex', 'simplex', 'interior-point']
# solver_options.update({key: value})
# elif key == 'heart_beat':
# solver_options.update({key: float(value)})
# elif key == 'grid_pts_per_dim':
# values = re.search('\{(.*)\}', value).group(1)
# values = values.strip().split(' ')
# values = list(filter(None, values))
# if len(values) > 0:
# sc.check_if_numerical_values(values)
# solver_options.update({key: [int(i) for i in values]})
#
#
# elif key == 'remainder_reachability':
# solver_options.update({key: float(value)})
# elif key == 'timeout_reachability':
# solver_options.update({key: int(value)})
# else:
# pass
return solver_options
except Exception:
raise IncorrectSyntaxException
# # If the line is 'linesearch_options'
# elif line.split(':')[0].strip() == 'Linesearch Options':
# linesearch_options = dict()
# try:
# # Check if no values specified, this data structure can be empty
# if len(list(filter(None, line.split(':')[1].strip().split(', ')))) == 0:
# return linesearch_options
# for item in line.split(':')[1].strip().split(', '):
# re.search('[a-z_]+=[a-z0-9.{} ]+', item).group(0)
# key = item.split('=')[0]
# value = item.split('=')[1]
# if key == 'timeout_upper_bounds':
# linesearch_options.update({key: int(value)})
# elif key == 'remainder_upper_bounds':
# linesearch_options.update({key: float(value)})
# else:
# pass
# return linesearch_options
# except Exception as e:
# raise IncorrectSyntaxException
else:
pass
def get_etc_controller(controller):
"""
The function takes a non-linear data structure and checks following:
1. number of controller expressions is equal to number of dynamics input symbols
2. initializes etc_controller by replacing xn with (xn + en), where n=1,2,3...
3. checks all attributes are initialized expect for optional ones (dynamics_disturbances',
'solver_options', 'linesearch_options).
Returns the new data structure object.
Parameters:
----------
data : InputDataStructureNonLinear class object from input_datastructure module.
Returns:
-------
Modified InputDataStructureNonLinear class object from input_datastructure module.
"""
dynamics_errors = set()
etc_controller = list()
for index, item in enumerate(controller):
item = str(item) # Get string representation of the symbol
for symbol in set(re.findall('x\d+', item)): # get all state symbols, i.e. x0, x1 ...
error_symbol = 'e' + re.search('\d+', symbol).group(0) # Construct error symbol, e.g. x0 -> e0
dynamics_errors.add(sp.Symbol(error_symbol)) # Create error symbol (en) if doesn't exist
item = item.replace(symbol, '(' + symbol + ' + ' + error_symbol + ')') # Replace xn with (xn + en)
etc_controller.append(sp.sympify(item))
return dynamics_errors, etc_controller
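# Minimal usage sketch (illustrative; the input line and controller expression below are
# assumptions, not shipped with the module):
if __name__ == '__main__':
    print(parse_nonlinear('Deg. of Homogeneity : 2'))            # expected: 2.0
    errors, etc_ctrl = get_etc_controller([sp.sympify('-x0**3 - x1')])
    print(errors, etc_ctrl)                                       # error symbols and ETC controller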
|
<gh_stars>0
"""Calendar parsing."""
import os
import datetime
import re
from . import spec
TODAY = datetime.date.today().strftime("%Y%m%d")
def machine(file, shift=TODAY, event='first', report=False):
"""Creates a new calendar file with shifted dates.
Args:
file: str. Calendar file. Supported extensions are ['.ics'].
shift: str or int. A date to shift to or a number of days to shift.
- Date: The format must be 'YYYYMMDD'. Based on the event arg,
either the first or last event will be shifted to this date
and all other events will be shifted by the same relative
time.
- Days: All dates will be shifted by this number of days either
forward for a positive int or backward for a negative int.
event: str, default: 'first'. Reference point for shift calculation.
Possible values are ['first', 'last'].
report: bool. Print a summary report.
"""
days = None
# check args:
valid_file = ['.ics']
valid_event = ['first', 'last']
fbase, fext = os.path.splitext(file)
if fext not in valid_file:
raise LookupError('Invalid file arg; bad extension. See docstring.')
if isinstance(shift, int):
days = shift
dtshift = None
else:
dtshift = datetime.datetime.strptime(shift, '%Y%m%d')
if event not in valid_event:
raise LookupError('Invalid event arg. See docstring.')
# read source txt
with open(os.path.abspath(file)) as f:
txt = f.read()
# load calendar spec
if fext == '.ics':
cal = spec.icalendar()
dtstart = cal.dtstart
dtend = cal.dtend
evstart = cal.evstart
evend = cal.evend
form = cal.form
# calculate shift
evidx = event_idx(txt, evstart, evend)
    if days is None:
dts = dates(txt, evidx, dtstart, form)
if event == 'first':
dtbase = min(dts)
elif event == 'last':
dtbase = max(dts)
dtdelta = dtshift - dtbase
days = dtdelta.days
if report:
results = [('Events Modified', len(evidx)),
('Days Traveled', days)]
if dtshift:
results.append(['Origination', dtbase.strftime(form)])
results.append(['Destination', dtshift.strftime(form)])
print_report(results)
# shift dates
out = travel(txt, evidx, days, dtstart, dtend, form)
# write new txt
outfile = os.path.abspath(fbase + '_' + TODAY + fext)
with open(outfile, 'w') as f:
f.write(out)
def travel(txt, evidx, days, dtstart, dtend, form):
"""Shift all events in txt by days."""
repl_ = lambda match: repl(match, days, form)
out = txt[:evidx[0][0]]
for start, end in evidx:
if start != len(out):
out += txt[len(out): start - 1]
event = txt[start:end]
for pat in [dtstart, dtend]:
event = re.sub(pat, repl_, event)
out += event
out += txt[len(out):]
return out
def event_idx(txt, evstart, evend):
"""Create a list of indexes for event start/end points in txt."""
pos = []
start = txt.find(evstart)
end = txt.find(evend)
while end != -1:
pos.append((start, end + len(evend)))
start = txt.find(evstart, start + 1)
end = txt.find(evend, end + 1)
return pos
def dates(txt, evidx, pat, form):
"""Extract dates from txt."""
dts = []
for start, end in evidx:
match = re.search(pat, txt[start:end])
s = [g for g in match.groups() if g][0]
dts.append(datetime.datetime.strptime(s, form))
return dts
def repl(match, days, form):
"""Shift matched date by days."""
base = match.group(0)
s = [g for g in match.groups() if g][0]
start = base.find(s)
end = start + len(s)
dt = datetime.datetime.strptime(s, form)
new_dt = dt + datetime.timedelta(days=days)
return base[:start] + new_dt.strftime(form) + base[end:]
def print_report(args):
"""Print args."""
for item, val in args:
print(item, ':', val)
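# Minimal usage sketch (illustrative; 'holidays.ics' is an assumed file name):
if __name__ == '__main__':
    # Shift every event in the calendar forward by 7 days and print a summary report.
    machine('holidays.ics', shift=7, report=True)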
|
<reponame>ecoromka/mbed-os
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: <NAME> <<EMAIL>>
"""
from __future__ import print_function
import six
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
import functools
from colorama import Fore, Back, Style
from prettytable import PrettyTable, HEADER
from copy import copy, deepcopy
from time import sleep, time
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from os.path import join, exists, basename, relpath, isdir, isfile
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP, Target
from tools.config import Config
import tools.test_configs as TestConfig
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import prepare_toolchain
from tools.build_api import get_config
from tools.resources import Resources, MbedIgnoreSet, IGNORE_FILENAME
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.options import extract_profile
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
from tools.notifier.mock import MockNotifier
from tools.notifier.term import TerminalNotifier
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
def __init__(self, proc):
Thread.__init__(self)
self.proc = proc
self.queue = Queue()
self.daemon = True
self.active = True
self.start()
def run(self):
while self.active:
c = self.proc.stdout.read(1)
self.queue.put(c)
def stop(self):
self.active = False
try:
self.proc.terminate()
except Exception:
pass
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""
def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
def run(self):
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print(self.single_test.generate_test_summary(test_summary,
shuffle_seed))
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print(self.single_test.generate_test_summary_by_target(
test_summary, shuffle_seed))
print("Completed in %.2f sec"% (elapsed_time))
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
    TEST_LOOPS_LIST = [] # We redefine no. of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED,
"not_supproted" : TEST_RESULT_NOT_SUPPORTED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_parser=None,
_opts=None,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_opts_report_text_file_name=None,
_opts_build_report={},
_opts_build_properties={},
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None,
_opts_include_non_automated=False):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_report_text_file_name = _opts_report_text_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_parser = _parser
self.opts = _opts
self.opts_auto_detect = _opts_auto_detect
self.opts_include_non_automated = _opts_include_non_automated
self.build_report = _opts_build_report
self.build_properties = _opts_build_properties
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
for toolchain in toolchains:
tt_id = "%s::%s" % (toolchain, target)
T = TARGET_MAP[target]
# print target, toolchain
# Test suite properties returned to external tools like CI
test_suite_properties = {
'jobs': self.opts_jobs,
'clean': clean,
'target': target,
'vendor': T.extra_labels[0],
'test_ids': ', '.join(test_ids),
'toolchain': toolchain,
'shuffle_random_seed': self.shuffle_random_seed
}
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Target platform not found' %
(target)))
continue
clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk or
self.opts_clean or clean)
profile = extract_profile(self.opts_parser, self.opts, toolchain)
stats_depth = self.opts.stats_depth or 2
try:
build_mbed_libs_result = build_mbed_libs(
T, toolchain,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile,
notify=TerminalNotifier())
if not build_mbed_libs_result:
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Toolchain %s is not '
'supported for this target'% (T.name, toolchain)))
continue
except ToolException:
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building MBED libs for %s using %s'
% (target, toolchain)))
continue
build_dir = join(BUILD_DIR, "test", target, toolchain)
test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
test_suite_properties['build_dir'] = build_dir
test_suite_properties['skipped'] = []
# Enumerate through all tests and shuffle test order if requested
test_map_keys = sorted(TEST_MAP.keys())
if self.opts_shuffle_test_order:
random.shuffle(test_map_keys, self.shuffle_random_func)
            # Update database with shuffle seed if applicable
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_shuffle_seed=self.shuffle_random_func())
self.db_logger.disconnect();
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
# Update MUTs and Test Specification in database
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_muts=self.muts, _test_spec=self.test_spec)
# Update Extra information in database (some options passed to test suite)
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_extra=json.dumps(self.dump_options()))
self.db_logger.disconnect();
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
for skipped_test_id in skipped_test_map_keys:
test_suite_properties['skipped'].append(skipped_test_id)
# First pass through all tests and determine which libraries need to be built
libraries = []
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
# Detect which lib should be added to test
# Some libs have to compiled like RTOS or ETH
for lib in LIBRARIES:
if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
libraries.append(lib['id'])
clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
# Build all required libraries
for lib_id in libraries:
try:
build_lib(lib_id,
T,
toolchain,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile,
notify=TerminalNotifier())
except ToolException:
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building library %s' % lib_id))
continue
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
                # TODO: move these two loops below to a separate function
INC_DIRS = []
for lib_id in libraries:
if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
MACROS = []
for lib_id in libraries:
if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
# Prepare extended test results data structure (it can be used to generate detailed test report)
if target not in self.test_summary_ext:
self.test_summary_ext[target] = {} # test_summary_ext : toolchain
if toolchain not in self.test_summary_ext[target]:
self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(
test.source_dir, join(build_dir, test_id), T,
toolchain, test.dependencies, clean=clean_project_options,
name=project_name, macros=MACROS,
inc_dirs=INC_DIRS, jobs=self.opts_jobs, report=build_report,
properties=build_properties, project_id=test_id,
project_description=test.get_description(),
build_profile=profile, stats_depth=stats_depth,
notify=TerminalNotifier(),
)
except Exception as e:
project_name_str = project_name if project_name is not None else test_id
test_result = self.TEST_RESULT_FAIL
if isinstance(e, ToolException):
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building project %s' %
project_name_str))
test_result = self.TEST_RESULT_BUILD_FAILED
elif isinstance(e, NotSupportedException):
print(self.logger.log_line(
self.logger.LogType.INFO,
'Project %s is not supported' % project_name_str))
test_result = self.TEST_RESULT_NOT_SUPPORTED
# Append test results to global test summary
self.test_summary.append(
(test_result, target, toolchain, test_id,
test.get_description(), 0, 0, '-')
)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
self.test_summary_ext[target][toolchain][test_id].append({ 0: {
'result' : test_result,
'output' : '',
'target_name' : target,
'target_name_unique': target,
'toolchain_name' : toolchain,
'id' : test_id,
'description' : test.get_description(),
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
}})
continue
if self.opts_only_build_tests:
# With this option we are skipping testing phase
continue
# Test duration can be increased by global value
test_duration = test.duration
if self.opts_extend_test_timeout is not None:
test_duration += self.opts_extend_test_timeout
                # For an automated test the duration acts as a timeout after
# which the test gets interrupted
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
# read MUTs, test specification and perform tests
handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
if handle_results is None:
continue
for handle_result in handle_results:
if handle_result:
single_test_result, detailed_test_results = handle_result
else:
continue
# Append test results to global test summary
if single_test_result is not None:
self.test_summary.append(single_test_result)
# Add detailed test result to test summary structure
if target not in self.test_summary_ext[target][toolchain]:
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
append_test_result = detailed_test_results
# If waterfall and consolidate-waterfall options are enabled,
# only include the last test result in the report.
if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
self.test_suite_properties_ext[target][toolchain] = test_suite_properties
q.put(target + '_'.join(toolchains))
return
def execute(self):
clean = self.test_spec.get('clean', False)
test_ids = self.test_spec.get('test_ids', [])
q = Queue()
        # Generate seed for shuffle if a seed is not provided on the command line
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
if self.opts_parallel_test_exec:
###################################################################
# Experimental, parallel test execution per singletest instance.
###################################################################
execute_threads = [] # Threads used to build mbed SDL, libs, test cases and execute tests
# Note: We are building here in parallel for each target separately!
# So we are not building the same thing multiple times and compilers
# in separate threads do not collide.
            # Inside the execute_thread_slice() function, handle() will be called to
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].items():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
t.start()
execute_threads.append(t)
for t in execute_threads:
q.get() # t.join() would block some threads because we should not wait in any order for thread end
else:
# Serialized (not parallel) test execution
for target, toolchains in self.test_spec['targets'].items():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
q.get()
if self.db_logger:
self.db_logger.reconnect();
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.disconnect();
return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
valid_test_map_keys = []
for test_id in test_map_keys:
test = TEST_MAP[test_id]
if self.opts_test_by_names and test_id not in self.opts_test_by_names:
continue
if test_ids and test_id not in test_ids:
continue
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if (self.opts_peripheral_by_names and test.peripherals and
not any((i in self.opts_peripheral_by_names)
for i in test.peripherals)):
# We will skip tests not forced with -p option
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral test skipped for target %s' % target))
continue
if not include_non_automated and not test.automated:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Non automated test skipped for target %s' % target))
continue
if test.is_supported(target, toolchain):
if test.peripherals is None and self.opts_only_build_tests:
# When users are using 'build only flag' and test do not have
# specified peripherals we can allow test building by default
pass
elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
# If we force peripheral with option -p we expect test
# to pass even if peripheral is not in MUTs file.
pass
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral %s test skipped for target %s' %
(",".join(test.peripherals), target)))
else:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Test %s skipped for target %s' %
(test_id, target)))
continue
# The test has made it through all the filters, so add it to the valid tests list
valid_test_map_keys.append(test_id)
return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows text x toolchain test result matrix
"""
RESULT_INDEX = 0
TARGET_INDEX = 1
TOOLCHAIN_INDEX = 2
TEST_INDEX = 3
DESC_INDEX = 4
unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
result = "Test summary:\n"
for target in unique_targets:
result_dict = {} # test : { toolchain : result }
unique_target_toolchains = []
for test in test_summary:
if test[TARGET_INDEX] == target:
if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
if test[TEST_INDEX] not in result_dict:
result_dict[test[TEST_INDEX]] = {}
result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
for col in pt_cols:
pt.align[col] = "l"
pt.padding_width = 1 # One space between column edges and contents (default)
for test in unique_tests:
if test in result_dict:
test_results = result_dict[test]
if test in unique_test_desc:
row = [target, test, unique_test_desc[test]]
for toolchain in unique_toolchains:
if toolchain in test_results:
row.append(test_results[toolchain])
pt.add_row(row)
result += pt.get_string()
shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows target x test results matrix across
"""
        success_code = 0  # Success code that can later be returned to the caller
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)", "Loops"], junction_char="|", hrules=HEADER)
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {self.TEST_RESULT_OK : 0,
self.TEST_RESULT_FAIL : 0,
self.TEST_RESULT_ERROR : 0,
self.TEST_RESULT_UNDEF : 0,
self.TEST_RESULT_IOERR_COPY : 0,
self.TEST_RESULT_IOERR_DISK : 0,
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0,
self.TEST_RESULT_MBED_ASSERT : 0,
self.TEST_RESULT_BUILD_FAILED : 0,
self.TEST_RESULT_NOT_SUPPORTED : 0
}
for test in test_summary:
if test[0] in result_dict:
result_dict[test[0]] += 1
pt.add_row(test)
result += pt.get_string()
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
result_msg = ""
try:
os.remove(file_path)
except Exception as e:
result_msg = e
result = False
return result, result_msg
def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
""" Test is being invoked for given MUT.
"""
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
if mut is None:
print("Error: No Mbed available: MUT[%s]" % data['mcu'])
return None
mcu = mut['mcu']
copy_method = mut.get('copy_method') # Available board configuration selection e.g. core selection etc.
if self.db_logger:
self.db_logger.reconnect()
selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for one test ran few times
detailed_test_results = {} # { Loop_number: { results ... } }
for test_index in range(test_loops):
# If mbedls is available and we are auto detecting MUT info,
# update MUT info (mount point may have changed)
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
platform_name_filter = [mcu]
muts_list = {}
found = False
for i in range(0, 60):
print('Looking for %s with MBEDLS' % mcu)
muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
if 1 not in muts_list:
sleep(3)
else:
found = True
break
if not found:
print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
return None
else:
mut = muts_list[1]
disk = mut.get('disk')
port = mut.get('port')
if disk is None or port is None:
return None
target_by_mcu = TARGET_MAP[mut['mcu']]
target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
# Some extra stuff can be declared in MUTs structure
reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
# When the build and test system were separate, this was relative to a
# base network folder base path: join(NETWORK_BASE_PATH, ).
# "image" is now a list representing a development image and an update image
# (for device management). When testing, we only use the development image.
image_path = image[0]
# Host test execution
start_host_exec_time = time()
single_test_result = self.TEST_RESULT_UNDEF # single test run result
_copy_method = selected_copy_method
if not exists(image_path):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print(single_test_output)
else:
# Host test execution
start_host_exec_time = time()
host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
host_test_result = self.run_host_test(test.host_test,
image_path, disk, port, duration,
micro=target_name,
verbose=host_test_verbose,
reset=host_test_reset,
reset_tout=reset_tout,
copy_method=selected_copy_method,
program_cycle_s=target_by_mcu.program_cycle_s)
single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
# Store test result
test_all_result.append(single_test_result)
total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
elapsed_time = single_testduration # Time of single test case execution after reset
detailed_test_results[test_index] = {
'result' : single_test_result,
'output' : single_test_output,
'target_name' : target_name,
'target_name_unique' : target_name_unique,
'toolchain_name' : toolchain_name,
'id' : test_id,
'description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,
}
print(self.print_test_result(
single_test_result, target_name_unique, toolchain_name, test_id,
test_description, elapsed_time, single_timeout))
# Update database entries for ongoing test
if self.db_logger and self.db_logger.is_connected():
test_type = 'SingleTest'
self.db_logger.insert_test_entry(self.db_logger_build_id,
target_name,
toolchain_name,
test_type,
test_id,
single_test_result,
single_test_output,
elapsed_time,
single_timeout,
test_index)
# If we perform waterfall test we test until we get OK and we stop testing
if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
break
if self.db_logger:
self.db_logger.disconnect()
return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
target_name_unique,
toolchain_name,
test_id,
test_description,
round(elapsed_time, 2),
single_timeout,
self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
for id, m in self.muts.items():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
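# Illustrative sketch (assumed inputs, not from the original source), with 'OK' and
# 'FAIL' standing for self.TEST_RESULT_OK / self.TEST_RESULT_FAIL:
#   self.shape_global_test_loop_result(['OK', 'OK', 'OK'], False)  ->  'OK'    (all loops agree)
#   self.shape_global_test_loop_result(['FAIL', 'OK'], False)      ->  'FAIL'  (mixed results)
#   self.shape_global_test_loop_result(['FAIL', 'OK'], True)       ->  'OK'    (waterfall + consolidate: any OK wins)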
def run_host_test(self, name, image_path, disk, port, duration,
micro=None, reset=None, reset_tout=None,
verbose=False, copy_method=None, program_cycle_s=None):
""" Function creates new process with host test configured with particular test case.
The function also polls the process for serial port activity to catch all data
printed by the test runner and host test during test execution
"""
def get_char_from_queue(obs):
""" Get character from queue safe way
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty:
c = None
return c
def filter_queue_char(c):
""" Filters out non ASCII characters from serial port
"""
if ord(c) not in range(128):
c = ' '
return c
def get_test_result(output):
""" Parse test 'output' data
"""
result = self.TEST_RESULT_TIMEOUT
for line in "".join(output).splitlines():
search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
if search_result and len(search_result.groups()):
result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
break
return result
def get_auto_property_value(property_name, line):
""" Scans auto detection line from MUT and returns scanned parameter 'property_name'
Returns string
"""
result = None
if re.search("HOST: Property '%s'"% property_name, line) is not None:
property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
if property is not None and len(property.groups()) == 1:
result = property.groups()[0]
return result
cmd = ["python",
'%s.py'% name,
'-d', disk,
'-f', '"%s"'% image_path,
'-p', port,
'-t', str(duration),
'-C', str(program_cycle_s)]
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
cmd += ['--auto']
# Add extra parameters to host_test
if copy_method is not None:
cmd += ["-c", copy_method]
if micro is not None:
cmd += ["-m", micro]
if reset is not None:
cmd += ["-r", reset]
if reset_tout is not None:
cmd += ["-R", str(reset_tout)]
if verbose:
print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
print("Test::Output::Start")
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
update_once_flag = {} # Stores flags checking if some auto-parameter was already set
line = ''
output = []
start_time = time()
while (time() - start_time) < (2 * duration):
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
# Give the mbed under test a way to communicate the end of the test
if c in ['\n', '\r']:
# Checking for auto-detection information from the test about MUT reset moment
if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
# We will update this marker only once to prevent resetting the timer multiple times
update_once_flag['reset_target'] = True
start_time = time()
# Checking for auto-detection information from the test about timeout
auto_timeout_val = get_auto_property_value('timeout', line)
if 'timeout' not in update_once_flag and auto_timeout_val is not None:
# We will update this marker only once to prevent updating the timeout multiple times
update_once_flag['timeout'] = True
duration = int(auto_timeout_val)
# Detect mbed assert:
if 'mbed assertation failed: ' in line:
output.append('{{mbed_assert}}')
break
# Check for test end
if '{end}' in line:
break
line = ''
else:
line += c
end_time = time()
testcase_duration = end_time - start_time # Test case duration from reset to {end}
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
if verbose:
print("Test::Output::Finish")
# Stop test process
obs.stop()
result = get_test_result(output)
return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.items():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
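# Illustrative sketch (hypothetical values): for a K64F build the returned JSON string
# would look roughly like
#   '{"mcu": "K64F", "image": "<path to binary>", "duration": 10, "test_id": "MBED_A1"}'
# handle() later decodes this with json.loads() before dispatching to handle_mut().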
def get_unique_value_from_summary(test_summary, index):
""" Gets list of unique target names
"""
result = []
for test in test_summary:
target_name = test[index]
if target_name not in result:
result.append(target_name)
return sorted(result)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
""" Gets list of unique target names and return dictionary
"""
result = {}
for test in test_summary:
key = test[index_key]
val = test[index_val]
if key not in result:
result[key] = val
return result
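# Illustrative sketch (hypothetical column constant DESC_INDEX): with summary rows shaped
# like [result, target, toolchain, test_id, description, ...], a call such as
#   get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
# yields a {test_id: description} mapping, keeping the first value seen for each key.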
def show_json_file_format_error(json_spec_filename, line, column):
""" Prints JSON broken content
"""
with open(json_spec_filename) as data_file:
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print('Line %d:\t'%line_no + json_line)
if line_no == line:
print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
'-' * (column - 1)))
break
line_no += 1
def json_format_error_defect_pos(json_error_msg):
""" Gets first error line and column in JSON file format.
Parsed from exception thrown by json.loads() string
"""
result = None
line, column = 0, 0
# Line value search
line_search = re.search('line [0-9]+', json_error_msg)
if line_search is not None:
ls = line_search.group().split(' ')
if len(ls) == 2:
line = int(ls[1])
# Column position search
column_search = re.search('column [0-9]+', json_error_msg)
if column_search is not None:
cs = column_search.group().split(' ')
if len(cs) == 2:
column = int(cs[1])
result = [line, column]
return result
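# Illustrative sketch: json.loads() raises errors with messages such as
#   "Expecting ',' delimiter: line 7 column 12 (char 115)"
# for which this helper returns [7, 12]; show_json_file_format_error() then uses those
# coordinates to point at the offending character.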
def get_json_data_from_file(json_spec_filename, verbose=False):
""" Loads from file JSON formatted string to data structure
"""
result = None
try:
with open(json_spec_filename) as data_file:
try:
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print('JSON file %s parsing failed. Reason: %s' %
(json_spec_filename, json_error_msg))
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print()
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print('JSON file %s not opened. Reason: %s\n'%
(json_spec_filename, fileopen_error_msg))
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
""" Prints MUTs configuration passed to test script for verboseness
"""
muts_info_cols = []
# We need to check all unique properties for each defined MUT
for k in json_data:
mut_info = json_data[k]
for mut_property in mut_info:
if mut_property not in muts_info_cols:
muts_info_cols.append(mut_property)
# Prepare pretty table object to display all MUTs
pt_cols = ["index"] + muts_info_cols
pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
for col in pt_cols:
pt.align[col] = "l"
# Add rows to pretty print object
for k in json_data:
row = [k]
mut_info = json_data[k]
add_row = True
if platform_filter and 'mcu' in mut_info:
add_row = re.search(platform_filter, mut_info['mcu']) is not None
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if isinstance(cell_val, list):
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
""" Prints test specification configuration passed to test script for verboseness
"""
toolchains_info_cols = []
# We need to check all toolchains for each device
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
toolchains = targets[target]
for toolchain in toolchains:
if toolchain not in toolchains_info_cols:
toolchains_info_cols.append(toolchain)
# Prepare pretty table object to display test specification
pt_cols = ["mcu"] + sorted(toolchains_info_cols)
pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
for col in pt_cols:
pt.align[col] = "l"
# { target : [conflicted toolchains] }
toolchain_conflicts = {}
toolchain_path_conflicts = []
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
target_supported_toolchains = get_target_supported_toolchains(target)
if not target_supported_toolchains:
target_supported_toolchains = []
target_name = target if target in TARGET_MAP else "%s*"% target
row = [target_name]
toolchains = targets[target]
for toolchain in sorted(toolchains_info_cols):
# Check for conflicts: target vs toolchain
conflict = False
conflict_path = False
if toolchain in toolchains:
if toolchain not in target_supported_toolchains:
conflict = True
if target not in toolchain_conflicts:
toolchain_conflicts[target] = []
toolchain_conflicts[target].append(toolchain)
# Add marker inside table about target usage / conflict
cell_val = 'Yes' if toolchain in toolchains else '-'
if conflict:
cell_val += '*'
# Check for conflicts: toolchain vs toolchain path
if toolchain in TOOLCHAIN_PATHS:
toolchain_path = TOOLCHAIN_PATHS[toolchain]
if not os.path.isdir(toolchain_path):
conflict_path = True
if toolchain not in toolchain_path_conflicts:
toolchain_path_conflicts.append(toolchain)
if conflict_path:
cell_val += '#'
row.append(cell_val)
pt.add_row(row)
# generate result string
result = pt.get_string() # Test specification table
if toolchain_conflicts or toolchain_path_conflicts:
result += "\n"
result += "Toolchain conflicts:\n"
for target in toolchain_conflicts:
if target not in TARGET_MAP:
result += "\t* Target %s unknown\n"% (target)
conflict_target_list = join_delim.join(toolchain_conflicts[target])
suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
for toolchain in toolchain_path_conflicts:
# Let's check toolchain configuration
if toolchain in TOOLCHAIN_PATHS:
toolchain_path = TOOLCHAIN_PATHS[toolchain]
if not os.path.isdir(toolchain_path):
result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
""" Generates table summary with all test cases and additional test cases
information using pretty-print functionality. Allows test suite users to
see available test cases
"""
# get all unique test ID prefixes
unique_test_id = []
for test in TESTS:
split = test['id'].split('_')[:-1]
test_id_prefix = '_'.join(split)
if test_id_prefix not in unique_test_id:
unique_test_id.append(test_id_prefix)
unique_test_id.sort()
counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
test_properties = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration'] if cols is None else cols
# All tests status table print
pt = PrettyTable(test_properties, junction_char="|", hrules=HEADER)
for col in test_properties:
pt.align[col] = "l"
pt.align['duration'] = "r"
counter_all = 0
counter_automated = 0
pt.padding_width = 1 # One space between column edges and contents (default)
for test_id in sorted(TEST_MAP.keys()):
if platform_filter is not None:
# Filter out platforms using regex
if re.search(platform_filter, test_id) is None:
continue
row = []
test = TEST_MAP[test_id]
split = test_id.split('_')[:-1]
test_id_prefix = '_'.join(split)
for col in test_properties:
col_value = test[col]
if isinstance(test[col], list):
col_value = join_delim.join(test[col])
elif test[col] is None:
col_value = "-"
row.append(col_value)
if test['automated']:
counter_dict_test_id_types[test_id_prefix] += 1
counter_automated += 1
pt.add_row(row)
# Update counters
counter_all += 1
counter_dict_test_id_types_all[test_id_prefix] += 1
result = pt.get_string()
result += "\n\n"
if result_summary and not platform_filter:
# Automation result summary
test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
str_progress = progress_bar(percent_progress, 75)
pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
result += "Automation coverage:\n"
result += pt.get_string()
result += "\n\n"
# Test automation coverage table print
test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
pt.align['id'] = "l"
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
counter_dict_test_id_types_all[unique_id],
percent_progress,
"[" + str_progress + "]"]
pt.add_row(row)
result += "Test automation coverage:\n"
result += pt.get_string()
result += "\n\n"
return result
def progress_bar(percent_progress, saturation=0):
""" This function creates a progress bar with an optional simple saturation mark
"""
step = int(percent_progress / 2) # Scale percentage (0-100) to a 50-character bar
str_progress = '#' * step + '.' * int(50 - step)
c = '!' if str_progress[38] == '.' else '|'
if saturation > 0:
saturation = int(saturation / 2) # Scale the saturation mark to the same 50-character range
str_progress = str_progress[:saturation] + c + str_progress[saturation:]
return str_progress
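# Illustrative sketch: progress_bar(50.0) returns a 50-character bar, here 25 '#'
# followed by 25 '.'; passing e.g. saturation=75 additionally inserts a single
# saturation marker ('|' or '!') inside the bar at the scaled position.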
def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode
@return returns success code (0 == success) for building and running tests
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print(single_test.generate_test_summary(test_summary, shuffle_seed))
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print(single_test.generate_test_summary_by_target(test_summary,
shuffle_seed))
print("Completed in %.2f sec" % elapsed_time)
print()
# Write summary of the builds
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
# Store extra reports in files
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_text_file_name:
# Export results in form of a text file
report_exporter = ReportExporter(ResultExporterType.TEXT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_build_file_name:
# Export build results as a JUnit XML report to a separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
# Returns True if no build failures of the test projects or their dependencies
return status
class TestLogger():
""" Super-class for logging and printing ongoing events for test suite pass
"""
def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
or just handled all log entries immediately
"""
self.log = []
self.log_to_file = False
self.log_file_name = None
self.store_log = store_log
self.LogType = construct_enum(INFO='Info',
WARN='Warning',
NOTIF='Notification',
ERROR='Error',
EXCEPT='Exception')
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
log_entry = {'log_type' : LogType,
'log_timestamp' : log_timestamp,
'log_line' : log_line,
'_future' : None
}
# Store log in memory
if self.store_log:
self.log.append(log_entry)
return log_entry
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""
def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
#self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
def log_print(self, log_entry, timestamp=True):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Logs line, if log file output was specified log line will be appended
at the end of log file
"""
log_entry = TestLogger.log_line(self, LogType, log_line)
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
with open(self.log_file_name, 'a') as f:
f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
def factory_db_logger(db_url):
""" Factory database driver depending on database type supplied in database connection string db_url
"""
if db_url is not None:
from tools.test_mysql import MySQLDBAccess
connection_info = BaseDBAccess().parse_db_connection_string(db_url)
if connection_info is not None:
(db_type, username, password, host, db_name) = connection_info
if db_type == 'mysql':
return MySQLDBAccess()
return None
def detect_database_verbose(db_url):
""" uses verbose mode (prints) database detection sequence to check it database connection string is valid
"""
result = BaseDBAccess().parse_db_connection_string(db_url)
if result is not None:
# Parsing passed
(db_type, username, password, host, db_name) = result
#print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
# Let's try to connect
db_ = factory_db_logger(db_url)
if db_ is not None:
print("Connecting to database '%s'..." % db_url)
db_.connect(host, username, password, db_name)
if db_.is_connected():
print("ok")
print("Detecting database...")
print(db_.detect_database(verbose=True))
print("Disconnecting...")
db_.disconnect()
print("done")
else:
print("Database type '%s' unknown" % db_type)
else:
print("Parse error: '%s' - DB Url error" % db_url)
def get_module_avail(module_name):
""" This function returns True if module_name is already imported module
"""
return module_name in sys.modules.keys()
def get_autodetected_MUTS_list(platform_name_filter=None):
oldError = None
if os.name == 'nt':
# Disable Windows error box temporarily
oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
mbeds = mbed_lstools.create()
detect_muts_list = mbeds.list_mbeds()
if os.name == 'nt':
ctypes.windll.kernel32.SetErrorMode(oldError)
return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds()
@param mbeds_list list of mbeds captured from mbed_lstools
@param platform_name You can filter 'platform_name' with list of filtered targets from 'platform_name_filter'
"""
result = {} # Should be in muts_all.json format
# Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
# mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
index = 1
for mut in mbeds_list:
# Filter the MUTS if a filter is specified
if platform_name_filter and not mut['platform_name'] in platform_name_filter:
continue
# For mcu_unique we assign the 'platform_name_unique' value from the mbedls output (if it exists);
# if not, we create our own unique value (last few chars of the platform's target_id).
m = {'mcu': mut['platform_name'],
'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
'port': mut['serial_port'],
'disk': mut['mount_point'],
'peripherals': [] # No peripheral detection
}
result[index] = m
index += 1
return result
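# Illustrative sketch of the returned structure (values are hypothetical, mirroring the
# mbedls example above):
#   {
#     1: {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
#         'port': 'COM34', 'disk': 'E:', 'peripherals': []},
#     2: {...},
#   }
# i.e. the same shape a hand-written muts_all.json would have.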
def get_autodetected_TEST_SPEC(mbeds_list,
use_default_toolchain=True,
use_supported_toolchains=False,
toolchain_filter=None,
platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
If function fails to auto-detect devices it will return empty 'targets' test_spec description.
use_default_toolchain - if True add default toolchain to test_spec
use_supported_toolchains - if True add all supported toolchains to test_spec
toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
"""
result = {'targets': {} }
for mut in mbeds_list:
mcu = mut['mcu']
if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
if mcu in TARGET_MAP:
default_toolchain = TARGET_MAP[mcu].default_toolchain
supported_toolchains = TARGET_MAP[mcu].supported_toolchains
# Decide which toolchains should be added to test specification toolchain pool for each target
toolchains = []
if use_default_toolchain:
toolchains.append(default_toolchain)
if use_supported_toolchains:
toolchains += supported_toolchains
if toolchain_filter is not None:
all_toolchains = supported_toolchains + [default_toolchain]
for toolchain in toolchain_filter:
if toolchain in all_toolchains:
toolchains.append(toolchain)
result['targets'][mcu] = list(set(toolchains))
return result
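# Illustrative sketch (hypothetical target): with a connected K64F and
# use_default_toolchain=True, use_supported_toolchains=False, the result is roughly
#   {'targets': {'K64F': ['ARM']}}
# assuming 'ARM' is that target's default toolchain; enabling use_supported_toolchains
# extends the list with every entry of TARGET_MAP['K64F'].supported_toolchains.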
def get_default_test_options_parser():
""" Get common test script options used by CLI, web services etc.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--tests',
dest='test_spec_filename',
metavar="FILE",
type=argparse_filestring_type,
help='Points to file with test specification')
parser.add_argument('-M', '--MUTS',
dest='muts_spec_filename',
metavar="FILE",
type=argparse_filestring_type,
help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
parser.add_argument("-j", "--jobs",
dest='jobs',
metavar="NUMBER",
type=int,
help="Define number of compilation jobs. Default value is 1")
if get_module_avail('mbed_lstools'):
# Additional features available when mbed_lstools is installed on host and imported
# mbed_lstools allow users to detect connected to host mbed-enabled devices
parser.add_argument('--auto',
dest='auto_detect',
action="store_true",
help='Use mbed-ls module to detect all connected mbed devices')
toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
parser.add_argument('--tc',
dest='toolchains_filter',
type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
parser.add_argument('--oper',
dest='operability_checks',
type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
parser.add_argument('--clean',
dest='clean',
action="store_true",
help='Clean the build directory')
parser.add_argument('-P', '--only-peripherals',
dest='test_only_peripheral',
default=False,
action="store_true",
help='Test only peripheral declared for MUT and skip common tests')
parser.add_argument("--profile", dest="profile", action="append",
type=argparse_filestring_type,
default=[])
parser.add_argument('-C', '--only-commons',
dest='test_only_common',
default=False,
action="store_true",
help='Test only board internals. Skip peripherals tests and perform common tests')
parser.add_argument('-n', '--test-by-names',
dest='test_by_names',
type=argparse_many(str),
help='Runs only tests enumerated in this switch. Use comma to separate test case names')
parser.add_argument('-p', '--peripheral-by-names',
dest='peripheral_by_names',
type=argparse_many(str),
help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
parser.add_argument('-c', '--copy-method',
dest='copy_method',
type=argparse_uppercase_type(copy_methods, "flash method"),
help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
parser.add_argument('-r', '--reset-type',
dest='mut_reset_type',
default=None,
type=argparse_uppercase_type(reset_methods, "reset method"),
help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
parser.add_argument('-g', '--goanna-for-tests',
dest='goanna_for_tests',
action="store_true",
help='Run Goanna static analysis tool for tests (project will be rebuilt)')
parser.add_argument('-G', '--goanna-for-sdk',
dest='goanna_for_mbed_sdk',
action="store_true",
help='Run Goanna static analysis tool for mbed SDK (project will be rebuilt)')
parser.add_argument('-s', '--suppress-summary',
dest='suppress_summary',
default=False,
action="store_true",
help='Suppresses display of well-formatted table with test results')
parser.add_argument('-t', '--test-summary',
dest='test_x_toolchain_summary',
default=False,
action="store_true",
help='Displays well-formatted table with test x toolchain test results per target')
parser.add_argument('-A', '--test-automation-report',
dest='test_automation_report',
default=False,
action="store_true",
help='Prints information about all tests and exits')
parser.add_argument('-R', '--test-case-report',
dest='test_case_report',
default=False,
action="store_true",
help='Prints information about all test cases and exits')
parser.add_argument("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_argument("-O", "--only-build",
action="store_true",
dest="only_build_tests",
default=False,
help="Only build tests, skips actual test procedures (flashing etc.)")
parser.add_argument('--parallel',
dest='parallel_test_exec',
default=False,
action="store_true",
help='Experimental: executes test runners for MUTs connected to your host in parallel (speeds up test result collection)')
parser.add_argument('--config',
dest='verbose_test_configuration_only',
default=False,
action="store_true",
help='Displays full test specification and MUTs configuration and exits')
parser.add_argument('--loops',
dest='test_loops_list',
type=argparse_many(str),
help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
parser.add_argument('--global-loops',
dest='test_global_loops_value',
type=int,
help='Set global number of test loops per test. Default value is 1')
parser.add_argument('--consolidate-waterfall',
dest='consolidate_waterfall_test',
default=False,
action="store_true",
help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
parser.add_argument('-W', '--waterfall',
dest='waterfall_test',
default=False,
action="store_true",
help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
parser.add_argument('-N', '--firmware-name',
dest='firmware_global_name',
help='Set global name for all produced projects. Note: the proper file extension will be added by build scripts')
parser.add_argument('-u', '--shuffle',
dest='shuffle_test_order',
default=False,
action="store_true",
help='Shuffles test execution order')
parser.add_argument('--shuffle-seed',
dest='shuffle_test_seed',
default=None,
help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
parser.add_argument('-f', '--filter',
dest='general_filter_regex',
type=argparse_many(str),
default=None,
help='For some commands you can use filter to filter out results')
parser.add_argument('--inc-timeout',
dest='extend_test_timeout',
metavar="NUMBER",
type=int,
help='You can increase global timeout for each test by specifying additional test timeout in seconds')
parser.add_argument('--db',
dest='db_url',
help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:[email protected]/db_name\'')
parser.add_argument('-l', '--log',
dest='log_file_name',
help='Log events to external file (note not all console entries may be visible in log file)')
parser.add_argument('--report-html',
dest='report_html_file_name',
help='You can log test suite results in form of HTML report')
parser.add_argument('--report-junit',
dest='report_junit_file_name',
help='You can log test suite results in form of JUnit compliant XML report')
parser.add_argument("--report-build",
dest="report_build_file_name",
help="Output the build results to a junit xml file")
parser.add_argument("--report-text",
dest="report_text_file_name",
help="Output the build results to a text file")
parser.add_argument('--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
action="store_true",
help='Prints some extra information about skipped tests')
parser.add_argument('-V', '--verbose-test-result',
dest='verbose_test_result_only',
default=False,
action="store_true",
help='Prints test serial output')
parser.add_argument('-v', '--verbose',
dest='verbose',
default=False,
action="store_true",
help='Verbose mode (prints some extra information)')
parser.add_argument('--version',
dest='version',
default=False,
action="store_true",
help='Prints script version and exits')
parser.add_argument('--stats-depth',
dest='stats_depth',
default=2,
type=int,
help="Depth level for static memory report")
return parser
def test_path_to_name(path, base):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
This can eventually be overridden by a to-be-determined meta-data mechanism"""
name_parts = []
head, tail = os.path.split(relpath(path,base))
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def get_test_config(config_name, target_name):
"""Finds the path to a test configuration file
config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
target_name: name of target used to determine if the given mbed OS interface is valid
returns path to config, will return None if no valid config is found
"""
# If they passed in a full path
if exists(config_name):
# This is a module config
return config_name
# Otherwise find the path to configuration file based on mbed OS interface
return TestConfig.get_config_path(config_name, target_name)
def find_tests(base_dir, target_name, toolchain_name, icetea, greentea, app_config=None):
""" Finds all tests in a directory recursively
:param base_dir: path to the directory to scan for tests (ex. 'path/to/project')
:param target_name: name of the target to use for scanning (ex. 'K64F')
:param toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
:param icetea: icetea enabled
:param greentea: greentea enabled
:param app_config: location of a chosen mbed_app.json file
returns a dictionary where keys are the test name, and the values are
lists of paths needed to build the test.
"""
# Temporary structure: tests referenced by (name, base, group, case) tuple
tests = {}
# List of common folders: (predicate function, path) tuple
commons = []
config = Config(target_name, base_dir, app_config)
# Scan the directory for paths to probe for 'TESTS' folders
base_resources = Resources(MockNotifier(), collect_ignores=True)
base_resources.scan_with_config(base_dir, config)
if greentea:
dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TESTS']
ignoreset = MbedIgnoreSet()
for directory in dirs:
ignorefile = join(directory, IGNORE_FILENAME)
if isfile(ignorefile):
ignoreset.add_mbedignore(directory, ignorefile)
for test_group_directory in os.listdir(directory):
grp_dir = join(directory, test_group_directory)
if not isdir(grp_dir) or ignoreset.is_ignored(grp_dir):
continue
grpignorefile = join(grp_dir, IGNORE_FILENAME)
if isfile(grpignorefile):
ignoreset.add_mbedignore(grp_dir, grpignorefile)
for test_case_directory in os.listdir(grp_dir):
d = join(directory, test_group_directory, test_case_directory)
if not isdir(d) or ignoreset.is_ignored(d):
continue
special_dirs = ['host_tests', 'COMMON']
if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
test_name = test_path_to_name(d, base_dir)
tests[(test_name, directory, test_group_directory, test_case_directory)] = [d]
if test_case_directory == 'COMMON':
def predicate(base_pred, group_pred, name_base_group_case):
(name, base, group, case) = name_base_group_case
return base == base_pred and group == group_pred
commons.append((functools.partial(predicate, directory, test_group_directory), d))
if test_group_directory == 'COMMON':
def predicate(base_pred, name_base_group_case):
(name, base, group, case) = name_base_group_case
return base == base_pred
commons.append((functools.partial(predicate, directory), grp_dir))
if icetea:
dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TEST_APPS']
for directory in dirs:
if not isdir(directory):
continue
for subdir in os.listdir(directory):
d = join(directory, subdir)
if not isdir(d):
continue
if 'device' == subdir:
for test_dir in os.listdir(d):
test_dir_path = join(d, test_dir)
test_name = test_path_to_name(test_dir_path, base_dir)
tests[(test_name, directory, subdir, test_dir)] = [test_dir_path]
# Apply common directories
for pred, path in commons:
for test_identity, test_paths in six.iteritems(tests):
if pred(test_identity):
test_paths.append(path)
# Drop identity besides name
return {name: paths for (name, _, _, _), paths in six.iteritems(tests)}
def print_tests(tests, format="list", sort=True):
"""Given a dictionary of tests (as returned from "find_tests"), print them
in the specified format"""
if format == "list":
for test_name in sorted(tests.keys()):
test_path = tests[test_name][0]
print("Test Case:")
print(" Name: %s" % test_name)
print(" Path: %s" % test_path)
elif format == "json":
print(json.dumps({test_name: test_path[0] for test_name, test_paths
in tests}, indent=2))
else:
print("Unknown format '%s'" % format)
sys.exit(1)
def norm_relative_path(path, start):
"""This function will create a normalized, relative path. It mimics the
python os.path.relpath function, but also normalizes a Windows-syle path
that use backslashes to a Unix style path that uses forward slashes."""
path = os.path.normpath(path)
path = os.path.relpath(path, start)
path = path.replace("\\", "/")
return path
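# Illustrative sketch (Windows-style input, run on a Windows host):
#   norm_relative_path('C:\\work\\proj\\BUILD\\tests\\K64F', 'C:\\work\\proj')  ->  'BUILD/tests/K64F'
# i.e. relative to `start` and with forward slashes only.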
def build_test_worker(*args, **kwargs):
"""This is a worker function for the parallel building of tests. The `args`
and `kwargs` are passed directly to `build_project`. It returns a dictionary
with the following structure:
{
'result': `True` if no exceptions were thrown, `False` otherwise
'reason': Instance of exception that was thrown on failure
'bin_file': Path to the created binary if `build_project` was
successful. Not present otherwise
'kwargs': The keyword arguments that were passed to `build_project`.
This includes arguments that were modified (ex. report)
}
"""
bin_file = None
ret = {
'result': False,
'args': args,
'kwargs': kwargs
}
# Use parent TOOLCHAIN_PATHS variable
for key, value in kwargs['toolchain_paths'].items():
TOOLCHAIN_PATHS[key] = value
del kwargs['toolchain_paths']
try:
bin_file, _ = build_project(*args, **kwargs)
ret['result'] = True
ret['bin_file'] = bin_file
ret['kwargs'] = kwargs
except NotSupportedException as e:
ret['reason'] = e
except ToolException as e:
ret['reason'] = e
except KeyboardInterrupt as e:
ret['reason'] = e
except:
# Print unhandled exceptions here
import traceback
traceback.print_exc(file=sys.stdout)
return ret
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
clean=False, notify=None, jobs=1, macros=None,
silent=False, report=None, properties=None,
continue_on_build_fail=False, app_config=None,
build_profile=None, stats_depth=None, ignore=None, spe_build=False):
"""Given the data structure from 'find_tests' and the typical build parameters,
build all the tests
Returns a tuple of the build result (True or False) followed by the test
build data structure"""
execution_directory = "."
base_path = norm_relative_path(build_path, execution_directory)
if isinstance(target, Target):
target_name = target.name
else:
target_name = target
target = TARGET_MAP[target_name]
cfg, _, _, _ = get_config(base_source_paths, target, app_config=app_config)
baud_rate = 9600
if 'platform.stdio-baud-rate' in cfg:
baud_rate = cfg['platform.stdio-baud-rate'].value
test_build = {
"platform": target_name,
"toolchain": toolchain_name,
"base_path": base_path,
"baud_rate": baud_rate,
"binary_type": "bootable",
"tests": {},
"test_apps": {}
}
result = True
jobs_count = int(jobs if jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
for test_name, test_paths in tests.items():
if not isinstance(test_paths, list):
test_paths = [test_paths]
test_build_path = os.path.join(build_path, test_paths[0])
src_paths = base_source_paths + test_paths
bin_file = None
test_case_folder_name = os.path.basename(test_paths[0])
args = (src_paths, test_build_path, deepcopy(target), toolchain_name)
kwargs = {
'jobs': 1,
'clean': clean,
'macros': macros,
'name': test_case_folder_name,
'project_id': test_name,
'report': report,
'properties': properties,
'app_config': app_config,
'build_profile': build_profile,
'toolchain_paths': TOOLCHAIN_PATHS,
'stats_depth': stats_depth,
'notify': MockNotifier(),
'spe_build': spe_build
}
results.append(p.apply_async(build_test_worker, args, kwargs))
p.close()
result = True
itr = 0
while len(results):
itr += 1
if itr > 360000:
p.terminate()
p.join()
raise ToolException("Compile did not finish in 10 minutes")
else:
sleep(0.01)
pending = 0
for r in results:
if r.ready() is True:
try:
worker_result = r.get()
results.remove(r)
# Push all deferred notifications out to the actual notifier
new_notify = deepcopy(notify)
for message in worker_result['kwargs']['notify'].messages:
new_notify.notify(message)
# Take report from the kwargs and merge it into existing report
if report:
report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
report_entry[worker_result['kwargs']['project_id'].upper()][0][0]['output'] = new_notify.get_output()
for test_key in report_entry.keys():
report[target_name][toolchain_name][test_key] = report_entry[test_key]
# Set the overall result to a failure if a build failure occurred
if ('reason' in worker_result and
not isinstance(worker_result['reason'], NotSupportedException)):
result = False
break
# Adding binary path to test build result
if ('result' in worker_result and
worker_result['result'] and
'bin_file' in worker_result):
bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
test_key = 'test_apps' if 'test_apps-' in worker_result['kwargs']['project_id'] else 'tests'
test_build[test_key][worker_result['kwargs']['project_id']] = {
"binaries": [
{
"path": bin_file
}
]
}
test_key = worker_result['kwargs']['project_id'].upper()
print('Image: %s\n' % bin_file)
except:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
p.join()
raise
else:
pending += 1
if pending >= jobs_count:
break
# Break as soon as possible if there is a failure and we are not
# continuing on build failures
if not result and not continue_on_build_fail:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
break
p.join()
test_builds = {}
test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
return result, test_builds
def test_spec_from_test_builds(test_builds):
for build in test_builds:
if Target.get_target(test_builds[build]['platform']).is_PSA_non_secure_target:
if test_builds[build]['platform'].endswith('_NS'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-3]
if test_builds[build]['platform'].endswith('_PSA'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-4]
return {
"builds": test_builds
}
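# Illustrative sketch (hypothetical PSA target): a build entry whose 'platform' is
# 'SOME_TARGET_NS' and whose target is a PSA non-secure target gets its platform
# rewritten to 'SOME_TARGET' before being wrapped as {"builds": test_builds}, so the
# test tools see the base target name.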
|
"""
The state is central in torchbearer, storing all of the relevant intermediate values that may be changed or replaced
during model fitting. This module defines classes for interacting with state and all of the built in state keys used
throughout torchbearer. The :func:`state_key` function can be used to create custom state keys for use in callbacks or
metrics.
Example: ::
from torchbearer import state_key
MY_KEY = state_key('my_test_key')
"""
from torchbearer import Metric
import warnings
__keys__ = []
def state_key(key):
"""Computes and returns a non-conflicting key for the state dictionary when given a seed key
Args:
key (str): The seed key - basis for new state key
Returns:
:class:`.StateKey`: New state key
"""
return StateKey(key)
class StateKey(Metric):
""" StateKey class that is a unique state key based on the input string key. State keys are also metrics which
retrieve themselves from state.
Args:
key (str): Base key
"""
def __init__(self, key):
self.key = self._gen_key_(key)
super(StateKey, self).__init__(self.key)
def process(self, state):
return {self.name: state[self]}
def process_final(self, state):
return {self.name: state[self]}
def _gen_key_(self, key):
if key in __keys__:
count = 1
my_key = key + '_' + str(count)
while my_key in __keys__:
count += 1
my_key = key + '_' + str(count)
key = my_key
__keys__.append(key)
return key
def __repr__(self):
return self.key
def __str__(self):
return self.key
def __eq__(self, other):
return self.key == str(other)
def __hash__(self):
return self.key.__hash__()
class State(dict):
"""
State dictionary that behaves like a python dict but accepts StateKeys
"""
def __init__(self):
super(State, self).__init__()
def get_key(self, statekey):
if isinstance(statekey, str):
warnings.warn("State was accessed with a string: {}, generate keys with StateKey(str).".format(statekey), stacklevel=3)
return statekey
def __getitem__(self, key):
return super(State, self).__getitem__(self.get_key(key))
def __setitem__(self, key, val):
super(State, self).__setitem__(self.get_key(key), val)
def __delitem__(self, val):
super(State, self).__delitem__(val)
def __contains__(self, o):
return super(State, self).__contains__(self.get_key(o))
def update(self, d):
new_dict = {}
for key in d:
new_dict[self.get_key(key)] = d[key]
super(State, self).update(new_dict)
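# Illustrative usage sketch (mirrors the module docstring example):
#   MY_KEY = state_key('my_test_key')   # -> StateKey 'my_test_key'
#   CLASH = state_key('my_test_key')    # -> StateKey 'my_test_key_1' (suffix avoids the clash)
#   s = State()
#   s[MY_KEY] = 42
#   s[MY_KEY]                           # -> 42
#   s['my_test_key']                    # -> 42 as well, but emits a string-access warning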
#: The torchbearer version
VERSION = state_key('torchbearer_version')
#: The PyTorch module / model that will be trained
MODEL = state_key('model')
#: The criterion to use when model fitting
CRITERION = state_key('criterion')
#: The optimizer to use when model fitting
OPTIMIZER = state_key('optimizer')
#: The device currently in use by the :class:`.Trial` and PyTorch model
DEVICE = state_key('device')
#: The data type of tensors in use by the model, match this to avoid type issues
DATA_TYPE = state_key('dtype')
#: The list of metrics in use by the :class:`.Trial`
METRIC_LIST = state_key('metric_list')
#: The metric dict from the current batch of data
METRICS = state_key('metrics')
#: A self reference to the Trial object for persistence etc.
SELF = state_key('self')
#: The current epoch number
EPOCH = state_key('epoch')
#: The total number of epochs to run for
MAX_EPOCHS = state_key('max_epochs')
#: The string name of the current data
DATA = state_key('data')
#: The current data generator (DataLoader)
GENERATOR = state_key('generator')
#: The current iterator
ITERATOR = state_key('iterator')
#: The current number of steps per epoch
STEPS = state_key('steps')
#: The train data generator in the Trial object
TRAIN_GENERATOR = state_key('train_generator')
#: The number of train steps to take
TRAIN_STEPS = state_key('train_steps')
#: The flag representing train data
TRAIN_DATA = state_key('train_data')
#: The validation data generator in the Trial object
VALIDATION_GENERATOR = state_key('validation_generator')
#: The number of validation steps to take
VALIDATION_STEPS = state_key('validation_steps')
#: The flag representing validation data
VALIDATION_DATA = state_key('validation_data')
#: The test data generator in the Trial object
TEST_GENERATOR = state_key('test_generator')
#: The number of test steps to take
TEST_STEPS = state_key('test_steps')
#: The flag representing test data
TEST_DATA = state_key('test_data')
#: A flag that can be set to true to stop the current fit call
STOP_TRAINING = state_key('stop_training')
#: The current batch of ground truth data
TARGET = Y_TRUE = state_key('y_true')
#: The current batch of predictions
PREDICTION = Y_PRED = state_key('y_pred')
#: The current batch of inputs
INPUT = X = state_key('x')
#: The sampler which loads data from the generator onto the correct device
SAMPLER = state_key('sampler')
#: The current value for the loss
LOSS = state_key('loss')
#: The key which maps to the predictions over the dataset when calling predict
FINAL_PREDICTIONS = state_key('final_predictions')
#: The current batch number
BATCH = state_key('t')
#: The timings keys used by the timer callback
TIMINGS = state_key('timings')
#: The :class:`.CallbackList` object which is called by the Trial
CALLBACK_LIST = state_key('callback_list')
#: The history list of the Trial instance
HISTORY = state_key('history')
#: The optional arguments which should be passed to the backward call
BACKWARD_ARGS = state_key('backward_args')
# Legacy
VALIDATION_ITERATOR = 'validation_iterator'
TRAIN_ITERATOR = 'train_iterator'
|
# Source file: generated/python/gapic-google-cloud-speech-v1beta1/google/cloud/gapic/speech/v1beta1/enums.py (repo: landrito/api-client-staging)
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
class RecognitionConfig(object):
class AudioEncoding(object):
"""
Audio encoding of the data sent in the audio message. All encodings support
only 1 channel (mono) audio. Only ``FLAC`` includes a header that describes
the bytes of audio that follow the header. The other encodings are raw
audio bytes with no header.
For best results, the audio source should be captured and transmitted using
a lossless encoding (``FLAC`` or ``LINEAR16``). Recognition accuracy may be
reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture
or transmit the audio, particularly if background noise is present.
Attributes:
ENCODING_UNSPECIFIED (int): Not specified. Will return result ``google.rpc.Code.INVALID_ARGUMENT``.
LINEAR16 (int): Uncompressed 16-bit signed little-endian samples (Linear PCM).
This is the only encoding that may be used by ``AsyncRecognize``.
FLAC (int): This is the recommended encoding for ``SyncRecognize`` and
``StreamingRecognize`` because it uses lossless compression; therefore
recognition accuracy is not compromised by a lossy codec.
The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
http://flac.sourceforge.net/documentation.html.
16-bit and 24-bit samples are supported.
Not all fields in STREAMINFO are supported.
MULAW (int): 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
AMR (int): Adaptive Multi-Rate Narrowband codec. ``sample_rate`` must be 8000 Hz.
AMR_WB (int): Adaptive Multi-Rate Wideband codec. ``sample_rate`` must be 16000 Hz.
"""
ENCODING_UNSPECIFIED = 0
LINEAR16 = 1
FLAC = 2
MULAW = 3
AMR = 4
AMR_WB = 5
class StreamingRecognizeResponse(object):
class EndpointerType(object):
"""
Indicates the type of endpointer event.
Attributes:
ENDPOINTER_EVENT_UNSPECIFIED (int): No endpointer event specified.
START_OF_SPEECH (int): Speech has been detected in the audio stream, and the service is
beginning to process it.
END_OF_SPEECH (int): Speech has ceased to be detected in the audio stream. (For example, the
user may have paused after speaking.) If ``single_utterance`` is ``false``,
the service will continue to process audio, and if subsequent speech is
detected, will send another START_OF_SPEECH event.
END_OF_AUDIO (int): This event is sent after the client has half-closed the input stream gRPC
connection and the server has received all of the audio. (The server may
still be processing the audio and may subsequently return additional
results.)
END_OF_UTTERANCE (int): This event is only sent when ``single_utterance`` is ``true``. It indicates
that the server has detected the end of the user's speech utterance and
expects no additional speech. Therefore, the server will not process
additional audio (although it may subsequently return additional
results). The client should stop sending additional audio data,
half-close the gRPC connection, and wait for any additional results
until the server closes the gRPC connection.
"""
ENDPOINTER_EVENT_UNSPECIFIED = 0
START_OF_SPEECH = 1
END_OF_SPEECH = 2
END_OF_AUDIO = 3
END_OF_UTTERANCE = 4
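# Illustrative note (not part of the generated module): these classes simply
# mirror the protocol buffer enum values as plain integers, so client code can
# pass them wherever the API expects the corresponding enum, e.g.
# ``encoding = RecognitionConfig.AudioEncoding.FLAC`` when building a
# recognition config; the surrounding client call is assumed.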
|
from collections.abc import Iterable, MutableSequence, Mapping
from enum import Enum
from pathlib import Path
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import openmc.checkvalue as cv
from . import VolumeCalculation, Source, RegularMesh
from ._xml import clean_indentation, get_text, reorder_attributes
class RunMode(Enum):
EIGENVALUE = 'eigenvalue'
FIXED_SOURCE = 'fixed source'
PLOT = 'plot'
VOLUME = 'volume'
PARTICLE_RESTART = 'particle restart'
_RES_SCAT_METHODS = ['dbrc', 'rvs']
class Settings:
"""Settings used for an OpenMC simulation.
Attributes
----------
batches : int
Number of batches to simulate
confidence_intervals : bool
If True, uncertainties on tally results will be reported as the
half-width of the 95% two-sided confidence interval. If False,
uncertainties on tally results will be reported as the sample standard
deviation.
create_fission_neutrons : bool
Indicate whether fission neutrons should be created or not.
cutoff : dict
Dictionary defining weight cutoff and energy cutoff. The dictionary may
have six keys, 'weight', 'weight_avg', 'energy_neutron', 'energy_photon',
'energy_electron', and 'energy_positron'. The value for 'weight'
should be a float indicating the weight cutoff below which particles
undergo Russian roulette. The value for 'weight_avg' should be a float
indicating the weight assigned to particles that survive Russian
roulette. The value for each energy key should be a float indicating the
energy in eV below which particles of that type will be killed.
dagmc : bool
Indicate that a CAD-based DAGMC geometry will be used.
delayed_photon_scaling : bool
Indicate whether to scale the fission photon yield by (EGP + EGD)/EGP
where EGP is the energy release of prompt photons and EGD is the energy
release of delayed photons.
.. versionadded:: 0.12
electron_treatment : {'led', 'ttb'}
Whether to deposit all energy from electrons locally ('led') or create
secondary bremsstrahlung photons ('ttb').
energy_mode : {'continuous-energy', 'multi-group'}
Set whether the calculation should be continuous-energy or multi-group.
entropy_mesh : openmc.RegularMesh
Mesh to be used to calculate Shannon entropy. If the mesh dimensions are
not specified, OpenMC assigns a mesh such that 20 source sites per mesh
cell are expected on average.
event_based : bool
Indicate whether to use event-based parallelism instead of the default
history-based parallelism.
.. versionadded:: 0.12
generations_per_batch : int
Number of generations per batch
max_lost_particles : int
Maximum number of lost particles
.. versionadded:: 0.12
rel_max_lost_particles : float
Maximum number of lost particles as a fraction of the total number of
particles (must lie between 0 and 1)
.. versionadded:: 0.12
inactive : int
Number of inactive batches
keff_trigger : dict
Dictionary defining a trigger on eigenvalue. The dictionary must have
two keys, 'type' and 'threshold'. Acceptable values corresponding to
type are 'variance', 'std_dev', and 'rel_err'. The threshold value
should be a float indicating the variance, standard deviation, or
relative error used.
log_grid_bins : int
Number of bins for logarithmic energy grid search
material_cell_offsets : bool
Generate an "offset table" for material cells by default. These tables
are necessary when a particular instance of a cell needs to be tallied.
.. versionadded:: 0.12
max_particles_in_flight : int
Number of neutrons to run concurrently when using event-based
parallelism.
.. versionadded:: 0.12
max_order : None or int
Maximum scattering order to apply globally when in multi-group mode.
no_reduce : bool
Indicate that all user-defined and global tallies should not be reduced
across processes in a parallel calculation.
output : dict
Dictionary indicating what files to output. Acceptable keys are:
:path: String indicating a directory where output files should be
written
:summary: Whether the 'summary.h5' file should be written (bool)
:tallies: Whether the 'tallies.out' file should be written (bool)
particles : int
Number of particles per generation
photon_transport : bool
Whether to use photon transport.
ptables : bool
Determine whether probability tables are used.
resonance_scattering : dict
Settings for resonance elastic scattering. Accepted keys are 'enable'
(bool), 'method' (str), 'energy_min' (float), 'energy_max' (float), and
'nuclides' (list). The 'method' can be set to 'dbrc' (Doppler broadening
rejection correction) or 'rvs' (relative velocity sampling). If not
specified, 'rvs' is the default method. The 'energy_min' and
'energy_max' values give the lower and upper bounds of the energy range
over which the resonance elastic scattering method is to be
applied. The 'nuclides' list indicates what nuclides the method should
be applied to. In its absence, the method will be applied to all
nuclides with 0 K elastic scattering data present.
run_mode : {'eigenvalue', 'fixed source', 'plot', 'volume', 'particle restart'}
The type of calculation to perform (default is 'eigenvalue')
seed : int
Seed for the linear congruential pseudorandom number generator
source : Iterable of openmc.Source
Distribution of source sites in space, angle, and energy
sourcepoint : dict
Options for writing source points. Acceptable keys are:
:batches: list of batches at which to write source
:overwrite: bool indicating whether to overwrite the latest source point file
:separate: bool indicating whether the source should be written as a
separate file
:write: bool indicating whether or not to write the source
statepoint : dict
Options for writing state points. Acceptable keys are:
:batches: list of batches at which to write statepoint files
survival_biasing : bool
Indicate whether survival biasing is to be used
tabular_legendre : dict
Determines if a multi-group scattering moment kernel expanded via
Legendre polynomials is to be converted to a tabular distribution or
not. Accepted keys are 'enable' and 'num_points'. The value for
'enable' is a bool stating whether the conversion to tabular is
performed; the value for 'num_points' sets the number of points to use
in the tabular distribution, should 'enable' be True.
temperature : dict
Defines a default temperature and method for treating intermediate
temperatures at which nuclear data doesn't exist. Accepted keys are
'default', 'method', 'range', 'tolerance', and 'multipole'. The value
for 'default' should be a float representing the default temperature in
Kelvin. The value for 'method' should be 'nearest' or 'interpolation'.
If the method is 'nearest', 'tolerance' indicates a range of temperature
within which cross sections may be used. The value for 'range' should be
a pair of minimum and maximum temperatures indicating that cross
sections should be loaded at all temperatures within the
range. 'multipole' is a boolean indicating whether or not the windowed
multipole method should be used to evaluate resolved resonance cross
sections.
trace : tuple or list
Show detailed information about a single particle, indicated by three
integers: the batch number, generation number, and particle number
track : tuple or list
Specify particles for which track files should be written. Each particle
is identified by a triplet with the batch number, generation number, and
particle number.
trigger_active : bool
Indicate whether tally triggers are used
trigger_batch_interval : int
Number of batches in between convergence checks
trigger_max_batches : int
Maximum number of batches simulated. If this is set, the number of
batches specified via ``batches`` is interpreted as the minimum number
of batches
ufs_mesh : openmc.RegularMesh
Mesh to be used for redistributing source sites via the uniform fission
site (UFS) method.
verbosity : int
Verbosity during simulation between 1 and 10. Verbosity levels are
described in :ref:`verbosity`.
volume_calculations : VolumeCalculation or iterable of VolumeCalculation
Stochastic volume calculation specifications
"""
def __init__(self):
self._run_mode = RunMode.EIGENVALUE
self._batches = None
self._generations_per_batch = None
self._inactive = None
self._max_lost_particles = None
self._rel_max_lost_particles = None
self._particles = None
self._keff_trigger = None
# Energy mode subelement
self._energy_mode = None
self._max_order = None
# Source subelement
self._source = cv.CheckedList(Source, 'source distributions')
self._confidence_intervals = None
self._electron_treatment = None
self._photon_transport = None
self._ptables = None
self._seed = None
self._survival_biasing = None
# Shannon entropy mesh
self._entropy_mesh = None
# Trigger subelement
self._trigger_active = None
self._trigger_max_batches = None
self._trigger_batch_interval = None
self._output = None
# Output options
self._statepoint = {}
self._sourcepoint = {}
self._no_reduce = None
self._verbosity = None
self._trace = None
self._track = None
self._tabular_legendre = {}
self._temperature = {}
# Cutoff subelement
self._cutoff = None
# Uniform fission source subelement
self._ufs_mesh = None
self._resonance_scattering = {}
self._volume_calculations = cv.CheckedList(
VolumeCalculation, 'volume calculations')
self._create_fission_neutrons = None
self._delayed_photon_scaling = None
self._material_cell_offsets = None
self._log_grid_bins = None
self._dagmc = False
self._event_based = None
self._max_particles_in_flight = None
@property
def run_mode(self):
return self._run_mode.value
@property
def batches(self):
return self._batches
@property
def generations_per_batch(self):
return self._generations_per_batch
@property
def inactive(self):
return self._inactive
@property
def max_lost_particles(self):
return self._max_lost_particles
@property
def rel_max_lost_particles(self):
return self._rel_max_lost_particles
@property
def particles(self):
return self._particles
@property
def keff_trigger(self):
return self._keff_trigger
@property
def energy_mode(self):
return self._energy_mode
@property
def max_order(self):
return self._max_order
@property
def source(self):
return self._source
@property
def confidence_intervals(self):
return self._confidence_intervals
@property
def electron_treatment(self):
return self._electron_treatment
@property
def ptables(self):
return self._ptables
@property
def photon_transport(self):
return self._photon_transport
@property
def seed(self):
return self._seed
@property
def survival_biasing(self):
return self._survival_biasing
@property
def entropy_mesh(self):
return self._entropy_mesh
@property
def trigger_active(self):
return self._trigger_active
@property
def trigger_max_batches(self):
return self._trigger_max_batches
@property
def trigger_batch_interval(self):
return self._trigger_batch_interval
@property
def output(self):
return self._output
@property
def sourcepoint(self):
return self._sourcepoint
@property
def statepoint(self):
return self._statepoint
@property
def no_reduce(self):
return self._no_reduce
@property
def verbosity(self):
return self._verbosity
@property
def tabular_legendre(self):
return self._tabular_legendre
@property
def temperature(self):
return self._temperature
@property
def trace(self):
return self._trace
@property
def track(self):
return self._track
@property
def cutoff(self):
return self._cutoff
@property
def ufs_mesh(self):
return self._ufs_mesh
@property
def resonance_scattering(self):
return self._resonance_scattering
@property
def volume_calculations(self):
return self._volume_calculations
@property
def create_fission_neutrons(self):
return self._create_fission_neutrons
@property
def delayed_photon_scaling(self):
return self._delayed_photon_scaling
@property
def material_cell_offsets(self):
return self._material_cell_offsets
@property
def log_grid_bins(self):
return self._log_grid_bins
@property
def dagmc(self):
return self._dagmc
@property
def event_based(self):
return self._event_based
@property
def max_particles_in_flight(self):
return self._max_particles_in_flight
@run_mode.setter
def run_mode(self, run_mode):
cv.check_value('run mode', run_mode, {x.value for x in RunMode})
for mode in RunMode:
if mode.value == run_mode:
self._run_mode = mode
@batches.setter
def batches(self, batches):
cv.check_type('batches', batches, Integral)
cv.check_greater_than('batches', batches, 0)
self._batches = batches
@generations_per_batch.setter
def generations_per_batch(self, generations_per_batch):
cv.check_type('generations per batch', generations_per_batch, Integral)
cv.check_greater_than('generations per batch', generations_per_batch, 0)
self._generations_per_batch = generations_per_batch
@inactive.setter
def inactive(self, inactive):
cv.check_type('inactive batches', inactive, Integral)
cv.check_greater_than('inactive batches', inactive, 0, True)
self._inactive = inactive
@max_lost_particles.setter
def max_lost_particles(self, max_lost_particles):
cv.check_type('max_lost_particles', max_lost_particles, Integral)
cv.check_greater_than('max_lost_particles', max_lost_particles, 0)
self._max_lost_particles = max_lost_particles
@rel_max_lost_particles.setter
def rel_max_lost_particles(self, rel_max_lost_particles):
cv.check_type('rel_max_lost_particles', rel_max_lost_particles, Real)
cv.check_greater_than('rel_max_lost_particles', rel_max_lost_particles, 0)
cv.check_less_than('rel_max_lost_particles', rel_max_lost_particles, 1)
self._rel_max_lost_particles = rel_max_lost_particles
@particles.setter
def particles(self, particles):
cv.check_type('particles', particles, Integral)
cv.check_greater_than('particles', particles, 0)
self._particles = particles
@keff_trigger.setter
def keff_trigger(self, keff_trigger):
if not isinstance(keff_trigger, dict):
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'is not a Python dictionary'.format(keff_trigger)
raise ValueError(msg)
elif 'type' not in keff_trigger:
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'does not have a "type" key'.format(keff_trigger)
raise ValueError(msg)
elif keff_trigger['type'] not in ['variance', 'std_dev', 'rel_err']:
msg = 'Unable to set a trigger on keff with ' \
'type "{0}"'.format(keff_trigger['type'])
raise ValueError(msg)
elif 'threshold' not in keff_trigger:
msg = 'Unable to set a trigger on keff from "{0}" which ' \
'does not have a "threshold" key'.format(keff_trigger)
raise ValueError(msg)
elif not isinstance(keff_trigger['threshold'], Real):
msg = 'Unable to set a trigger on keff with ' \
'threshold "{0}"'.format(keff_trigger['threshold'])
raise ValueError(msg)
self._keff_trigger = keff_trigger
@energy_mode.setter
def energy_mode(self, energy_mode):
cv.check_value('energy mode', energy_mode,
['continuous-energy', 'multi-group'])
self._energy_mode = energy_mode
@max_order.setter
def max_order(self, max_order):
if max_order is not None:
cv.check_type('maximum scattering order', max_order, Integral)
cv.check_greater_than('maximum scattering order', max_order, 0,
True)
self._max_order = max_order
@source.setter
def source(self, source):
if not isinstance(source, MutableSequence):
source = [source]
self._source = cv.CheckedList(Source, 'source distributions', source)
@output.setter
def output(self, output):
cv.check_type('output', output, Mapping)
for key, value in output.items():
cv.check_value('output key', key, ('summary', 'tallies', 'path'))
if key in ('summary', 'tallies'):
cv.check_type("output['{}']".format(key), value, bool)
else:
cv.check_type("output['path']", value, str)
self._output = output
@verbosity.setter
def verbosity(self, verbosity):
cv.check_type('verbosity', verbosity, Integral)
cv.check_greater_than('verbosity', verbosity, 1, True)
cv.check_less_than('verbosity', verbosity, 10, True)
self._verbosity = verbosity
@sourcepoint.setter
def sourcepoint(self, sourcepoint):
cv.check_type('sourcepoint options', sourcepoint, Mapping)
for key, value in sourcepoint.items():
if key == 'batches':
cv.check_type('sourcepoint batches', value, Iterable, Integral)
for batch in value:
cv.check_greater_than('sourcepoint batch', batch, 0)
elif key == 'separate':
cv.check_type('sourcepoint separate', value, bool)
elif key == 'write':
cv.check_type('sourcepoint write', value, bool)
elif key == 'overwrite':
cv.check_type('sourcepoint overwrite', value, bool)
else:
raise ValueError("Unknown key '{}' encountered when setting "
"sourcepoint options.".format(key))
self._sourcepoint = sourcepoint
@statepoint.setter
def statepoint(self, statepoint):
cv.check_type('statepoint options', statepoint, Mapping)
for key, value in statepoint.items():
if key == 'batches':
cv.check_type('statepoint batches', value, Iterable, Integral)
for batch in value:
cv.check_greater_than('statepoint batch', batch, 0)
else:
raise ValueError("Unknown key '{}' encountered when setting "
"statepoint options.".format(key))
self._statepoint = statepoint
@confidence_intervals.setter
def confidence_intervals(self, confidence_intervals):
cv.check_type('confidence interval', confidence_intervals, bool)
self._confidence_intervals = confidence_intervals
@electron_treatment.setter
def electron_treatment(self, electron_treatment):
cv.check_value('electron treatment', electron_treatment, ['led', 'ttb'])
self._electron_treatment = electron_treatment
@photon_transport.setter
def photon_transport(self, photon_transport):
cv.check_type('photon transport', photon_transport, bool)
self._photon_transport = photon_transport
@dagmc.setter
def dagmc(self, dagmc):
cv.check_type('dagmc geometry', dagmc, bool)
self._dagmc = dagmc
@ptables.setter
def ptables(self, ptables):
cv.check_type('probability tables', ptables, bool)
self._ptables = ptables
@seed.setter
def seed(self, seed):
cv.check_type('random number generator seed', seed, Integral)
cv.check_greater_than('random number generator seed', seed, 0)
self._seed = seed
@survival_biasing.setter
def survival_biasing(self, survival_biasing):
cv.check_type('survival biasing', survival_biasing, bool)
self._survival_biasing = survival_biasing
@cutoff.setter
def cutoff(self, cutoff):
if not isinstance(cutoff, Mapping):
msg = 'Unable to set cutoff from "{0}" which is not a '\
'Python dictionary'.format(cutoff)
raise ValueError(msg)
for key in cutoff:
if key == 'weight':
cv.check_type('weight cutoff', cutoff[key], Real)
cv.check_greater_than('weight cutoff', cutoff[key], 0.0)
elif key == 'weight_avg':
cv.check_type('average survival weight', cutoff[key], Real)
cv.check_greater_than('average survival weight',
cutoff[key], 0.0)
elif key in ['energy_neutron', 'energy_photon', 'energy_electron',
'energy_positron']:
cv.check_type('energy cutoff', cutoff[key], Real)
cv.check_greater_than('energy cutoff', cutoff[key], 0.0)
else:
msg = 'Unable to set cutoff to "{0}" which is unsupported by '\
'OpenMC'.format(key)
raise ValueError(msg)
self._cutoff = cutoff
@entropy_mesh.setter
def entropy_mesh(self, entropy):
cv.check_type('entropy mesh', entropy, RegularMesh)
self._entropy_mesh = entropy
@trigger_active.setter
def trigger_active(self, trigger_active):
cv.check_type('trigger active', trigger_active, bool)
self._trigger_active = trigger_active
@trigger_max_batches.setter
def trigger_max_batches(self, trigger_max_batches):
cv.check_type('trigger maximum batches', trigger_max_batches, Integral)
cv.check_greater_than('trigger maximum batches', trigger_max_batches, 0)
self._trigger_max_batches = trigger_max_batches
@trigger_batch_interval.setter
def trigger_batch_interval(self, trigger_batch_interval):
cv.check_type('trigger batch interval', trigger_batch_interval, Integral)
cv.check_greater_than('trigger batch interval', trigger_batch_interval, 0)
self._trigger_batch_interval = trigger_batch_interval
@no_reduce.setter
def no_reduce(self, no_reduce):
cv.check_type('no reduction option', no_reduce, bool)
self._no_reduce = no_reduce
@tabular_legendre.setter
def tabular_legendre(self, tabular_legendre):
cv.check_type('tabular_legendre settings', tabular_legendre, Mapping)
for key, value in tabular_legendre.items():
cv.check_value('tabular_legendre key', key,
['enable', 'num_points'])
if key == 'enable':
cv.check_type('enable tabular_legendre', value, bool)
elif key == 'num_points':
cv.check_type('num_points tabular_legendre', value, Integral)
cv.check_greater_than('num_points tabular_legendre', value, 0)
self._tabular_legendre = tabular_legendre
@temperature.setter
def temperature(self, temperature):
cv.check_type('temperature settings', temperature, Mapping)
for key, value in temperature.items():
cv.check_value('temperature key', key,
['default', 'method', 'tolerance', 'multipole',
'range'])
if key == 'default':
cv.check_type('default temperature', value, Real)
elif key == 'method':
cv.check_value('temperature method', value,
['nearest', 'interpolation'])
elif key == 'tolerance':
cv.check_type('temperature tolerance', value, Real)
elif key == 'multipole':
cv.check_type('temperature multipole', value, bool)
elif key == 'range':
cv.check_length('temperature range', value, 2)
for T in value:
cv.check_type('temperature', T, Real)
self._temperature = temperature
@trace.setter
def trace(self, trace):
cv.check_type('trace', trace, Iterable, Integral)
cv.check_length('trace', trace, 3)
cv.check_greater_than('trace batch', trace[0], 0)
cv.check_greater_than('trace generation', trace[1], 0)
cv.check_greater_than('trace particle', trace[2], 0)
self._trace = trace
@track.setter
def track(self, track):
cv.check_type('track', track, Iterable, Integral)
if len(track) % 3 != 0:
msg = 'Unable to set the track to "{0}" since its length is ' \
'not a multiple of 3'.format(track)
raise ValueError(msg)
for t in zip(track[::3], track[1::3], track[2::3]):
cv.check_greater_than('track batch', t[0], 0)
cv.check_greater_than('track generation', t[1], 0)
cv.check_greater_than('track particle', t[2], 0)
self._track = track
@ufs_mesh.setter
def ufs_mesh(self, ufs_mesh):
cv.check_type('UFS mesh', ufs_mesh, RegularMesh)
cv.check_length('UFS mesh dimension', ufs_mesh.dimension, 3)
cv.check_length('UFS mesh lower-left corner', ufs_mesh.lower_left, 3)
cv.check_length('UFS mesh upper-right corner', ufs_mesh.upper_right, 3)
self._ufs_mesh = ufs_mesh
@resonance_scattering.setter
def resonance_scattering(self, res):
cv.check_type('resonance scattering settings', res, Mapping)
keys = ('enable', 'method', 'energy_min', 'energy_max', 'nuclides')
for key, value in res.items():
cv.check_value('resonance scattering dictionary key', key, keys)
if key == 'enable':
cv.check_type('resonance scattering enable', value, bool)
elif key == 'method':
cv.check_value('resonance scattering method', value,
_RES_SCAT_METHODS)
elif key == 'energy_min':
name = 'resonance scattering minimum energy'
cv.check_type(name, value, Real)
cv.check_greater_than(name, value, 0)
elif key == 'energy_max':
name = 'resonance scattering maximum energy'
cv.check_type(name, value, Real)
cv.check_greater_than(name, value, 0)
elif key == 'nuclides':
cv.check_type('resonance scattering nuclides', value,
Iterable, str)
self._resonance_scattering = res
@volume_calculations.setter
def volume_calculations(self, vol_calcs):
if not isinstance(vol_calcs, MutableSequence):
vol_calcs = [vol_calcs]
self._volume_calculations = cv.CheckedList(
VolumeCalculation, 'stochastic volume calculations', vol_calcs)
@create_fission_neutrons.setter
def create_fission_neutrons(self, create_fission_neutrons):
cv.check_type('create fission neutrons',
create_fission_neutrons, bool)
self._create_fission_neutrons = create_fission_neutrons
@delayed_photon_scaling.setter
def delayed_photon_scaling(self, value):
cv.check_type('delayed photon scaling', value, bool)
self._delayed_photon_scaling = value
@event_based.setter
def event_based(self, value):
cv.check_type('event based', value, bool)
self._event_based = value
@max_particles_in_flight.setter
def max_particles_in_flight(self, value):
cv.check_type('max particles in flight', value, Integral)
cv.check_greater_than('max particles in flight', value, 0)
self._max_particles_in_flight = value
@material_cell_offsets.setter
def material_cell_offsets(self, value):
cv.check_type('material cell offsets', value, bool)
self._material_cell_offsets = value
@log_grid_bins.setter
def log_grid_bins(self, log_grid_bins):
cv.check_type('log grid bins', log_grid_bins, Real)
cv.check_greater_than('log grid bins', log_grid_bins, 0)
self._log_grid_bins = log_grid_bins
def _create_run_mode_subelement(self, root):
elem = ET.SubElement(root, "run_mode")
elem.text = self._run_mode.value
def _create_batches_subelement(self, root):
if self._batches is not None:
element = ET.SubElement(root, "batches")
element.text = str(self._batches)
def _create_generations_per_batch_subelement(self, root):
if self._generations_per_batch is not None:
element = ET.SubElement(root, "generations_per_batch")
element.text = str(self._generations_per_batch)
def _create_inactive_subelement(self, root):
if self._inactive is not None:
element = ET.SubElement(root, "inactive")
element.text = str(self._inactive)
def _create_max_lost_particles_subelement(self, root):
if self._max_lost_particles is not None:
element = ET.SubElement(root, "max_lost_particles")
element.text = str(self._max_lost_particles)
def _create_rel_max_lost_particles_subelement(self, root):
if self._rel_max_lost_particles is not None:
element = ET.SubElement(root, "rel_max_lost_particles")
element.text = str(self._rel_max_lost_particles)
def _create_particles_subelement(self, root):
if self._particles is not None:
element = ET.SubElement(root, "particles")
element.text = str(self._particles)
def _create_keff_trigger_subelement(self, root):
if self._keff_trigger is not None:
element = ET.SubElement(root, "keff_trigger")
for key, value in sorted(self._keff_trigger.items()):
subelement = ET.SubElement(element, key)
subelement.text = str(value).lower()
def _create_energy_mode_subelement(self, root):
if self._energy_mode is not None:
element = ET.SubElement(root, "energy_mode")
element.text = str(self._energy_mode)
def _create_max_order_subelement(self, root):
if self._max_order is not None:
element = ET.SubElement(root, "max_order")
element.text = str(self._max_order)
def _create_source_subelement(self, root):
for source in self.source:
root.append(source.to_xml_element())
def _create_volume_calcs_subelement(self, root):
for calc in self.volume_calculations:
root.append(calc.to_xml_element())
def _create_output_subelement(self, root):
if self._output is not None:
element = ET.SubElement(root, "output")
for key, value in sorted(self._output.items()):
subelement = ET.SubElement(element, key)
if key in ('summary', 'tallies'):
subelement.text = str(value).lower()
else:
subelement.text = value
def _create_verbosity_subelement(self, root):
if self._verbosity is not None:
element = ET.SubElement(root, "verbosity")
element.text = str(self._verbosity)
def _create_statepoint_subelement(self, root):
if self._statepoint:
element = ET.SubElement(root, "state_point")
if 'batches' in self._statepoint:
subelement = ET.SubElement(element, "batches")
subelement.text = ' '.join(
str(x) for x in self._statepoint['batches'])
def _create_sourcepoint_subelement(self, root):
if self._sourcepoint:
element = ET.SubElement(root, "source_point")
if 'batches' in self._sourcepoint:
subelement = ET.SubElement(element, "batches")
subelement.text = ' '.join(
str(x) for x in self._sourcepoint['batches'])
if 'separate' in self._sourcepoint:
subelement = ET.SubElement(element, "separate")
subelement.text = str(self._sourcepoint['separate']).lower()
if 'write' in self._sourcepoint:
subelement = ET.SubElement(element, "write")
subelement.text = str(self._sourcepoint['write']).lower()
# Overwrite latest subelement
if 'overwrite' in self._sourcepoint:
subelement = ET.SubElement(element, "overwrite_latest")
subelement.text = str(self._sourcepoint['overwrite']).lower()
def _create_confidence_intervals(self, root):
if self._confidence_intervals is not None:
element = ET.SubElement(root, "confidence_intervals")
element.text = str(self._confidence_intervals).lower()
def _create_electron_treatment_subelement(self, root):
if self._electron_treatment is not None:
element = ET.SubElement(root, "electron_treatment")
element.text = str(self._electron_treatment)
def _create_photon_transport_subelement(self, root):
if self._photon_transport is not None:
element = ET.SubElement(root, "photon_transport")
element.text = str(self._photon_transport).lower()
def _create_ptables_subelement(self, root):
if self._ptables is not None:
element = ET.SubElement(root, "ptables")
element.text = str(self._ptables).lower()
def _create_seed_subelement(self, root):
if self._seed is not None:
element = ET.SubElement(root, "seed")
element.text = str(self._seed)
def _create_survival_biasing_subelement(self, root):
if self._survival_biasing is not None:
element = ET.SubElement(root, "survival_biasing")
element.text = str(self._survival_biasing).lower()
def _create_cutoff_subelement(self, root):
if self._cutoff is not None:
element = ET.SubElement(root, "cutoff")
for key, value in self._cutoff.items():
subelement = ET.SubElement(element, key)
subelement.text = str(value)
def _create_entropy_mesh_subelement(self, root):
if self.entropy_mesh is not None:
# See if a <mesh> element already exists -- if not, add it
path = "./mesh[@id='{}']".format(self.entropy_mesh.id)
if root.find(path) is None:
root.append(self.entropy_mesh.to_xml_element())
subelement = ET.SubElement(root, "entropy_mesh")
subelement.text = str(self.entropy_mesh.id)
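# Illustrative result (inner mesh contents assumed): the settings XML ends up
# with a single shared <mesh id="N">...</mesh> definition plus a reference of
# the form <entropy_mesh>N</entropy_mesh>, so a mesh already present in the
# tree is never duplicated.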
def _create_trigger_subelement(self, root):
if self._trigger_active is not None:
trigger_element = ET.SubElement(root, "trigger")
element = ET.SubElement(trigger_element, "active")
element.text = str(self._trigger_active).lower()
if self._trigger_max_batches is not None:
element = ET.SubElement(trigger_element, "max_batches")
element.text = str(self._trigger_max_batches)
if self._trigger_batch_interval is not None:
element = ET.SubElement(trigger_element, "batch_interval")
element.text = str(self._trigger_batch_interval)
def _create_no_reduce_subelement(self, root):
if self._no_reduce is not None:
element = ET.SubElement(root, "no_reduce")
element.text = str(self._no_reduce).lower()
def _create_tabular_legendre_subelements(self, root):
if self.tabular_legendre:
element = ET.SubElement(root, "tabular_legendre")
subelement = ET.SubElement(element, "enable")
subelement.text = str(self._tabular_legendre['enable']).lower()
if 'num_points' in self._tabular_legendre:
subelement = ET.SubElement(element, "num_points")
subelement.text = str(self._tabular_legendre['num_points'])
def _create_temperature_subelements(self, root):
if self.temperature:
for key, value in sorted(self.temperature.items()):
element = ET.SubElement(root,
"temperature_{}".format(key))
if isinstance(value, bool):
element.text = str(value).lower()
elif key == 'range':
element.text = ' '.join(str(T) for T in value)
else:
element.text = str(value)
def _create_trace_subelement(self, root):
if self._trace is not None:
element = ET.SubElement(root, "trace")
element.text = ' '.join(map(str, self._trace))
def _create_track_subelement(self, root):
if self._track is not None:
element = ET.SubElement(root, "track")
element.text = ' '.join(map(str, self._track))
def _create_ufs_mesh_subelement(self, root):
if self.ufs_mesh is not None:
# See if a <mesh> element already exists -- if not, add it
path = "./mesh[@id='{}']".format(self.ufs_mesh.id)
if root.find(path) is None:
root.append(self.ufs_mesh.to_xml_element())
subelement = ET.SubElement(root, "ufs_mesh")
subelement.text = str(self.ufs_mesh.id)
def _create_resonance_scattering_subelement(self, root):
res = self.resonance_scattering
if res:
elem = ET.SubElement(root, 'resonance_scattering')
if 'enable' in res:
subelem = ET.SubElement(elem, 'enable')
subelem.text = str(res['enable']).lower()
if 'method' in res:
subelem = ET.SubElement(elem, 'method')
subelem.text = res['method']
if 'energy_min' in res:
subelem = ET.SubElement(elem, 'energy_min')
subelem.text = str(res['energy_min'])
if 'energy_max' in res:
subelem = ET.SubElement(elem, 'energy_max')
subelem.text = str(res['energy_max'])
if 'nuclides' in res:
subelem = ET.SubElement(elem, 'nuclides')
subelem.text = ' '.join(res['nuclides'])
def _create_create_fission_neutrons_subelement(self, root):
if self._create_fission_neutrons is not None:
elem = ET.SubElement(root, "create_fission_neutrons")
elem.text = str(self._create_fission_neutrons).lower()
def _create_delayed_photon_scaling_subelement(self, root):
if self._delayed_photon_scaling is not None:
elem = ET.SubElement(root, "delayed_photon_scaling")
elem.text = str(self._delayed_photon_scaling).lower()
def _create_event_based_subelement(self, root):
if self._event_based is not None:
elem = ET.SubElement(root, "event_based")
elem.text = str(self._event_based).lower()
def _create_max_particles_in_flight_subelement(self, root):
if self._max_particles_in_flight is not None:
elem = ET.SubElement(root, "max_particles_in_flight")
elem.text = str(self._max_particles_in_flight)
def _create_material_cell_offsets_subelement(self, root):
if self._material_cell_offsets is not None:
elem = ET.SubElement(root, "material_cell_offsets")
elem.text = str(self._material_cell_offsets).lower()
def _create_log_grid_bins_subelement(self, root):
if self._log_grid_bins is not None:
elem = ET.SubElement(root, "log_grid_bins")
elem.text = str(self._log_grid_bins)
def _create_dagmc_subelement(self, root):
if self._dagmc:
elem = ET.SubElement(root, "dagmc")
elem.text = str(self._dagmc).lower()
def _eigenvalue_from_xml_element(self, root):
elem = root.find('eigenvalue')
if elem is not None:
self._run_mode_from_xml_element(elem)
self._particles_from_xml_element(elem)
self._batches_from_xml_element(elem)
self._inactive_from_xml_element(elem)
self._max_lost_particles_from_xml_element(elem)
self._rel_max_lost_particles_from_xml_element(elem)
self._generations_per_batch_from_xml_element(elem)
def _run_mode_from_xml_element(self, root):
text = get_text(root, 'run_mode')
if text is not None:
self.run_mode = text
def _particles_from_xml_element(self, root):
text = get_text(root, 'particles')
if text is not None:
self.particles = int(text)
def _batches_from_xml_element(self, root):
text = get_text(root, 'batches')
if text is not None:
self.batches = int(text)
def _inactive_from_xml_element(self, root):
text = get_text(root, 'inactive')
if text is not None:
self.inactive = int(text)
def _max_lost_particles_from_xml_element(self, root):
text = get_text(root, 'max_lost_particles')
if text is not None:
self.max_lost_particles = int(text)
def _rel_max_lost_particles_from_xml_element(self, root):
text = get_text(root, 'rel_max_lost_particles')
if text is not None:
self.rel_max_lost_particles = float(text)
def _generations_per_batch_from_xml_element(self, root):
text = get_text(root, 'generations_per_batch')
if text is not None:
self.generations_per_batch = int(text)
def _keff_trigger_from_xml_element(self, root):
elem = root.find('keff_trigger')
if elem is not None:
trigger = get_text(elem, 'type')
threshold = float(get_text(elem, 'threshold'))
self.keff_trigger = {'type': trigger, 'threshold': threshold}
def _source_from_xml_element(self, root):
for elem in root.findall('source'):
self.source.append(Source.from_xml_element(elem))
def _output_from_xml_element(self, root):
elem = root.find('output')
if elem is not None:
self.output = {}
for key in ('summary', 'tallies', 'path'):
value = get_text(elem, key)
if value is not None:
if key in ('summary', 'tallies'):
value = value in ('true', '1')
self.output[key] = value
def _statepoint_from_xml_element(self, root):
elem = root.find('state_point')
if elem is not None:
text = get_text(elem, 'batches')
if text is not None:
self.statepoint['batches'] = [int(x) for x in text.split()]
def _sourcepoint_from_xml_element(self, root):
elem = root.find('source_point')
if elem is not None:
for key in ('separate', 'write', 'overwrite_latest', 'batches'):
value = get_text(elem, key)
if value is not None:
if key in ('separate', 'write'):
value = value in ('true', '1')
elif key == 'overwrite_latest':
value = value in ('true', '1')
key = 'overwrite'
else:
value = [int(x) for x in value.split()]
self.sourcepoint[key] = value
def _confidence_intervals_from_xml_element(self, root):
text = get_text(root, 'confidence_intervals')
if text is not None:
self.confidence_intervals = text in ('true', '1')
def _electron_treatment_from_xml_element(self, root):
text = get_text(root, 'electron_treatment')
if text is not None:
self.electron_treatment = text
def _energy_mode_from_xml_element(self, root):
text = get_text(root, 'energy_mode')
if text is not None:
self.energy_mode = text
def _max_order_from_xml_element(self, root):
text = get_text(root, 'max_order')
if text is not None:
self.max_order = int(text)
def _photon_transport_from_xml_element(self, root):
text = get_text(root, 'photon_transport')
if text is not None:
self.photon_transport = text in ('true', '1')
def _ptables_from_xml_element(self, root):
text = get_text(root, 'ptables')
if text is not None:
self.ptables = text in ('true', '1')
def _seed_from_xml_element(self, root):
text = get_text(root, 'seed')
if text is not None:
self.seed = int(text)
def _survival_biasing_from_xml_element(self, root):
text = get_text(root, 'survival_biasing')
if text is not None:
self.survival_biasing = text in ('true', '1')
def _cutoff_from_xml_element(self, root):
elem = root.find('cutoff')
if elem is not None:
self.cutoff = {}
for key in ('energy_neutron', 'energy_photon', 'energy_electron',
'energy_positron', 'weight', 'weight_avg'):
value = get_text(elem, key)
if value is not None:
self.cutoff[key] = float(value)
def _entropy_mesh_from_xml_element(self, root):
text = get_text(root, 'entropy_mesh')
if text is not None:
path = "./mesh[@id='{}']".format(int(text))
elem = root.find(path)
if elem is not None:
self.entropy_mesh = RegularMesh.from_xml_element(elem)
def _trigger_from_xml_element(self, root):
elem = root.find('trigger')
if elem is not None:
self.trigger_active = get_text(elem, 'active') in ('true', '1')
text = get_text(elem, 'max_batches')
if text is not None:
self.trigger_max_batches = int(text)
text = get_text(elem, 'batch_interval')
if text is not None:
self.trigger_batch_interval = int(text)
def _no_reduce_from_xml_element(self, root):
text = get_text(root, 'no_reduce')
if text is not None:
self.no_reduce = text in ('true', '1')
def _verbosity_from_xml_element(self, root):
text = get_text(root, 'verbosity')
if text is not None:
self.verbosity = int(text)
def _tabular_legendre_from_xml_element(self, root):
elem = root.find('tabular_legendre')
if elem is not None:
text = get_text(elem, 'enable')
self.tabular_legendre['enable'] = text in ('true', '1')
text = get_text(elem, 'num_points')
if text is not None:
self.tabular_legendre['num_points'] = int(text)
def _temperature_from_xml_element(self, root):
text = get_text(root, 'temperature_default')
if text is not None:
self.temperature['default'] = float(text)
text = get_text(root, 'temperature_tolerance')
if text is not None:
self.temperature['tolerance'] = float(text)
text = get_text(root, 'temperature_method')
if text is not None:
self.temperature['method'] = text
text = get_text(root, 'temperature_range')
if text is not None:
self.temperature['range'] = [float(x) for x in text.split()]
text = get_text(root, 'temperature_multipole')
if text is not None:
self.temperature['multipole'] = text in ('true', '1')
def _trace_from_xml_element(self, root):
text = get_text(root, 'trace')
if text is not None:
self.trace = [int(x) for x in text.split()]
def _track_from_xml_element(self, root):
text = get_text(root, 'track')
if text is not None:
self.track = [int(x) for x in text.split()]
def _ufs_mesh_from_xml_element(self, root):
text = get_text(root, 'ufs_mesh')
if text is not None:
path = "./mesh[@id='{}']".format(int(text))
elem = root.find(path)
if elem is not None:
self.ufs_mesh = RegularMesh.from_xml_element(elem)
def _resonance_scattering_from_xml_element(self, root):
elem = root.find('resonance_scattering')
if elem is not None:
keys = ('enable', 'method', 'energy_min', 'energy_max', 'nuclides')
for key in keys:
value = get_text(elem, key)
if value is not None:
if key == 'enable':
value = value in ('true', '1')
elif key in ('energy_min', 'energy_max'):
value = float(value)
elif key == 'nuclides':
value = value.split()
self.resonance_scattering[key] = value
def _create_fission_neutrons_from_xml_element(self, root):
text = get_text(root, 'create_fission_neutrons')
if text is not None:
self.create_fission_neutrons = text in ('true', '1')
def _delayed_photon_scaling_from_xml_element(self, root):
text = get_text(root, 'delayed_photon_scaling')
if text is not None:
self.delayed_photon_scaling = text in ('true', '1')
def _event_based_from_xml_element(self, root):
text = get_text(root, 'event_based')
if text is not None:
self.event_based = text in ('true', '1')
def _max_particles_in_flight_from_xml_element(self, root):
text = get_text(root, 'max_particles_in_flight')
if text is not None:
self.max_particles_in_flight = int(text)
def _material_cell_offsets_from_xml_element(self, root):
text = get_text(root, 'material_cell_offsets')
if text is not None:
self.material_cell_offsets = text in ('true', '1')
def _log_grid_bins_from_xml_element(self, root):
text = get_text(root, 'log_grid_bins')
if text is not None:
self.log_grid_bins = int(text)
def _dagmc_from_xml_element(self, root):
text = get_text(root, 'dagmc')
if text is not None:
self.dagmc = text in ('true', '1')
def export_to_xml(self, path='settings.xml'):
"""Export simulation settings to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'settings.xml'.
"""
# Reset xml element tree
root_element = ET.Element("settings")
self._create_run_mode_subelement(root_element)
self._create_particles_subelement(root_element)
self._create_batches_subelement(root_element)
self._create_inactive_subelement(root_element)
self._create_max_lost_particles_subelement(root_element)
self._create_rel_max_lost_particles_subelement(root_element)
self._create_generations_per_batch_subelement(root_element)
self._create_keff_trigger_subelement(root_element)
self._create_source_subelement(root_element)
self._create_output_subelement(root_element)
self._create_statepoint_subelement(root_element)
self._create_sourcepoint_subelement(root_element)
self._create_confidence_intervals(root_element)
self._create_electron_treatment_subelement(root_element)
self._create_energy_mode_subelement(root_element)
self._create_max_order_subelement(root_element)
self._create_photon_transport_subelement(root_element)
self._create_ptables_subelement(root_element)
self._create_seed_subelement(root_element)
self._create_survival_biasing_subelement(root_element)
self._create_cutoff_subelement(root_element)
self._create_entropy_mesh_subelement(root_element)
self._create_trigger_subelement(root_element)
self._create_no_reduce_subelement(root_element)
self._create_verbosity_subelement(root_element)
self._create_tabular_legendre_subelements(root_element)
self._create_temperature_subelements(root_element)
self._create_trace_subelement(root_element)
self._create_track_subelement(root_element)
self._create_ufs_mesh_subelement(root_element)
self._create_resonance_scattering_subelement(root_element)
self._create_volume_calcs_subelement(root_element)
self._create_create_fission_neutrons_subelement(root_element)
self._create_delayed_photon_scaling_subelement(root_element)
self._create_event_based_subelement(root_element)
self._create_max_particles_in_flight_subelement(root_element)
self._create_material_cell_offsets_subelement(root_element)
self._create_log_grid_bins_subelement(root_element)
self._create_dagmc_subelement(root_element)
# Clean the indentation in the file to be user-readable
clean_indentation(root_element)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'settings.xml'
# Write the XML Tree to the settings.xml file
reorder_attributes(root_element) # TODO: Remove when support is Python 3.8+
tree = ET.ElementTree(root_element)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
@classmethod
def from_xml(cls, path='settings.xml'):
"""Generate settings from XML file
Parameters
----------
path : str, optional
Path to settings XML file
Returns
-------
openmc.Settings
Settings object
"""
tree = ET.parse(path)
root = tree.getroot()
settings = cls()
settings._eigenvalue_from_xml_element(root)
settings._run_mode_from_xml_element(root)
settings._particles_from_xml_element(root)
settings._batches_from_xml_element(root)
settings._inactive_from_xml_element(root)
settings._max_lost_particles_from_xml_element(root)
settings._rel_max_lost_particles_from_xml_element(root)
settings._generations_per_batch_from_xml_element(root)
settings._keff_trigger_from_xml_element(root)
settings._source_from_xml_element(root)
settings._output_from_xml_element(root)
settings._statepoint_from_xml_element(root)
settings._sourcepoint_from_xml_element(root)
settings._confidence_intervals_from_xml_element(root)
settings._electron_treatment_from_xml_element(root)
settings._energy_mode_from_xml_element(root)
settings._max_order_from_xml_element(root)
settings._photon_transport_from_xml_element(root)
settings._ptables_from_xml_element(root)
settings._seed_from_xml_element(root)
settings._survival_biasing_from_xml_element(root)
settings._cutoff_from_xml_element(root)
settings._entropy_mesh_from_xml_element(root)
settings._trigger_from_xml_element(root)
settings._no_reduce_from_xml_element(root)
settings._verbosity_from_xml_element(root)
settings._tabular_legendre_from_xml_element(root)
settings._temperature_from_xml_element(root)
settings._trace_from_xml_element(root)
settings._track_from_xml_element(root)
settings._ufs_mesh_from_xml_element(root)
settings._resonance_scattering_from_xml_element(root)
settings._create_fission_neutrons_from_xml_element(root)
settings._delayed_photon_scaling_from_xml_element(root)
settings._event_based_from_xml_element(root)
settings._max_particles_in_flight_from_xml_element(root)
settings._material_cell_offsets_from_xml_element(root)
settings._log_grid_bins_from_xml_element(root)
settings._dagmc_from_xml_element(root)
# TODO: Get volume calculations
return settings
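# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; the values are arbitrary
# examples): building a minimal eigenvalue settings file with the setters
# defined above and writing it out.
#
#     settings = Settings()
#     settings.run_mode = 'eigenvalue'
#     settings.particles = 10000
#     settings.batches = 100
#     settings.inactive = 10
#     settings.export_to_xml()          # writes settings.xml
#     roundtrip = Settings.from_xml()   # reads it back into a new object
# ---------------------------------------------------------------------------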
|
from io import StringIO
import pathlib
import rich
import pytest
from django.core.management import call_command
from apps.greencheck.bulk_importers import (
ImporterCSV,
MissingHoster,
MissingPath,
EmberCO2Import,
)
from apps.greencheck.models import GreencheckIp
from apps.greencheck.models.checks import CO2Intensity
@pytest.fixture
def sample_country_row():
return {
"country_or_region": "Afghanistan",
"country_code": "AFG",
"year": "2020",
"emissions_intensity_gco2_per_kwh": "115.385",
}
@pytest.fixture
def sample_fossil_share_row():
return {
"country_or_region": "Afghanistan",
"country_code": "AFG",
"year": "2020",
"variable": "Fossil",
"share_of_generation_pct": "15.38",
"latest_year": "2020",
}
@pytest.mark.django_db
class TestImporterCSV:
def test_add_green_ipv4_for_hoster(self, hosting_provider, csv_file):
# so we have an id we can use when associating new IPs with the hoster
hosting_provider.save()
bulk_importer = ImporterCSV(hosting_provider, csv_file)
assert GreencheckIp.objects.count() == 0
res = bulk_importer.run()
green_ips = [gcip.ip_start for gcip in GreencheckIp.objects.all()]
assert len(res["ipv4"]) == 10
for ip_addy in bulk_importer.ips:
assert str(ip_addy) in green_ips
def test_needs_a_path_for_the_csv(self, hosting_provider):
with pytest.raises(MissingPath):
assert ImporterCSV(hosting_provider, None)
def test_needs_a_hosting_provider(self, csv_file):
with pytest.raises(MissingHoster):
assert ImporterCSV("2", csv_file)
@pytest.mark.django_db
class TestCSVImportCommand:
"""
This tests that we have a management command that can run, and checks
for existence of the necessary command line args.
"""
def test_handle(self, hosting_provider, csv_file):
out = StringIO()
hosting_provider.save()
call_command("import_from_csv", hosting_provider.id, csv_file, stdout=out)
assert "Import Complete:" in out.getvalue()
@pytest.fixture
def fossil_csv_path():
greencheck_app_root = pathlib.Path(__file__).parent.parent
return (
greencheck_app_root
/ "fixtures"
/ "ember-2021-share-from-fossil-fuels-sample.csv"
)
@pytest.mark.django_db
class TestEmberCO2Import:
def test_return_csv(self, fossil_csv_path):
"""
Do we get back a list of dicts we can work with easily?
"""
importer = EmberCO2Import()
res = importer.parse_csv(fossil_csv_path)
# do we have a list we can easily manipulate?
assert isinstance(res, list)
assert len(res) > 1
# do we have dicts we can access?
assert isinstance(res[0], dict)
def test_load_co2_intensity_data(
self, sample_country_row, sample_fossil_share_row, fossil_csv_path
):
importer = EmberCO2Import()
importer.load_fossil_data(fossil_csv_path)
country, *rest = importer.load_co2_intensity_data([sample_country_row])
# check the geo info for lookups
assert country.country_name == sample_country_row["country_or_region"]
assert country.country_code_iso_3 == sample_country_row["country_code"]
assert country.country_code_iso_2 == "AF"
# then check the fossil and carbon intensity numbers we want to expose
assert (
country.carbon_intensity
== sample_country_row["emissions_intensity_gco2_per_kwh"]
)
assert country.carbon_intensity_type == "avg"
assert (
country.generation_from_fossil
== sample_fossil_share_row["share_of_generation_pct"]
)
# do we now have the CO2 Intensity figures for our country in the db?
assert CO2Intensity.objects.count() == 1
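# Illustrative note (not part of the original test module): these tests are
# intended to be run under pytest with Django database access enabled, e.g.
# ``pytest -k "TestImporterCSV or TestEmberCO2Import"``; the hosting_provider
# and csv_file fixtures are assumed to be defined in a shared conftest.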
|
import logging
import json
import re
from typing import Any, Dict, List, Optional, TypeVar
import pydash
from fidesops.common_exceptions import FidesopsException
from fidesops.graph.config import ScalarField
from fidesops.core.config import config
from fidesops.schemas.saas.shared_schemas import SaaSRequestParams
from fidesops.graph.traversal import TraversalNode
from fidesops.models.policy import Policy
from fidesops.models.privacy_request import PrivacyRequest
from fidesops.schemas.saas.saas_config import Endpoint, SaaSRequest
from fidesops.service.connectors.query_config import QueryConfig
from fidesops.util.collection_util import Row, merge_dicts
from fidesops.util.saas_util import unflatten_dict, FIDESOPS_GROUPED_INPUTS, format_body
logger = logging.getLogger(__name__)
T = TypeVar("T")
class SaaSQueryConfig(QueryConfig[SaaSRequestParams]):
"""Query config that generates populated SaaS requests for a given collection"""
def __init__(
self,
node: TraversalNode,
endpoints: Dict[str, Endpoint],
secrets: Dict[str, Any],
data_protection_request: Optional[SaaSRequest] = None,
):
super().__init__(node)
self.collection_name = node.address.collection
self.endpoints = endpoints
self.secrets = secrets
self.data_protection_request = data_protection_request
self.action: Optional[str] = None
def get_request_by_action(self, action: str) -> Optional[SaaSRequest]:
"""
Returns the appropriate request config based on the
current collection and preferred action (read, update, delete)
"""
try:
# store action name for logging purposes
self.action = action
collection_name = self.node.address.collection
request = self.endpoints[collection_name].requests[action]
logger.info(
f"Found matching endpoint to {action} '{collection_name}' collection"
)
return request
except KeyError:
logger.info(
f"The '{action}' action is not defined for the '{collection_name}' endpoint in {self.node.node.dataset.connection_key}"
)
return None
def get_masking_request(self) -> Optional[SaaSRequest]:
"""Returns a tuple of the preferred action and SaaSRequest to use for masking.
An update request is preferred, but we can use a gdpr delete endpoint or delete endpoint if not MASKING_STRICT.
"""
update: Optional[SaaSRequest] = self.get_request_by_action("update")
gdpr_delete: Optional[SaaSRequest] = None
delete: Optional[SaaSRequest] = None
if not config.execution.MASKING_STRICT:
gdpr_delete = self.data_protection_request
delete = self.get_request_by_action("delete")
try:
# Return first viable option
action_type: str = next(
action
for action in [
"update" if update else None,
"data_protection_request" if gdpr_delete else None,
"delete" if delete else None,
]
if action
)
# store action name for logging purposes
self.action = action_type
logger.info(
f"Selecting '{action_type}' action to perform masking request for '{self.collection_name}' collection."
)
return next(request for request in [update, gdpr_delete, delete] if request)
except StopIteration:
return None
def generate_requests(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy]
) -> List[SaaSRequestParams]:
"""Takes the input_data and uses it to generate a list of SaaS request params"""
filtered_data = self.node.typed_filtered_values(input_data)
request_params = []
# Build SaaS requests for fields that are independent of each other
for string_path, reference_values in filtered_data.items():
for value in reference_values:
request_params.append(
self.generate_query({string_path: [value]}, policy)
)
# Build SaaS requests for fields that are dependent on each other
grouped_input_data: List[Dict[str, Any]] = input_data.get(
FIDESOPS_GROUPED_INPUTS, []
)
for dependent_data in grouped_input_data:
request_params.append(self.generate_query(dependent_data, policy))
return request_params
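# Illustrative example (hypothetical field names): filtered_data such as
# {"email": ["a@example.com", "b@example.com"]} yields one SaaSRequestParams
# per value, while each dict found under FIDESOPS_GROUPED_INPUTS yields a
# single request built from all of its dependent values together.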
@staticmethod
def assign_placeholders(value: str, param_values: Dict[str, Any]) -> Optional[str]:
"""
Finds all the placeholders (indicated by <>) in the passed in value
and replaces them with the actual param values
Returns None if any of the placeholders cannot be found in the param_values
"""
if value and isinstance(value, str):
placeholders = re.findall("<(.+?)>", value)
for placeholder in placeholders:
placeholder_value = param_values.get(placeholder)
if placeholder_value:
value = value.replace(f"<{placeholder}>", str(placeholder_value))
else:
return None
return value
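# Illustrative example (hypothetical values):
# assign_placeholders("/v1/users/<user_id>", {"user_id": 123}) returns
# "/v1/users/123", while a missing "user_id" key makes the call return None.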
def map_param_values(
self, current_request: SaaSRequest, param_values: Dict[str, Any]
) -> SaaSRequestParams:
"""
Visits path, headers, query, and body params in the current request and replaces
the placeholders with the request param values.
"""
path: Optional[str] = self.assign_placeholders(
current_request.path, param_values
)
if path is None:
raise ValueError(
f"At least one param_values references an invalid field for the '{self.action}' request of the '{self.collection_name}' collection."
)
headers: Dict[str, Any] = {}
for header in current_request.headers or []:
header_value = self.assign_placeholders(header.value, param_values)
# only create header if placeholders were replaced with actual values
if header_value is not None:
headers[header.name] = header_value
query_params: Dict[str, Any] = {}
for query_param in current_request.query_params or []:
query_param_value = self.assign_placeholders(
query_param.value, param_values
)
# only create query param if placeholders were replaced with actual values
if query_param_value is not None:
query_params[query_param.name] = query_param_value
body: Optional[str] = self.assign_placeholders(
current_request.body, param_values
)
# if we declared a body and it's None after assigning placeholders we should error the request
if current_request.body and body is None:
raise ValueError(
f"Unable to replace placeholders in body for the '{self.action}' request of the '{self.collection_name}' collection."
)
# format the body based on the content type
updated_headers, formatted_body = format_body(headers, body)
return SaaSRequestParams(
method=current_request.method,
path=path,
headers=updated_headers,
query_params=query_params,
body=formatted_body,
)
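# Illustrative example (hypothetical request): a read request with path
# "/v2/customers/<customer_id>" and param_values {"customer_id": 42} maps to
# SaaSRequestParams(method="GET", path="/v2/customers/42", ...), with headers
# and query params populated the same way.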
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy]
) -> SaaSRequestParams:
"""
This returns the method, path, header, query, and body params needed to make an API call.
This is the API equivalent of building the components of a database
query statement (select statement, where clause, limit, offset, etc.)
"""
current_request: SaaSRequest = self.get_request_by_action("read")
if not current_request:
raise FidesopsException(
f"The 'read' action is not defined for the '{self.collection_name}' "
f"endpoint in {self.node.node.dataset.connection_key}"
)
# create the source of param values to populate the various placeholders
# in the path, headers, query_params, and body
param_values: Dict[str, Any] = {}
for param_value in current_request.param_values or []:
if param_value.references or param_value.identity:
                # TODO: how to handle missing reference or identity values
                # in a way that is obvious based on configuration
input_list = input_data.get(param_value.name)
if input_list:
param_values[param_value.name] = input_list[0]
elif param_value.connector_param:
param_values[param_value.name] = pydash.get(
self.secrets, param_value.connector_param
)
# map param values to placeholders in path, headers, and query params
saas_request_params: SaaSRequestParams = self.map_param_values(
current_request, param_values
)
logger.info(f"Populated request params for {current_request.path}")
return saas_request_params
def generate_update_stmt(
self, row: Row, policy: Policy, request: PrivacyRequest
) -> SaaSRequestParams:
"""
This returns the method, path, header, query, and body params needed to make an API call.
The fields in the row are masked according to the policy and added to the request body
if specified by the body field of the masking request.
"""
current_request: SaaSRequest = self.get_masking_request()
collection_name: str = self.node.address.collection
collection_values: Dict[str, Row] = {collection_name: row}
identity_data: Dict[str, Any] = request.get_cached_identity_data()
# create the source of param values to populate the various placeholders
# in the path, headers, query_params, and body
param_values: Dict[str, Any] = {}
for param_value in current_request.param_values or []:
if param_value.references:
param_values[param_value.name] = pydash.get(
collection_values, param_value.references[0].field
)
elif param_value.identity:
param_values[param_value.name] = pydash.get(
identity_data, param_value.identity
)
elif param_value.connector_param:
param_values[param_value.name] = pydash.get(
self.secrets, param_value.connector_param
)
# mask row values
update_value_map: Dict[str, Any] = self.update_value_map(row, policy, request)
masked_object: Dict[str, Any] = unflatten_dict(update_value_map)
# map of all values including those not being masked/updated
all_value_map: Dict[str, Any] = self.all_value_map(row)
# both maps use field paths for the keys so we can merge them before unflattening
# values in update_value_map will override values in all_value_map
complete_object: Dict[str, Any] = unflatten_dict(
merge_dicts(all_value_map, update_value_map)
)
# removes outer {} wrapper from body for greater flexibility in custom body config
param_values["masked_object_fields"] = json.dumps(masked_object)[1:-1]
param_values["all_object_fields"] = json.dumps(complete_object)[1:-1]
# map param values to placeholders in path, headers, and query params
saas_request_params: SaaSRequestParams = self.map_param_values(
current_request, param_values
)
logger.info(f"Populated request params for {current_request.path}")
return saas_request_params
def all_value_map(self, row: Row) -> Dict[str, Any]:
"""
Takes a row and preserves only the fields that are defined in the Dataset
and are not flagged as read-only. Used for scenarios when an update endpoint
has required fields other than just the fields being updated.
"""
all_value_map: Dict[str, Any] = {}
for field_path, field in self.field_map().items():
# only map scalar fields that are not read-only
if isinstance(field, ScalarField) and not field.read_only:
# only map if the value exists on the row
if pydash.get(row, field_path.string_path) is not None:
all_value_map[field_path.string_path] = pydash.get(
row, field_path.string_path
)
return all_value_map
def query_to_str(self, t: T, input_data: Dict[str, List[Any]]) -> str:
"""Convert query to string"""
return "Not yet supported for SaaSQueryConfig"
def dry_run_query(self) -> Optional[str]:
"""dry run query for display"""
return None
|
<filename>hxl/scripts.py
"""
Console scripts
<NAME>
April 2015
This is a big, ugly module to support the libhxl
console scripts, including (mainly) argument parsing.
License: Public Domain
Documentation: https://github.com/HXLStandard/libhxl-python/wiki
"""
from __future__ import print_function
import argparse, json, logging, os, re, requests, sys
# Do not import hxl, to avoid circular imports
import hxl.converters, hxl.filters, hxl.io
logger = logging.getLogger(__name__)
# In Python2, sys.stdin is a byte stream; in Python3, it's a text stream
STDIN = sys.stdin.buffer
# Posix exit codes
EXIT_OK = 0
EXIT_ERROR = 1
EXIT_SYNTAX = 2
#
# Console script entry points
#
def hxladd():
"""Console script for hxladd."""
run_script(hxladd_main)
def hxlappend():
"""Console script for hxlappend."""
run_script(hxlappend_main)
def hxlclean():
"""Console script for hxlclean"""
run_script(hxlclean_main)
def hxlcount():
"""Console script for hxlcount."""
run_script(hxlcount_main)
def hxlcut():
"""Console script for hxlcut."""
run_script(hxlcut_main)
def hxldedup():
"""Console script for hxldedup."""
run_script(hxldedup_main)
def hxlhash():
"""Console script for hxlhash."""
run_script(hxlhash_main)
def hxlmerge():
"""Console script for hxlmerge."""
run_script(hxlmerge_main)
def hxlrename():
"""Console script for hxlrename."""
run_script(hxlrename_main)
def hxlreplace():
"""Console script for hxlreplace."""
run_script(hxlreplace_main)
def hxlfill():
"""Console script for hxlreplace."""
run_script(hxlfill_main)
def hxlexpand():
"""Console script for hxlexpand."""
run_script(hxlexpand_main)
def hxlexplode():
"""Console script for hxlexplode."""
run_script(hxlexplode_main)
def hxlimplode():
"""Console script for hxlimplode."""
run_script(hxlimplode_main)
def hxlselect():
"""Console script for hxlselect."""
run_script(hxlselect_main)
def hxlsort():
"""Console script for hxlsort."""
run_script(hxlsort_main)
def hxlspec():
"""Console script for hxlspec."""
run_script(hxlspec_main)
def hxltag():
"""Console script for hxltag."""
run_script(hxltag_main)
def hxlvalidate():
"""Console script for hxlvalidate."""
run_script(hxlvalidate_main)
#
# Main scripts for command-line tools.
#
def hxladd_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxladd with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Add new columns with constant values to a HXL dataset.')
parser.add_argument(
'-s',
'--spec',
help='Constant value to add to each row (may repeat option)',
metavar='header#<tag>=<value>',
action='append',
required=True
)
parser.add_argument(
'-b',
'--before',
help='Add new columns before existing ones rather than after them.',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.AddColumnsFilter(source, specs=args.spec, before=args.before)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
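# Illustrative usage sketch (hypothetical file names; the spec follows the
# header#<tag>=<value> metavar above):
#   hxladd -s "Report date#date+reported=2015-03-31" input.csv output.csv
# adds a constant "Report date" column tagged #date+reported to every row.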
def hxlappend_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlappend with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Concatenate two HXL datasets')
# repeatable argument
parser.add_argument(
'-a',
'--append',
help='HXL file to append (may repeat option).',
metavar='file_or_url',
action='append',
default=[]
)
parser.add_argument(
'-l',
'--list',
help='URL or filename of list of URLs (may repeat option). Will appear after sources in -a options.',
action='append',
default=[]
)
parser.add_argument(
'-x',
'--exclude-extra-columns',
        help='Do not add extra columns not in the original dataset.',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'From --append datasets, include only rows matching at least one query.')
args = parser.parse_args(args)
do_common_args(args)
append_sources = []
for append_source in args.append:
append_sources.append(hxl.data(append_source, True))
for list_source in args.list:
for append_source in hxl.filters.AppendFilter.parse_external_source_list(hxl.data(list_source, True)):
append_sources.append(hxl.data(append_source, True))
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.AppendFilter(
source,
append_sources=append_sources,
add_columns=(not args.exclude_extra_columns),
queries=args.query
)
hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
return EXIT_OK
def hxlclean_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlclean with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Clean data in a HXL file.')
parser.add_argument(
'-w',
'--whitespace',
help='Comma-separated list of tag patterns for whitespace normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-u',
'--upper',
help='Comma-separated list of tag patterns for uppercase conversion.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-l',
'--lower',
help='Comma-separated list of tag patterns for lowercase conversion.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-d',
'--date',
help='Comma-separated list of tag patterns for date normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'--date-format',
help='Date formatting string in strftime format (defaults to %%Y-%%m-%%d).',
default=None,
metavar='format',
)
parser.add_argument(
'-n',
'--number',
        help='Comma-separated list of tag patterns for number normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'--number-format',
help='Number formatting string in printf format (without leading %%).',
default=None,
metavar='format',
)
parser.add_argument(
'--latlon',
help='Comma-separated list of tag patterns for lat/lon normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-p',
'--purge',
help='Purge unparseable dates, numbers, and lat/lon during cleaning.',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'Clean only rows matching at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.CleanDataFilter(
source, whitespace=args.whitespace, upper=args.upper, lower=args.lower,
date=args.date, date_format=args.date_format, number=args.number, number_format=args.number_format,
latlon=args.latlon, purge=args.purge, queries=args.query
)
hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
return EXIT_OK
def hxlcount_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlcount with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
# Command-line arguments
parser = make_args('Generate aggregate counts for a HXL dataset')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of column tags to count.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list,
default='loc,org,sector,adm1,adm2,adm3'
)
parser.add_argument(
'-a',
'--aggregator',
help='Aggregator statement',
metavar='statement',
action='append',
type=hxl.filters.Aggregator.parse,
default=[]
)
add_queries_arg(parser, 'Count only rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.CountFilter(source, patterns=args.tags, aggregators=args.aggregator, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlcut_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
parser = make_args('Cut columns from a HXL dataset.')
parser.add_argument(
'-i',
'--include',
help='Comma-separated list of column tags to include',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-x',
'--exclude',
help='Comma-separated list of column tags to exclude',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-s',
'--skip-untagged',
help="Skip columns without HXL hashtags",
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ColumnFilter(source, args.include, args.exclude, args.skip_untagged)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxldedup_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
parser = make_args('Remove duplicate rows from a HXL dataset.')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of column tags to use for deduplication (by default, use all values).',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
add_queries_arg(parser, 'Leave rows alone if they don\'t match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.DeduplicationFilter(source, args.tags, args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlhash_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
parser = make_args(
'Generate an MD5 hash for a HXL dataset (or just its header rows).',
hxl_output=False
)
parser.add_argument(
'-H',
'--headers-only',
help='Hash only the header and hashtag rows.',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source:
if args.headers_only:
print(source.columns_hash)
else:
print(source.data_hash)
return EXIT_OK
def hxlmerge_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlmerge with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Merge part of one HXL dataset into another.')
parser.add_argument(
'-m',
'--merge',
        help='HXL file to merge into the source dataset.',
metavar='filename',
required=True
)
parser.add_argument(
'-k',
'--keys',
help='HXL tag(s) to use as a shared key.',
metavar='tag,tag...',
required=True,
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of column tags to include from the merge dataset.',
metavar='tag,tag...',
required=True,
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-r',
'--replace',
help='Replace empty values in existing columns (when available) instead of adding new ones.',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-O',
'--overwrite',
help='Used with --replace, overwrite existing values.',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'Merged data only from rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output, hxl.io.data(args.merge, True) if args.merge else None as merge_source:
filter = hxl.filters.MergeDataFilter(
source, merge_source=merge_source,
keys=args.keys, tags=args.tags, replace=args.replace, overwrite=args.overwrite,
queries=args.query
)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlrename_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlrename with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Rename and retag columns in a HXL dataset')
parser.add_argument(
'-r',
'--rename',
help='Rename an old tag to a new one, with an optional new text header (may repeat option).',
action='append',
metavar='#?<original_tag>:<Text header>?#?<new_tag>',
default=[],
type=hxl.filters.RenameFilter.parse_rename
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.RenameFilter(source, args.rename)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlreplace_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlreplace with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Replace strings in a HXL dataset')
inline_group = parser.add_argument_group('Inline replacement')
map_group = parser.add_argument_group('External substitution map')
inline_group.add_argument(
'-p',
'--pattern',
help='String or regular expression to search for',
nargs='?'
)
inline_group.add_argument(
'-s',
'--substitution',
help='Replacement string',
nargs='?'
)
inline_group.add_argument(
'-t',
'--tags',
help='Tag patterns to match',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
inline_group.add_argument(
'-r',
'--regex',
help='Use a regular expression instead of a string',
action='store_const',
const=True,
default=False
)
map_group.add_argument(
'-m',
'--map',
help='Filename or URL of a mapping table using the tags #x_pattern (required), #x_substitution (required), #x_tag (optional), and #x_regex (optional), corresponding to the inline options above, for multiple substitutions.',
metavar='PATH',
nargs='?'
)
add_queries_arg(parser, 'Replace only in rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
if args.map:
replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.io.data(args.map, True))
else:
replacements = []
if args.pattern:
for tag in args.tags:
replacements.append(hxl.filters.ReplaceDataFilter.Replacement(args.pattern, args.substitution, tag, args.regex))
filter = hxl.filters.ReplaceDataFilter(source, replacements, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlfill_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlfill with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Fill empty cells in a HXL dataset')
parser.add_argument(
'-t',
'--tag',
help='Fill empty cells only in matching columns (default: fill in all)',
metavar='tagpattern,...',
type=hxl.model.TagPattern.parse,
)
add_queries_arg(parser, 'Fill only in rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.FillDataFilter(source, pattern=args.tag, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlexpand_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlexpand with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Expand lists in cells by repeating rows')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of tag patterns for columns with lists to expand',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list,
nargs="?"
)
parser.add_argument(
"-s",
'--separator',
help='string separating list items (defaults to "|")',
metavar='string',
default="|"
)
parser.add_argument(
"-c",
'--correlate',
help='correlate list values instead of producing a cartesian product',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'Limit list expansion to rows matching at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ExpandListsFilter(source, patterns=args.tags, separator=args.separator, correlate=args.correlate, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlexplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlexplode with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Explode a wide dataset into a long dataset')
parser.add_argument(
'-H',
'--header-att',
help='attribute to add to the label column (defaults to "label")',
metavar='att',
default="label"
)
parser.add_argument(
'-V',
'--value-att',
help='attribute to add to the value column (defaults to "value")',
metavar='tagpattern',
default="value"
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ExplodeFilter(source, header_attribute=args.header_att, value_attribute=args.value_att)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlimplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
    Run hxlimplode with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Implode a long dataset into a wide dataset.')
parser.add_argument(
'-L',
'--label',
help='HXL tag pattern for the label column',
metavar='tagpattern',
required=True,
type=hxl.model.TagPattern.parse,
)
parser.add_argument(
'-V',
'--value',
help='HXL tag pattern for the value column',
metavar='tagpattern',
required=True,
type=hxl.model.TagPattern.parse,
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ImplodeFilter(source, label_pattern=args.label, value_pattern=args.value)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlselect_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlselect with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
# Command-line arguments
parser = make_args('Filter rows in a HXL dataset.')
parser.add_argument(
'-q',
'--query',
help='Query expression for selecting rows (may repeat option for logical OR). <op> may be =, !=, <, <=, >, >=, ~, or !~',
action='append',
metavar='<tagspec><op><value>',
required=True
)
parser.add_argument(
'-r',
'--reverse',
help='Show only lines *not* matching criteria',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.RowFilter(source, queries=args.query, reverse=args.reverse)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlsort_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
    Run hxlsort with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Sort a HXL dataset.')
parser.add_argument(
'-t',
'--tags',
        help='Comma-separated list of tags for columns to use as sort keys.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-r',
'--reverse',
help='Flag to reverse sort order.',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.SortFilter(source, args.tags, args.reverse)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlspec_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
""" Run hxlspec with command-line arguments.
Args:
args (list): a list of command-line arguments
stdin (io.IOBase): alternative standard input (mainly for testing)
stdout (io.IOBase): alternative standard output (mainly for testing)
stderr (io.IOBase): alternative standard error (mainly for testing)
"""
def get_json (url_or_filename):
if not url_or_filename:
return json.load(stdin)
if re.match(r'^(?:https?|s?ftp)://', url_or_filename.lower()):
headers = make_headers(args)
response = requests.get(url_or_filename, verify=(not args.ignore_certs), headers=headers)
response.raise_for_status()
return response.json()
else:
with open(url_or_filename, "r") as input:
return json.load(input)
parser = make_args('Process a HXL JSON spec')
args = parser.parse_args(args)
do_common_args(args)
spec = get_json(args.infile)
source = hxl.io.from_spec(spec, allow_local_ok=True)
with make_output(args, stdout) as output:
        hxl.io.write_hxl(output.output, source, show_tags=not args.strip_tags)
    return EXIT_OK
def hxltag_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxltag with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Add HXL tags to a raw CSV file.')
parser.add_argument(
'-a',
'--match-all',
help='Match the entire header text (not just a substring)',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-m',
'--map',
help='Mapping expression',
required=True,
action='append',
metavar='Header Text#tag',
type=hxl.converters.Tagger.parse_spec
)
parser.add_argument(
'-d',
'--default-tag',
help='Default tag for non-matching columns',
metavar='#tag',
type=hxl.model.Column.parse
)
args = parser.parse_args(args)
do_common_args(args)
with make_input(args, stdin) as input, make_output(args, stdout) as output:
tagger = hxl.converters.Tagger(input, args.map, default_tag=args.default_tag, match_all=args.match_all)
hxl.io.write_hxl(output.output, hxl.io.data(tagger), show_tags=not args.strip_tags)
return EXIT_OK
def hxlvalidate_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlvalidate with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Validate a HXL dataset.')
parser.add_argument(
'-s',
'--schema',
help='Schema file for validating the HXL dataset (if omitted, use the default core schema).',
metavar='schema',
default=None
)
parser.add_argument(
'-a',
'--all',
help='Include all rows in the output, including those without errors',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-e',
'--error-level',
help='Minimum error level to show (defaults to "info") ',
choices=['info', 'warning', 'error'],
metavar='info|warning|error',
default='info'
)
args = parser.parse_args(args)
do_common_args(args)
with make_input(args, stdin) as input, make_output(args, stdout) as output:
class Counter:
infos = 0
warnings = 0
errors = 0
def callback(e):
"""Show a validation error message."""
if e.rule.severity == 'info':
if args.error_level != 'info':
return
Counter.infos += 1
elif e.rule.severity == 'warning':
if args.error_level == 'error':
return
Counter.warnings += 1
else:
Counter.errors += 1
message = '[{}] '.format(e.rule.severity)
if e.row:
if e.rule:
message += "{},{}: ".format(e.row.row_number + 1, e.rule.tag_pattern)
else:
message += "{}: ".format(e.row.row_number + 1)
elif e.rule:
message += "<dataset>,{}: ".format(e.rule.tag_pattern)
else:
message += "<dataset>: "
if e.value:
message += '"{}" '.format(e.value)
if e.message:
message += e.message
message += "\n"
output.write(message)
output.write("Validating {} with schema {} ...\n".format(args.infile or "<standard input>", args.schema or "<default>"))
source = hxl.io.data(input)
if args.schema:
with make_input(args, None, args.schema) as schema_input:
schema = hxl.schema(schema_input, callback=callback)
else:
schema = hxl.schema(callback=callback)
schema.validate(source)
if args.error_level == 'info':
output.write("{:,} error(s), {:,} warnings, {:,} suggestions\n".format(Counter.errors, Counter.warnings, Counter.infos))
elif args.error_level == 'warning':
output.write("{:,} error(s), {:,} warnings\n".format(Counter.errors, Counter.warnings))
else:
output.write("{:,} error(s)\n".format(Counter.errors))
if Counter.errors > 0:
output.write("Validation failed.\n")
return EXIT_ERROR
else:
output.write("Validation succeeded.\n")
return EXIT_OK
#
# Utility functions
#
def run_script(func):
"""Try running a command-line script, with exception handling."""
try:
sys.exit(func(sys.argv[1:], STDIN, sys.stdout))
except KeyboardInterrupt:
logger.error("Interrupted")
sys.exit(EXIT_ERROR)
def make_args(description, hxl_output=True):
"""Set up parser with default arguments.
@param description: usage description to show
@param hxl_output: if True (default), include options for HXL output.
@returns: an argument parser, partly set up.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'infile',
help='HXL file to read (if omitted, use standard input).',
nargs='?'
)
if hxl_output:
parser.add_argument(
'outfile',
help='HXL file to write (if omitted, use standard output).',
nargs='?'
)
parser.add_argument(
'--sheet',
help='Select sheet from a workbook (1 is first sheet)',
metavar='number',
type=int,
nargs='?'
)
parser.add_argument(
'--selector',
help='JSONPath expression for starting point in JSON input',
metavar='path',
nargs='?'
)
parser.add_argument(
'--http-header',
help='Custom HTTP header to send with request',
metavar='header',
action='append'
)
if hxl_output:
parser.add_argument(
'--remove-headers',
help='Strip text headers from the CSV output',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'--strip-tags',
help='Strip HXL tags from the CSV output',
action='store_const',
const=True,
default=False
)
parser.add_argument(
"--ignore-certs",
help="Don't verify SSL connections (useful for self-signed)",
action='store_const',
const=True,
default=False
)
parser.add_argument(
'--log',
help='Set minimum logging level',
metavar='debug|info|warning|error|critical|none',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='error'
)
return parser
def add_queries_arg(parser, help='Apply only to rows matching at least one query.'):
parser.add_argument(
'-q',
'--query',
help=help,
metavar='<tagspec><op><value>',
action='append'
)
return parser
def do_common_args(args):
"""Process standard args"""
logging.basicConfig(format='%(levelname)s (%(name)s): %(message)s', level=args.log.upper())
def make_source(args, stdin=STDIN):
"""Create a HXL input source."""
# construct the input object
input = make_input(args, stdin)
return hxl.io.data(input)
def make_input(args, stdin=sys.stdin, url_or_filename=None):
"""Create an input object"""
if url_or_filename is None:
url_or_filename = args.infile
# sheet index
sheet_index = args.sheet
if sheet_index is not None:
sheet_index -= 1
# JSONPath selector
selector = args.selector
http_headers = make_headers(args)
return hxl.io.make_input(
url_or_filename or stdin,
sheet_index=sheet_index,
selector=selector,
allow_local=True,
http_headers=http_headers,
verify_ssl=(not args.ignore_certs)
)
def make_output(args, stdout=sys.stdout):
"""Create an output stream."""
if args.outfile:
return FileOutput(args.outfile)
else:
return StreamOutput(stdout)
def make_headers (args):
# get custom headers
header_strings = []
header = os.environ.get("HXL_HTTP_HEADER")
if header is not None:
header_strings.append(header)
if args.http_header is not None:
header_strings += args.http_header
http_headers = {}
for header in header_strings:
parts = header.partition(':')
http_headers[parts[0].strip()] = parts[2].strip()
return http_headers
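# Illustrative sketch (hypothetical header string): given args.http_header ==
# ["Authorization: Bearer abc123"], make_headers returns
# {"Authorization": "Bearer abc123"}. A header in the HXL_HTTP_HEADER environment
# variable is parsed the same way; if it shares a name with a command-line header,
# the command-line value wins because it is processed later.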
class FileOutput(object):
def __init__(self, filename):
self.output = open(filename, 'w')
def __enter__(self):
return self
def __exit__(self, value, type, traceback):
self.output.close()
class StreamOutput(object):
def __init__(self, output):
self.output = output
def __enter__(self):
return self
def __exit__(self, value, type, traceback):
pass
def write(self, s):
self.output.write(s)
|
from typing import Dict, Optional
from overrides import overrides
import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("basic_classifier")
class BasicClassifier(Model):
"""
This ``Model`` implements a basic text classifier. After embedding the text into
a text field, we will optionally encode the embeddings with a ``Seq2SeqEncoder``. The
resulting sequence is pooled using a ``Seq2VecEncoder`` and then passed to
a linear classification layer, which projects into the label space. If a
``Seq2SeqEncoder`` is not provided, we will pass the embedded text directly to the
``Seq2VecEncoder``.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the input text into a ``TextField``
seq2seq_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
Optional Seq2Seq encoder layer for the input text.
seq2vec_encoder : ``Seq2VecEncoder``
Required Seq2Vec encoder layer. If `seq2seq_encoder` is provided, this encoder
will pool its output. Otherwise, this encoder will operate directly on the output
of the `text_field_embedder`.
dropout : ``float``, optional (default = ``None``)
Dropout percentage to use.
num_labels: ``int``, optional (default = ``None``)
Number of labels to project to in classification layer. By default, the classification layer will
project to the size of the vocabulary namespace corresponding to labels.
label_namespace: ``str``, optional (default = "labels")
Vocabulary namespace corresponding to labels. By default, we use the "labels" namespace.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
If provided, will be used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
seq2vec_encoder: Seq2VecEncoder,
seq2seq_encoder: Seq2SeqEncoder = None,
dropout: float = None,
num_labels: int = None,
label_namespace: str = "labels",
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
        self._seq2seq_encoder = seq2seq_encoder
self._seq2vec_encoder = seq2vec_encoder
self._classifier_input_dim = self._seq2vec_encoder.get_output_dim()
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = None
self._label_namespace = label_namespace
if num_labels:
self._num_labels = num_labels
else:
self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
label: torch.IntTensor = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
tokens : Dict[str, torch.LongTensor]
From a ``TextField``
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing
unnormalized log probabilities of the label.
probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing
probabilities of the label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text = self._text_field_embedder(tokens)
mask = get_text_field_mask(tokens).float()
if self._seq2seq_encoder:
embedded_text = self._seq2seq_encoder(embedded_text, mask=mask)
embedded_text = self._seq2vec_encoder(embedded_text, mask=mask)
if self._dropout:
embedded_text = self._dropout(embedded_text)
logits = self._classification_layer(embedded_text)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "probs": probs}
if label is not None:
loss = self._loss(logits, label.long().view(-1))
output_dict["loss"] = loss
self._accuracy(logits, label)
return output_dict
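    # Shape sketch (symbols assumed, not from the source): with batch size B, sequence
    # length T and embedding size E, the embedder yields (B, T, E); an optional
    # seq2seq_encoder keeps a (B, T, E') sequence; the seq2vec_encoder pools it to
    # (B, H); the linear layer maps (B, H) -> (B, num_labels) and softmax gives "probs".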
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
        Does a simple argmax over the probabilities, converts the index to a string label,
        and adds the ``"label"`` key to the dictionary with the result.
"""
predictions = output_dict["probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = (self.vocab.get_index_to_token_vocabulary(self._label_namespace)
.get(label_idx, str(label_idx)))
classes.append(label_str)
output_dict["label"] = classes
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = {'accuracy': self._accuracy.get_metric(reset)}
return metrics
|
"""Schedule games in competitions.
These schedulers are used to keep track of the ids of games which have been
started, and which have reported their results.
They provide a mechanism to reissue ids of games which were in progress when an
unclean shutdown occurred.
All scheduler classes are suitable for pickling.
"""
class Simple_scheduler(object):
"""Schedule a single sequence of games.
The issued tokens are integers counting up from zero.
Public attributes (treat as read-only):
issued -- int
fixed -- int
"""
def __init__(self):
self.next_new = 0
self.outstanding = set()
self.to_reissue = set()
self.issued = 0
self.fixed = 0
# self._check_consistent()
def _check_consistent(self):
assert self.issued == \
self.next_new - len(self.to_reissue)
assert self.fixed == \
self.next_new - len(self.outstanding) - len(self.to_reissue)
def __getstate__(self):
return (self.next_new, self.outstanding, self.to_reissue)
def __setstate__(self, state):
(self.next_new, self.outstanding, self.to_reissue) = state
self.issued = self.next_new - len(self.to_reissue)
self.fixed = self.issued - len(self.outstanding)
# self._check_consistent()
def issue(self):
"""Choose the next game to start.
Returns an integer 'token'.
"""
if self.to_reissue:
result = min(self.to_reissue)
self.to_reissue.discard(result)
else:
result = self.next_new
self.next_new += 1
self.outstanding.add(result)
self.issued += 1
# self._check_consistent()
return result
def fix(self, token):
"""Note that a game's result has been reliably stored."""
self.outstanding.remove(token)
self.fixed += 1
# self._check_consistent()
def rollback(self):
"""Make issued-but-not-fixed tokens available again."""
self.issued -= len(self.outstanding)
self.to_reissue.update(self.outstanding)
self.outstanding = set()
# self._check_consistent()
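    # Illustrative lifecycle sketch (hypothetical session): issue() -> 0, issue() -> 1,
    # fix(0), then rollback() moves the unfixed token 1 into to_reissue, so the next
    # issue() returns 1 again before any new token is created.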
class Group_scheduler(object):
"""Schedule multiple lists of games in parallel.
This schedules for a number of _groups_, each of which may have a limit on
the number of games to play. It schedules from the group (of those which
haven't reached their limit) with the fewest issued games, with smallest
group code breaking ties.
    Group codes might be ints or short strings
    (any sortable, pickleable, and hashable object should do).
The issued tokens are pairs (group code, game number), with game numbers
counting up from 0 independently for each group code.
"""
def __init__(self):
self.allocators = {}
self.limits = {}
def __getstate__(self):
return (self.allocators, self.limits)
def __setstate__(self, state):
(self.allocators, self.limits) = state
def set_groups(self, group_specs):
"""Set the groups to be scheduled.
group_specs -- iterable of pairs (group code, limit)
limit -- int or None
You can call this again after the first time. The limits will be set to
the new values. Any existing groups not in the list are forgotten.
"""
new_allocators = {}
new_limits = {}
for group_code, limit in group_specs:
if group_code in self.allocators:
new_allocators[group_code] = self.allocators[group_code]
else:
new_allocators[group_code] = Simple_scheduler()
new_limits[group_code] = limit
self.allocators = new_allocators
self.limits = new_limits
def issue(self):
"""Choose the next game to start.
Returns a pair (group code, game number)
Returns (None, None) if all groups have reached their limit.
"""
groups = [
(group_code, allocator.issued, self.limits[group_code])
for (group_code, allocator) in self.allocators.iteritems()
]
available = [
(issue_count, group_code)
for (group_code, issue_count, limit) in groups
if limit is None or issue_count < limit
]
if not available:
return None, None
_, group_code = min(available)
return group_code, self.allocators[group_code].issue()
def fix(self, group_code, game_number):
"""Note that a game's result has been reliably stored."""
self.allocators[group_code].fix(game_number)
def rollback(self):
"""Make issued-but-not-fixed tokens available again."""
for allocator in self.allocators.itervalues():
allocator.rollback()
def nothing_issued_yet(self):
"""Say whether nothing has been issued yet."""
return all(allocator.issued == 0
for allocator in self.allocators.itervalues())
def all_fixed(self):
"""Check whether all groups have reached their limits.
This returns true if all groups have limits, and each group has as many
_fixed_ tokens as its limit.
"""
return all(allocator.fixed >= self.limits[g]
for (g, allocator) in self.allocators.iteritems())
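    # Illustrative sketch (hypothetical groups): after set_groups([("A", 2), ("B", None)]),
    # successive issue() calls return ("A", 0), ("B", 0), ("A", 1), ("B", 1); once group
    # "A" reaches its limit of 2 issued games, only ("B", n) tokens are handed out.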
|
<reponame>pankdm/highloadcup-2019<filename>src/py/tank.py
#!/usr/bin/env python
import sys
import json
import requests
from collections import defaultdict
import time
COMPARE_RESULTS = True
MAX_RESPONSE_SIZE = 0
# example usage:
# cat input-data/elim_accounts_261218/answers/phase_1_get.answ |grep "/group" | head -n 100 |python src/py/tank.py
# cat input-data/elim_accounts_261218/answers/phase_1_get.answ | head -n 1000 | python src/py/tank.py
# cat input-data/elim_accounts_261218/answers/phase_1_get.answ | head -n 1000 | python src/py/tank.py
#
# cat input-data/test_accounts_240119/answers/phase_1_get.answ |python src/py/tank.py
# cat input-data/test_accounts_291218/answers/phase_1_get.answ |grep "/group" | head -n 100 |python src/py/tank.py
# cat input-data/test_accounts_220119/answers/phase_1_get.answ |python src/py/tank.py
#
# cat input-data/test_accounts_240119/answers/phase_3_get.answ |python src/py/tank.py
def do_post_request(query, body):
url = "http://127.0.0.1:8081{}".format(query)
r = requests.post(url, data=body)
print(type(r))
print(r.status_code)
print(r.headers)
print(r.headers['content-type'])
def do_request(query):
global MAX_RESPONSE_SIZE
url = "http://127.0.0.1:8081{}".format(query)
r = requests.get(url)
# print(type(r))
# print(r.status_code)
# print(r.headers)
# print(r.headers['content-type'])
js_str = r.text
current_size = len(js_str)
if current_size > MAX_RESPONSE_SIZE:
MAX_RESPONSE_SIZE = current_size
print 'MAX_RESPONSE_SIZE = ', MAX_RESPONSE_SIZE
if js_str == "":
js = None
else:
js = json.loads(js_str)
return r.status_code, js
def compare_accounts(index, item_got, item_expected):
if (item_got['id'] != item_expected['id']):
print '{}: wrong item id, got: {}, expected: {}'.format(
index, item_got['id'], item_expected['id'])
return False
# TODO: compare field values
# for k, v in item_expected.items():
# v_got = item_got.get(k, None)
# if (v_got != v):
# print '{}: field "{}" is different, got: {}, expected: {}'.format(
# index, k, v_got.encode('utf8'), v.encode('utf8'))
# return False
return True
def compare_groups(index, group_got, group_expected):
if group_got['count'] == 0:
print 'Got 0 items for {} !!'.format(group_got)
if (group_got['count'] != group_expected['count']):
print '{}: wrong count, got: {}, expected: {}'.format(
index, group_got['count'], group_expected['count'])
return False
# TODO: compare field values
return True
def compare_data(data_got, data_expected, f):
# print json.dumps(data_got, indent=4)
# print json.dumps(data_expected, indent=4)
ok = True
if len(data_got) != len(data_expected):
print 'Wrong response size: got: {}, expected: {}'.format(len(data_got), len(data_expected))
ok = False
index = 0
while (index < len(data_got) and index < len(data_expected)):
item_got = data_got[index]
item_expected = data_expected[index]
if (f(index, item_got, item_expected) == False):
print 'GOT: ', json.dumps(item_got, indent = 4)
print 'EXPECTED: ', json.dumps(item_expected, indent = 4)
return False
index += 1
if index < len(data_got):
print "GOT Extra: ", json.dumps(data_got[index], indent = 4)
if index < len(data_expected):
print "EXPECTED Extra: ", json.dumps(data_expected[index], indent = 4)
return ok
def compare_results(js_got, js_expected):
if "accounts" in js_expected:
data_got = js_got["accounts"]
data_expected = js_expected["accounts"]
return compare_data(data_got, data_expected, compare_accounts)
if "groups" in js_expected:
data_got = js_got["groups"]
data_expected = js_expected["groups"]
return compare_data(data_got, data_expected, compare_groups)
return True
def get_request_type(q):
parts = q.split('/')
if parts[2] == "filter" or parts[2] == "group":
return parts[2]
return parts[3]
class RequestStats:
def __init__(self):
self.num = 0
self.total_time_ms = 0
def get_average(self):
if self.num == 0:
return 0.0
return float(self.total_time_ms) / self.num
class Tank:
def __init__(self):
self.stats_by_type = defaultdict(RequestStats)
def print_stats(self, errors):
types = list(self.stats_by_type.keys())
# print types
total_time = 0
types.sort(key = lambda x : self.stats_by_type[x].total_time_ms, reverse=True)
for type in types:
stats = self.stats_by_type[type]
print ' ==> {} ({}) --> avg {:.1f} ms, total = {:.1f}'.format(
type, stats.num, stats.get_average(), stats.total_time_ms)
total_time += stats.total_time_ms
print 'total time = {:.1f} ms'.format(total_time)
print 'total errors = ', errors
def benchmark(self, query, times):
for counter in xrange(times):
start = time.time()
code_got, js_got = do_request(query)
end = time.time()
duration_ms = (end - start) * 1000.
request_type = get_request_type(query)
self.stats_by_type[request_type].num += 1
self.stats_by_type[request_type].total_time_ms += duration_ms
msg = "{} | {:.1f} ms | {}".format(counter, duration_ms, query)
print msg
self.print_stats(None)
def run(self):
self.counter = 0
errors = None
if COMPARE_RESULTS:
errors = 0
error_by_request = defaultdict(list)
ts = int(time.time())
f = open('perf-logs/perf-{}.txt'.format(ts), 'wt')
for l in sys.stdin:
parts = l.strip('\n').split('\t')
# if (len(parts) < 4):
# # print l
# continue
type = parts[0]
q = parts[1]
code_expected = int(parts[2])
# look only at 200s for now
if (code_expected != 200):
continue
js_str = " ".join(parts[3:])
if js_str == "":
js_expected = None
else:
js_expected = json.loads(js_str)
self.counter += 1
start = time.time()
# print 'doing ', q
code_got, js_got = do_request(q)
end = time.time()
duration_ms = (end - start) * 1000.
msg = "{} | {:.1f} ms | {} {} {}".format(self.counter, duration_ms, type, q, code_expected)
# print msg
print >> f, msg
request_type = get_request_type(q)
self.stats_by_type[request_type].num += 1
self.stats_by_type[request_type].total_time_ms += duration_ms
if (self.counter % 300 == 0):
print ""
print self.counter, 'requests'
self.print_stats(errors)
if code_got != code_expected:
print msg
print ("Wrong response code: {}, expected: {}".format(code_got, code_expected))
continue
# don't compare non 200 responses
if code_expected != 200:
# print 'OK'
continue
if COMPARE_RESULTS and (compare_results(js_got, js_expected) == False):
print msg
errors += 1
error_by_request[request_type].append(q)
# break
# print 'OK'
# print json.dumps(js, indent=4)
f.close()
print ""
print '==> finished ', self.counter, ' requests'
self.print_stats(0)
print 'total errors = ', errors
for type, qq in error_by_request.items():
print ' "{}" --> total errors = {}'.format(type, len(qq))
for q in qq:
print " ", q
tank = Tank()
if len(sys.argv) > 1:
query = sys.argv[1]
times = int(sys.argv[2])
tank.benchmark(query, times)
else:
tank.run()
|
# Model
from torch.nn import functional as F
from global_config import Config
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import torch
# Graph Neural Networks
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, num_entities, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
self.num_entities = num_entities
self.weight_adj = Parameter(torch.FloatTensor(num_entities, num_entities))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.weight_adj.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
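# Propagation sketch for GraphConvolution.forward (shapes assumed, not from the source):
# with input of shape (N, in_features), adj of shape (N, N) and weight of shape
# (in_features, out_features), support = input @ weight is (N, out_features) and
# output = adj @ support keeps that shape, i.e. the standard GCN rule H' = A · H · W (+ bias).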
# Graph2Taxo
class GRAPH2TAXO(torch.nn.Module):
"""
Graph2Taxo model
"""
def __init__(self, num_entities, num_relations):
super(GRAPH2TAXO, self).__init__()
self.num_entities = num_entities
torch.manual_seed(Config.random_seed)
self.emb_e = torch.nn.Embedding(num_entities, Config.init_emb_size, padding_idx=0)
self.gc0 = GraphConvolution(Config.init_emb_size, Config.init_emb_size, num_entities)
self.gc1 = GraphConvolution(Config.init_emb_size, Config.gc1_emb_size, num_entities)
self.gc2 = GraphConvolution(Config.gc1_emb_size, Config.gc2_emb_size, num_entities)
self.gc3 = GraphConvolution(Config.gc1_emb_size, Config.embedding_dim, num_entities)
self.conv1 = nn.Conv1d(1, Config.channels, Config.kernel_size, stride=1, padding=int(
math.floor(Config.kernel_size / 2))) # kernel size is odd, then padding = math.floor(kernel_size/2)
self.inp_drop = torch.nn.Dropout(Config.input_dropout)
self.hidden_drop = torch.nn.Dropout(Config.dropout_rate)
self.feature_map_drop = torch.nn.Dropout(Config.dropout_rate)
self.fc = torch.nn.Linear((Config.embedding_dim+8) * Config.channels, Config.embedding_dim)
self.fc2 = torch.nn.Linear((Config.embedding_dim + Config.gc1_emb_size)*2 , Config.embedding_dim)
self.fc3 = torch.nn.Linear(Config.embedding_dim, 1)
self.fc4 = torch.nn.Linear(300 * 2, Config.embedding_dim)
self.fc_gcn = torch.nn.Linear(Config.embedding_dim, 1)
self.fc_com = torch.nn.Linear(2, 1)
self.fc_dag = torch.nn.Linear(Config.embedding_dim + Config.gc1_emb_size, 1)
self.bn_init = torch.nn.BatchNorm1d(Config.init_emb_size)
self.bn0 = torch.nn.BatchNorm1d(Config.embedding_dim + 8)
self.bn1 = torch.nn.BatchNorm1d(Config.channels)
self.bn2 = torch.nn.BatchNorm1d(Config.embedding_dim)
self.bn3 = torch.nn.BatchNorm1d(Config.gc1_emb_size)
self.bn4 = torch.nn.BatchNorm1d(Config.embedding_dim)
self.bn5 = torch.nn.BatchNorm1d(Config.embedding_dim)
self.bn6 = torch.nn.BatchNorm1d(Config.embedding_dim + Config.gc1_emb_size)
self.bn7 = torch.nn.BatchNorm1d(Config.embedding_dim)
self.bn8 = torch.nn.BatchNorm1d(16)
self.bn9 = torch.nn.BatchNorm1d(Config.embedding_dim)
self.bn_word = torch.nn.BatchNorm1d(300)
self.bn_w = torch.nn.BatchNorm1d(num_entities)
self.bn_edge = torch.nn.BatchNorm1d(3)
self.loss = torch.nn.BCELoss()
def forward(self, e1, e2, rel, X, adjs, terms, e1_index, e2_index, word_embs, fre, degree, substr):
# Use the random initialized embeddings
emb_initial = self.emb_e(X)
emb_initial = F.relu(emb_initial) # option
emb_initial = self.inp_drop(emb_initial) # option
x = self.gc0(emb_initial, adjs)
x = self.bn_init(x)
x = F.relu(x)
x = self.inp_drop(x) # option
x = self.gc1(x, adjs)
x = self.bn3(x)
x = F.relu(x)
s = self.gc2(x, adjs)
s = F.softmax(s, dim=1)
out = torch.mm(s.transpose(0, 1), x)
out = F.relu(out)
out = self.inp_drop(out)
out_adj = torch.matmul(torch.matmul(s.transpose(0, 1), adjs), s)
out = self.gc3(out, out_adj)
out = F.relu(out)
out = self.inp_drop(out)
emb_dp = torch.matmul(s, out)
emb_dp = F.relu(emb_dp)
emb_dp = self.inp_drop(emb_dp)
x = torch.cat([x, emb_dp], 1)
x = self.bn6(x)
e1_embedded = x[e1]
e2_embedded = x[e2]
x = torch.cat([e1_embedded, e2_embedded], 1)
x = self.fc2(x)
x = self.bn7(x)
feas = torch.cat([fre, substr], 1)
x = torch.cat([x, feas], 1)
x = self.bn0(x)
x = x.view(x.size()[0], 1, x.size()[1])
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
x = x.view(x.size()[0], -1)
x = self.fc(x)
x = self.bn2(x)
#x = F.relu(x)
#x = self.feature_map_drop(x)
x = self.fc3(x)
pred = torch.sigmoid(x)
        return pred
|
<reponame>milancermak/alexa-math-skill<filename>tests/functions/skill/test_main.py
import jmespath
import pytest
from src.functions.skill import main
from .fixtures import ( # pylint: disable=unused-import
dynamodb_client, launch_request, session_ended_request,
did_select_operation_intent, did_select_difficulty_intent,
did_answer_intent_correct, did_answer_intent_wrong,
build_intent_event, unhandled_intent
)
@pytest.fixture(autouse=True)
def use_mock_dynamodb_client(dynamodb_client):
# uses a new mock object every time so it's safe
main.sb.dynamodb_client = dynamodb_client
#
# assert helpers
#
def assert_has_apl(result):
assert jmespath.search('response.directives[0].type', result) == \
'Alexa.Presentation.APL.RenderDocument'
assert jmespath.search('response.directives[0].datasources', result) is not None
assert jmespath.search('response.directives[0].document', result) is not None
def assert_keypath(path, data, value):
assert jmespath.search(path, data) == value
#
# tests
#
def test_launch_request_handler(launch_request):
r = main.sb.lambda_handler()(launch_request, {})
assert isinstance(r, dict)
# TODO: could this be wrapped in a with statement? is it worth it?
assert_keypath('sessionAttributes.launch_count', r, 0)
assert_keypath('sessionAttributes.previous_session_end', r, 0)
assert_keypath('sessionAttributes.session_data.operation', r, None)
assert_keypath('sessionAttributes.session_data.difficulty', r, None)
assert_keypath('sessionAttributes.session_data.correct_result', r, 0)
assert_keypath('sessionAttributes.session_data.questions_count', r, 0)
assert_keypath('sessionAttributes.session_data.correct_answers_count', r, 0)
assert_keypath('sessionAttributes.session_data.streak_count', r, 0)
assert_keypath('response.directives', r, None)
def test_session_ended_request_handler(session_ended_request):
r = main.sb.lambda_handler()(session_ended_request, {})
assert isinstance(r, dict)
assert_keypath('sessionAttributes', r, {})
assert_keypath('response.directives', r, None)
def test_did_select_operation_handler(did_select_operation_intent):
r = main.sb.lambda_handler()(did_select_operation_intent, {})
assert isinstance(r, dict)
assert_keypath('sessionAttributes.session_data.operation', r, 'add')
assert_keypath('response.directives', r, None)
def test_did_select_difficulty_handler(did_select_difficulty_intent):
r = main.sb.lambda_handler()(did_select_difficulty_intent, {})
assert isinstance(r, dict)
assert_keypath('sessionAttributes.session_data.difficulty', r, 3)
assert_has_apl(r)
def test_did_answer_handler_correct_answer(did_answer_intent_correct):
r = main.sb.lambda_handler()(did_answer_intent_correct, {})
assert isinstance(r, dict)
assert_keypath('sessionAttributes.session_data.questions_count', r, 1)
assert_keypath('sessionAttributes.session_data.correct_answers_count', r, 1)
assert_keypath('sessionAttributes.session_data.streak_count', r, 1)
assert_has_apl(r)
def test_did_answer_handler_wrong_answer(did_answer_intent_wrong):
r = main.sb.lambda_handler()(did_answer_intent_wrong, {})
assert isinstance(r, dict)
assert_keypath('sessionAttributes.session_data.questions_count', r, 1)
assert_keypath('sessionAttributes.session_data.correct_answers_count', r, 0)
assert_keypath('sessionAttributes.session_data.streak_count', r, 0)
assert_has_apl(r)
@pytest.mark.parametrize('intent_name', ['AMAZON.HelpIntent',
'AMAZON.FallbackIntent',
'AMAZON.StopIntent',
'AMAZON.CancelIntent'])
def test_other_intents(intent_name):
intent_event = build_intent_event(intent_name)
r = main.sb.lambda_handler()(intent_event, {})
assert isinstance(r, dict)
assert_keypath('response.directives', r, None)
@pytest.mark.parametrize('intent_name', ['AMAZON.StopIntent',
'AMAZON.CancelIntent'])
def test_early_stop(intent_name):
intent_event = build_intent_event(intent_name)
del intent_event['session']['attributes']['session_data']['operation']
del intent_event['session']['attributes']['session_data']['difficulty']
intent_event['session']['attributes']['session_data']['questions_count'] = 0
r = main.sb.lambda_handler()(intent_event, {})
assert isinstance(r, dict)
assert_keypath('response.outputSpeech', r, None)
assert_keypath('response.shouldEndSession', r, True)
assert_keypath('response.directives', r, None)
def test_response_with_help_message_on_exception(unhandled_intent):
r = main.sb.lambda_handler()(unhandled_intent, {})
assert isinstance(r, dict)
assert_keypath('response.directives', r, None)
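# Added illustrative self-check (not part of the original suite): assert_keypath
# resolves dotted JMESPath expressions, e.g. 'a.b' on {'a': {'b': 1}} yields 1.
def test_assert_keypath_helper_example():
    assert_keypath('a.b', {'a': {'b': 1}}, 1)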
# Repo: JiaMingLin/tsn-pytorch
"""
Utility functions for model
"""
import os
import hashlib
import requests
from tqdm import tqdm
import torch
def deploy_model(model, cfg):
"""
Deploy model to multiple GPUs for DDP training.
"""
if cfg.DDP_CONFIG.DISTRIBUTED:
if cfg.DDP_CONFIG.GPU is not None:
torch.cuda.set_device(cfg.DDP_CONFIG.GPU)
model.cuda(cfg.DDP_CONFIG.GPU)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[cfg.DDP_CONFIG.GPU],
find_unused_parameters=True)
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
elif cfg.DDP_CONFIG.GPU is not None:
torch.cuda.set_device(cfg.DDP_CONFIG.GPU)
model.cuda(cfg.DDP_CONFIG.GPU)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
model = torch.nn.DataParallel(model).cuda()
return model
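# Hedged usage sketch (cfg layout assumed from the checks above, not verified):
# cfg.DDP_CONFIG.DISTRIBUTED and cfg.DDP_CONFIG.GPU select between DDP, single-GPU
# and DataParallel placement, e.g.
#   model = deploy_model(model, cfg)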
def load_model(model, cfg, load_fc=True):
"""
Load pretrained model weights.
"""
if os.path.isfile(cfg.CONFIG.MODEL.PRETRAINED_PATH):
print("=> loading checkpoint '{}'".format(cfg.CONFIG.MODEL.PRETRAINED_PATH))
if cfg.DDP_CONFIG.GPU is None:
checkpoint = torch.load(cfg.CONFIG.MODEL.PRETRAINED_PATH)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(cfg.DDP_CONFIG.GPU)
checkpoint = torch.load(cfg.CONFIG.MODEL.PRETRAINED_PATH, map_location=loc)
model_dict = model.state_dict()
if not load_fc:
del model_dict['module.fc.weight']
del model_dict['module.fc.bias']
pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
unused_dict = {k: v for k, v in checkpoint['state_dict'].items() if not k in model_dict}
not_found_dict = {k: v for k, v in model_dict.items() if not k in checkpoint['state_dict']}
print("unused model layers:", unused_dict.keys())
print("not found layers:", not_found_dict.keys())
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print("=> loaded checkpoint '{}' (epoch {})"
.format(cfg.CONFIG.MODEL.PRETRAINED_PATH, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(cfg.CONFIG.MODEL.PRETRAINED_PATH))
return model, None
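# Hedged usage sketch (not from the original repo): load pretrained weights while
# dropping the final classifier, e.g. when fine-tuning on a different label set:
#   model, _ = load_model(model, cfg, load_fc=False)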
def save_model(model, optimizer, epoch, cfg):
# pylint: disable=line-too-long
"""
Save trained model weights.
"""
model_save_dir = os.path.join(cfg.CONFIG.LOG.BASE_PATH,
cfg.CONFIG.LOG.EXP_NAME,
cfg.CONFIG.LOG.SAVE_DIR)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
ckpt_name = "f{}_s{}_ckpt_epoch{}.pth".format(cfg.CONFIG.DATA.CLIP_LEN, cfg.CONFIG.DATA.FRAME_RATE, epoch)
checkpoint = os.path.join(model_save_dir, ckpt_name)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_acc1': None,
'optimizer': optimizer.state_dict(),
}, filename=checkpoint)
def save_checkpoint(state, filename='checkpoint.pth'):
torch.save(state, filename)
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
sha1_file = sha1.hexdigest()
l = min(len(sha1_file), len(sha1_hash))
return sha1.hexdigest()[0:l] == sha1_hash[0:l]
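# Hedged, self-contained demo of check_sha1 (added for illustration, not from the
# original repo): writes a temporary file, hashes it with hashlib, and verifies the
# helper agrees with the freshly computed digest.
def _demo_check_sha1():
    import tempfile
    data = b'hello world'
    expected = hashlib.sha1(data).hexdigest()
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(data)
        tmp_path = tmp.name
    try:
        assert check_sha1(tmp_path, expected)
    finally:
        os.remove(tmp_path)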
def download(url, path=None, overwrite=False, sha1_hash=None):
"""Download an given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
print('Downloading %s from %s...'%(fname, url))
r = requests.get(url, stream=True)
if r.status_code != 200:
raise RuntimeError("Failed downloading url %s"%url)
total_length = r.headers.get('content-length')
with open(fname, 'wb') as f:
if total_length is None: # no content length header
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
total_length = int(total_length)
for chunk in tqdm(r.iter_content(chunk_size=1024),
total=int(total_length / 1024. + 0.5),
unit='KB', unit_scale=False, dynamic_ncols=True):
f.write(chunk)
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning('File {} is downloaded but the content hash does not match. ' \
'The repo may be outdated or download may be incomplete. ' \
'If the "repo_url" is overridden, consider switching to ' \
'the default repo.'.format(fname))
    return fname
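# Hedged usage sketch (the URL and hash below are placeholders, not real artifacts):
#   ckpt_path = download('https://example.com/models/ckpt.pth',
#                        path='./checkpoints/',
#                        sha1_hash='<expected-hexdigest>')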
# gh_stars: 0
import os
import numpy as np
import pandas as pd
from mtist.graphing_utils import despine, easy_subplots, savefig
# from mtist.mtist_utils import GLOBALS, load_ground_truths, simulate
from mtist import mtist_utils as mu
class MASTER_DATASET_DEFAULTS:
dt = 0.1
tend = 30
sample_freq = 100
# fmt: off
random_seeds = [ 36656, 2369231, 416304, 10488077, 8982779, 12733201,
9845126, 9036584, 5140131, 8493390, 3049039, 2753893,
11563241, 5589942, 2091765, 2905119, 4240255, 10011807,
5576645, 591973, 4211685, 9275155, 10793741, 41300,
2858482, 6550368, 3346496, 12305126, 8717317, 6543552,
5614865, 9104526, 10435541, 11942766, 6667140, 10471522,
115475, 2721265, 309357, 9668522, 2698393, 9638443,
11499954, 1444356, 8745245, 7964854, 1768742, 8139908,
10646715, 10999907]
expanded_random_seeds = [36656, 2369231, 416304, 10488077, 8982779, 12733201, 9845126, 9036584, 5140131, 8493390,
3049039, 2753893, 11563241, 5589942, 2091765, 2905119, 4240255, 10011807, 5576645, 591973, 4211685, 9275155,
10793741, 41300, 2858482, 6550368, 3346496, 12305126, 8717317, 6543552, 5614865, 9104526, 10435541, 11942766,
6667140, 10471522, 115475, 2721265, 309357, 9668522, 2698393, 9638443, 11499954, 1444356, 8745245, 7964854,
1768742, 8139908, 10646715, 10999907, 3891503, 508475, 2491670, 1362179, 167142, 2849045, 346009, 1379404,
5193332, 757320, 479506, 4249123, 3971818, 454229, 2800725, 802453, 811789, 1729287, 382484, 605864, 644652,
2347953, 16178, 1789594, 4309409, 126945, 4046783, 3827639, 550594, 5428888, 3040434, 1284913, 8251921, 1135245,
3667530, 452012, 1228708, 3303413, 3696988, 1189699, 4435178, 639845, 6602376, 164941, 1166057, 5125548, 2109804,
3712830, 5461680, 3889621]
"""Exapnded random seeds generated via:
old_random_seeds = random_seeds
random.seed(89237560)
extra_50_random_seeds = [round((random.random()*random.randint(500000,10000000))) for i in range(50)]
expanded_random_seeds = old_random_seeds + extra_50_random_seeds"""
# fmt: on
NOISE_SCALES = [0.01, 0.05, 0.10]
def generate_mtist_master_datasets(save_datasets=True, save_example_figures=True):
"""mu.simulate and save master datasets by parameters outlined in MASTER_DATASET_DEFAULTS class"""
### Gather current conditions ###
random_seeds = MASTER_DATASET_DEFAULTS.random_seeds
tend = MASTER_DATASET_DEFAULTS.tend
dt = MASTER_DATASET_DEFAULTS.dt
sample_freq = MASTER_DATASET_DEFAULTS.sample_freq
noises = MASTER_DATASET_DEFAULTS.NOISE_SCALES
# Initialize conditions
conditions = []
for seed in random_seeds:
for noise in noises:
conditions.append((seed, noise))
# Load ground truths
aijs, grs = mu.load_ground_truths(mu.GLOBALS.GT_DIR)
gt_names = [
"3_sp_gt_1",
"3_sp_gt_2",
"3_sp_gt_3",
"10_sp_gt_1",
"10_sp_gt_2",
"10_sp_gt_3",
"100_sp_gt",
]
### DO THE SIMULATIONS ###
# Index then by name, seed, noise
results = {}
for name, aij, gr in zip(gt_names, aijs.values(), grs.values()):
for seed, noise in conditions:
t, y = mu.simulate(aij, gr, seed, noise, tend, dt, sample_freq)
results[(name, seed, noise)] = t, y
### MAKE RESULTS INTO FORMATTED DATAFRAME ###
# Make preliminary results df
df_results = pd.DataFrame.from_dict(results, orient="index", columns=["times", "abundances"])
index_tuples = df_results.index # to extract the tuples
# Create columns from the original `results` dictionary keys
# Combine with df_results
expanded_tuple_index = pd.DataFrame(index_tuples.to_list(), columns=["name", "seed", "noise"])
df_results = expanded_tuple_index.join(df_results.reset_index(drop=True))
# add in the n_species name
n_species_col = df_results["name"].str.split("_", expand=True)[0].to_frame(name="n_species")
df_results = n_species_col.join(df_results)
# Set the index right
df_results.index.name = "master_did"
### SAVE IF NEEDED ###
if save_datasets:
try:
os.mkdir(mu.GLOBALS.MASTER_DATASET_DIR)
except Exception as e:
print(e)
# SAVE the metadata
df_results[["name", "seed", "noise"]].to_csv(
os.path.join(mu.GLOBALS.MASTER_DATASET_DIR, "master_metadata.csv")
)
# SAVE the master datasets
for idx in df_results.index:
# Get integer number of species
n_species = int(df_results.loc[idx, "n_species"])
# Obtain metadata
name, seed, noise = df_results.loc[idx, ["name", "seed", "noise"]]
# Create dataframe of only time/abundances
time_and_abundances = np.hstack(
(df_results.iloc[idx, :].times, df_results.iloc[idx, :].abundances)
)
# Combine time/abundances dataframe with the metadata
formatted_master_df = pd.DataFrame(
time_and_abundances, columns=["time"] + [f"species_{i}" for i in range(n_species)]
).assign(ground_truth=name, timeseries_id=seed, noise=noise, n_species=n_species)
# Save each dataset indexed by master dataset index
formatted_master_df.to_csv(
os.path.join(mu.GLOBALS.MASTER_DATASET_DIR, f"master_dataset_{idx}.csv")
)
elif save_datasets is False:
return df_results
if save_example_figures:
plot_master_datasets(df_results, save=True)
def plot_master_datasets(df_results, save=False):
"""Generate example figures from the final master dataset dataframe
Args:
df_results (pd.DataFrame): Results dataframe, requires generation by generate_mtist_master_datasets function
"""
# Plot all ground truths
grp = df_results.groupby(["name", "n_species", "noise"])
k = 0
for (name, n_species, noise), df in grp:
# Outer loop gets a 50-row dataset of all seeds at a single value of noise/ground truth (and thus, 'name')
fig, axes = easy_subplots(ncols=5, nrows=10, base_figsize=(3, 2))
for i, (ax, seed) in enumerate(zip(axes, df["seed"].unique())):
n_species = int(n_species)
cur_time = df.query("seed == @seed")["times"].values[0]
cur_abundances = df.query("seed == @seed")["abundances"].values[0]
[ax.plot(cur_time, cur_abundances[:, i_sp]) for i_sp in range(n_species)]
fig.suptitle(f"ground_truth_{name}_noise_{noise}")
despine(fig)
if save:
savefig(
fig,
os.path.join(
mu.GLOBALS.MASTER_DATASET_DIR, f"master_dataset_graphed_{name}_noise_{noise}"
),
ft="jpg",
)
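# Hedged usage sketch (function names taken from this module; GT_DIR and
# MASTER_DATASET_DIR are assumed to be configured in mtist_utils.GLOBALS):
#   df = generate_mtist_master_datasets(save_datasets=False, save_example_figures=False)
#   plot_master_datasets(df, save=False)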
class TOY_DATASET_DEFAULTS:
gt_names = [
"3_sp_gt_1",
"3_sp_gt_2",
"3_sp_gt_3",
"10_sp_gt_1",
"10_sp_gt_2",
"10_sp_gt_3",
"100_sp_gt",
]
TOY_MASTER_DIR = "toy_master_dir"
def generate_toy_datasets(save_datasets=True, plot_example_figures=True):
"""Same code as the generate_master_datasets function, but allow for the exclusion
of certain GTs for debugging purposes."""
### Gather current conditions ###
random_seeds = MASTER_DATASET_DEFAULTS.random_seeds
tend = MASTER_DATASET_DEFAULTS.tend
dt = MASTER_DATASET_DEFAULTS.dt
sample_freq = MASTER_DATASET_DEFAULTS.sample_freq
noises = MASTER_DATASET_DEFAULTS.NOISE_SCALES
# Initialize conditions
conditions = []
for seed in random_seeds:
for noise in noises:
conditions.append((seed, noise))
# Load ground truths
aijs, grs = mu.load_ground_truths(mu.GLOBALS.GT_DIR)
# fmt: off
gt_names = TOY_DATASET_DEFAULTS.gt_names # CHANGE EXISTS HERE
### DO THE SIMULATIONS ###
# Index then by name, seed, noise
results = {}
for name in gt_names: # CHANGE EXISTS HERE
aij = aijs[name] # CHANGE EXISTS HERE
gr = grs[name] # CHANGE EXISTS HERE
for seed, noise in conditions:
t, y = mu.simulate(aij, gr, seed, noise, tend, dt, sample_freq)
results[(name, seed, noise)] = t, y
# fmt: on
### MAKE RESULTS INTO FORMATTED DATAFRAME ###
# Make preliminary results df
df_results = pd.DataFrame.from_dict(results, orient="index", columns=["times", "abundances"])
index_tuples = df_results.index # to extract the tuples
# Create columns from the original `results` dictionary keys
# Combine with df_results
expanded_tuple_index = pd.DataFrame(index_tuples.to_list(), columns=["name", "seed", "noise"])
df_results = expanded_tuple_index.join(df_results.reset_index(drop=True))
# add in the n_species name
n_species_col = df_results["name"].str.split("_", expand=True)[0].to_frame(name="n_species")
df_results = n_species_col.join(df_results)
# Set the index right
df_results.index.name = "master_did"
### SAVE IF NEEDED ###
if save_datasets:
try:
os.mkdir(mu.GLOBALS.TOY_DATASET_DIR)
except Exception as e:
print(e)
# SAVE the metadata
df_results[["name", "seed", "noise"]].to_csv(
os.path.join(mu.GLOBALS.TOY_DATASET_DIR, "master_metadata.csv")
)
# SAVE the master datasets
for idx in df_results.index:
# Get integer number of species
n_species = int(df_results.loc[idx, "n_species"])
# Obtain metadata
name, seed, noise = df_results.loc[idx, ["name", "seed", "noise"]]
# Create dataframe of only time/abundances
time_and_abundances = np.hstack(
(df_results.iloc[idx, :].times, df_results.iloc[idx, :].abundances)
)
# Combine time/abundances dataframe with the metadata
formatted_master_df = pd.DataFrame(
time_and_abundances, columns=["time"] + [f"species_{i}" for i in range(n_species)]
).assign(ground_truth=name, timeseries_id=seed, noise=noise, n_species=n_species)
# Save each dataset indexed by master dataset index
formatted_master_df.to_csv(
os.path.join(mu.GLOBALS.TOY_DATASET_DIR, f"master_dataset_{idx}.csv")
)
elif save_datasets is False:
return df_results
if plot_example_figures:
        plot_master_datasets(df_results, save=False)
import tkinter as tk
ventana = tk.Tk()
ventana.title("RESTAURANTE(Todo lo que puedas comer)")
ventana.geometry("700x500")
ventana.configure(bg = "white")
#variables
cliente = tk.StringVar()
ruc = tk.StringVar()
producto1 = tk.StringVar()
producto2 = tk.StringVar()
producto3 = tk.StringVar()
producto4 = tk.StringVar()
producto5 = tk.StringVar()
producto6 = tk.StringVar()
producto7 = tk.StringVar()
producto8 = tk.StringVar()
producto9 = tk.StringVar()
producto10 = tk.StringVar()
producto11 = tk.StringVar()
producto12 = tk.StringVar()
producto13 = tk.StringVar()
producto14 = tk.StringVar()
producto15 = tk.StringVar()
cantidad1 = tk.StringVar()
cantidad2 = tk.StringVar()
cantidad3 = tk.StringVar()
cantidad4 = tk.StringVar()
cantidad5 = tk.StringVar()
cantidad6 = tk.StringVar()
cantidad7 = tk.StringVar()
cantidad8 = tk.StringVar()
cantidad9 = tk.StringVar()
cantidad10 = tk.StringVar()
cantidad11 = tk.StringVar()
cantidad12 = tk.StringVar()
cantidad13 = tk.StringVar()
cantidad14 = tk.StringVar()
cantidad15 = tk.StringVar()
precio1 = tk.StringVar()
precio2 = tk.StringVar()
precio3 = tk.StringVar()
precio4 = tk.StringVar()
precio5 = tk.StringVar()
precio6 = tk.StringVar()
precio7 = tk.StringVar()
precio8 = tk.StringVar()
precio9 = tk.StringVar()
precio10 = tk.StringVar()
precio11 = tk.StringVar()
precio12 = tk.StringVar()
precio13 = tk.StringVar()
precio14 = tk.StringVar()
precio15 = tk.StringVar()
resultado1 = tk.StringVar()
resultado2 = tk.StringVar()
resultado3 = tk.StringVar()
resultado4 = tk.StringVar()
resultado5 = tk.StringVar()
resultado6 = tk.StringVar()
resultado7 = tk.StringVar()
resultado8 = tk.StringVar()
resultado9 = tk.StringVar()
resultado10 = tk.StringVar()
resultado11 = tk.StringVar()
resultado12 = tk.StringVar()
resultado13 = tk.StringVar()
resultado14 = tk.StringVar()
resultado15 = tk.StringVar()
resultadoT = tk.StringVar()
resultadoTIVA = tk.StringVar()
iva = tk.StringVar()
lista = [producto1,producto2,producto3,producto4,producto5,producto6,producto7,producto8,producto9,producto10,producto11,producto12,producto13,producto14,producto15,precio1,precio2,precio3,precio4,precio5,precio6,precio7,precio8,precio9,precio10,precio11,precio12,precio13,precio14,precio15,cantidad1,cantidad2,cantidad3,cantidad4,cantidad5,cantidad6,cantidad7,cantidad8,cantidad9,cantidad10,cantidad11,cantidad12,cantidad13,cantidad14,cantidad15]
compro = False
#funciones
def digit():
    # calcular() converts every cantidad and precio field to float, so require all
    # of those fields to be numeric; the product-name entries are free text and are
    # therefore skipped (they occupy the first 15 slots of `lista`).
    global compro
    numeric_fields = lista[15:]  # precios and cantidades only
    compro = all(f.get().replace('.', '', 1).isdigit() for f in numeric_fields)
def calcular():
    digit()
print(compro)
if compro == True:
resultado1.set(float(cantidad1.get())*float(precio1.get()))
resultado2.set(float(cantidad2.get())*float(precio2.get()))
resultado3.set(float(cantidad3.get())*float(precio3.get()))
resultado4.set(float(cantidad4.get())*float(precio4.get()))
resultado5.set(float(cantidad5.get())*float(precio5.get()))
resultado6.set(float(cantidad6.get())*float(precio6.get()))
resultado7.set(float(cantidad7.get())*float(precio7.get()))
resultado8.set(float(cantidad8.get())*float(precio8.get()))
resultado9.set(float(cantidad9.get())*float(precio9.get()))
resultado10.set(float(cantidad10.get())*float(precio10.get()))
resultado11.set(float(cantidad11.get())*float(precio11.get()))
resultado12.set(float(cantidad12.get())*float(precio12.get()))
resultado13.set(float(cantidad13.get())*float(precio13.get()))
resultado14.set(float(cantidad14.get())*float(precio14.get()))
resultado15.set(float(cantidad15.get())*float(precio15.get()))
resultadoT.set("Subtotal: "+ str(float(resultado1.get())+float(resultado2.get())+float(resultado3.get())+float(resultado4.get())+float(resultado5.get())+float(resultado6.get())+float(resultado7.get())+float(resultado8.get())+float(resultado9.get())+float(resultado10.get())+float(resultado11.get())+float(resultado12.get())+float(resultado13.get())+float(resultado14.get())+float(resultado15.get())))
iva.set("Iva: " + str(0.12*(float(resultado1.get())+float(resultado2.get())+float(resultado3.get())+float(resultado4.get())+float(resultado5.get())+float(resultado6.get())+float(resultado7.get())+float(resultado8.get())+float(resultado9.get())+float(resultado10.get())+float(resultado11.get())+float(resultado12.get())+float(resultado13.get())+float(resultado14.get())+float(resultado15.get()))))
resultadoTIVA.set("Total: " + str(1.12*(float(resultado1.get())+float(resultado2.get())+float(resultado3.get())+float(resultado4.get())+float(resultado5.get())+float(resultado6.get())+float(resultado7.get())+float(resultado8.get())+float(resultado9.get())+float(resultado10.get())+float(resultado11.get())+float(resultado12.get())+float(resultado13.get())+float(resultado14.get())+float(resultado15.get()))))
def limpiar():
cliente.set("")
ruc.set("")
producto1.set("")
producto2.set("")
producto3.set("")
producto4.set("")
producto5.set("")
producto6.set("")
producto7.set("")
producto8.set("")
producto9.set("")
producto10.set("")
producto11.set("")
producto12.set("")
producto13.set("")
producto14.set("")
producto15.set("")
cantidad1.set("")
cantidad2.set("")
cantidad3.set("")
cantidad4.set("")
cantidad5.set("")
cantidad6.set("")
cantidad7.set("")
cantidad8.set("")
cantidad9.set("")
cantidad10.set("")
cantidad11.set("")
cantidad12.set("")
cantidad13.set("")
cantidad14.set("")
cantidad15.set("")
precio1.set("")
precio2.set("")
precio3.set("")
precio4.set("")
precio5.set("")
precio6.set("")
precio7.set("")
precio8.set("")
precio9.set("")
precio10.set("")
precio11.set("")
precio12.set("")
precio13.set("")
precio14.set("")
precio15.set("")
resultado1.set("")
resultado2.set("")
resultado3.set("")
resultado4.set("")
resultado5.set("")
resultado6.set("")
resultado7.set("")
resultado8.set("")
resultado9.set("")
resultado10.set("")
resultado11.set("")
resultado12.set("")
resultado13.set("")
resultado14.set("")
resultado15.set("")
resultadoT.set("")
resultadoTIVA .set("")
iva.set("")
tk.Label(text="FACTURA :) ", bg= "white").place(x=275,y=10)
tk.Label(text="Cliente: ", bg= "white").place(x=10,y=40)
tk.Label(text="RUC: ", bg= "white").place(x=10,y=70)
tk.Entry(textvariable = cliente,bd=1,width=45,justify="left").place(x=100,y=40)
tk.Entry(textvariable = ruc,bd=1,width=45,justify="left").place(x=100,y=70)
tk.Button(text="limpiar",width=10,height=1, bg= "Orange", command= limpiar).place(x=570,y=40)
tk.Button(text="calcular",width=10,height=1,bg= "Orange", command= calcular).place(x=570,y=70)
tk.Label(text="PRODUCTO ", bg= "white").place(x=100,y=100)
tk.Label(text="CANTIDAD ", bg= "white").place(x=220,y=100)
tk.Label(text="PRECIO U. ", bg= "white").place(x=320,y=100)
tk.Label(text="PRECIO T.", bg= "white").place(x=420,y=100)
tk.Entry(textvariable = producto1,bd=1,width=12,justify="left").place(x=90,y=130)
tk.Entry(textvariable = producto2,bd=1,width=12,justify="left").place(x=90,y=160)
tk.Entry(textvariable = producto3,bd=1,width=12,justify="left").place(x=90,y=190)
tk.Entry(textvariable = producto4,bd=1,width=12,justify="left").place(x=90,y=220)
tk.Entry(textvariable = producto5,bd=1,width=12,justify="left").place(x=90,y=250)
tk.Entry(textvariable = producto6,bd=1,width=12,justify="left").place(x=90,y=280)
tk.Entry(textvariable = producto7,bd=1,width=12,justify="left").place(x=90,y=310)
tk.Entry(textvariable = producto8,bd=1,width=12,justify="left").place(x=90,y=340)
tk.Entry(textvariable = producto9,bd=1,width=12,justify="left").place(x=90,y=370)
tk.Entry(textvariable = producto10,bd=1,width=12,justify="left").place(x=90,y=400)
tk.Entry(textvariable = producto11,bd=1,width=12,justify="left").place(x=90,y=430)
tk.Entry(textvariable = producto12,bd=1,width=12,justify="left").place(x=90,y=460)
tk.Entry(textvariable = producto13,bd=1,width=12,justify="left").place(x=90,y=490)
tk.Entry(textvariable = producto14,bd=1,width=12,justify="left").place(x=90,y=520)
tk.Entry(textvariable = producto15,bd=1,width=12,justify="left").place(x=90,y=550)
tk.Entry(textvariable = cantidad1,bd=1,width=5,justify="left").place(x=230,y=130)
tk.Entry(textvariable = cantidad2,bd=1,width=5,justify="left").place(x=230,y=160)
tk.Entry(textvariable = cantidad3,bd=1,width=5,justify="left").place(x=230,y=190)
tk.Entry(textvariable = cantidad4,bd=1,width=5,justify="left").place(x=230,y=220)
tk.Entry(textvariable = cantidad5,bd=1,width=5,justify="left").place(x=230,y=250)
tk.Entry(textvariable = cantidad6,bd=1,width=5,justify="left").place(x=230,y=280)
tk.Entry(textvariable = cantidad7,bd=1,width=5,justify="left").place(x=230,y=310)
tk.Entry(textvariable = cantidad8,bd=1,width=5,justify="left").place(x=230,y=340)
tk.Entry(textvariable = cantidad9,bd=1,width=5,justify="left").place(x=230,y=370)
tk.Entry(textvariable = cantidad10,bd=1,width=5,justify="left").place(x=230,y=400)
tk.Entry(textvariable = cantidad11,bd=1,width=5,justify="left").place(x=230,y=430)
tk.Entry(textvariable = cantidad12,bd=1,width=5,justify="left").place(x=230,y=460)
tk.Entry(textvariable = cantidad13,bd=1,width=5,justify="left").place(x=230,y=490)
tk.Entry(textvariable = cantidad14,bd=1,width=5,justify="left").place(x=230,y=520)
tk.Entry(textvariable = cantidad15,bd=1,width=5,justify="left").place(x=230,y=550)
tk.Entry(textvariable = precio1,bd=1,width=8,justify="left").place(x=315,y=130)
tk.Entry(textvariable = precio2,bd=1,width=8,justify="left").place(x=315,y=160)
tk.Entry(textvariable = precio3,bd=1,width=8,justify="left").place(x=315,y=190)
tk.Entry(textvariable = precio4,bd=1,width=8,justify="left").place(x=315,y=220)
tk.Entry(textvariable = precio5,bd=1,width=8,justify="left").place(x=315,y=250)
tk.Entry(textvariable = precio6,bd=1,width=8,justify="left").place(x=315,y=280)
tk.Entry(textvariable = precio7,bd=1,width=8,justify="left").place(x=315,y=310)
tk.Entry(textvariable = precio8,bd=1,width=8,justify="left").place(x=315,y=340)
tk.Entry(textvariable = precio9,bd=1,width=8,justify="left").place(x=315,y=370)
tk.Entry(textvariable = precio10,bd=1,width=8,justify="left").place(x=315,y=400)
tk.Entry(textvariable = precio11,bd=1,width=8,justify="left").place(x=315,y=430)
tk.Entry(textvariable = precio12,bd=1,width=8,justify="left").place(x=315,y=460)
tk.Entry(textvariable = precio13,bd=1,width=8,justify="left").place(x=315,y=490)
tk.Entry(textvariable = precio14,bd=1,width=8,justify="left").place(x=315,y=520)
tk.Entry(textvariable = precio15,bd=1,width=8,justify="left").place(x=315,y=550)
tk.Label(textvariable = resultado1, bg="white", bd=1,width=8,justify="left").place(x=420,y=130)
tk.Label(textvariable = resultado2, bg="white", bd=1,width=8,justify="left").place(x=420,y=160)
tk.Label(textvariable = resultado3, bg="white", bd=1,width=8,justify="left").place(x=420,y=190)
tk.Label(textvariable = resultado4, bg="white", bd=1,width=8,justify="left").place(x=420,y=220)
tk.Label(textvariable = resultado5, bg="white", bd=1,width=8,justify="left").place(x=420,y=250)
tk.Label(textvariable = resultado6, bg="white", bd=1,width=8,justify="left").place(x=420,y=280)
tk.Label(textvariable = resultado7, bg="white", bd=1,width=8,justify="left").place(x=420,y=310)
tk.Label(textvariable = resultado8, bg="white", bd=1,width=8,justify="left").place(x=420,y=340)
tk.Label(textvariable = resultado9, bg="white", bd=1,width=8,justify="left").place(x=420,y=370)
tk.Label(textvariable = resultado10, bg="white", bd=1,width=8,justify="left").place(x=420,y=400)
tk.Label(textvariable = resultado11, bg="white", bd=1,width=8,justify="left").place(x=420,y=430)
tk.Label(textvariable = resultado12, bg="white", bd=1,width=8,justify="left").place(x=420,y=460)
tk.Label(textvariable = resultado13, bg="white", bd=1,width=8,justify="left").place(x=420,y=490)
tk.Label(textvariable = resultado14, bg="white", bd=1,width=8,justify="left").place(x=420,y=520)
tk.Label(textvariable = resultado15, bg="white", bd=1,width=8,justify="left").place(x=420,y=550)
tk.Label(textvariable = resultadoT,justify="left").place(x=370,y=350)
tk.Label(textvariable = iva,justify="left").place(x=370,y=380)
tk.Label(textvariable = resultadoTIVA ,justify="left").place(x=480,y=10)
ventana.mainloop()
# Repo: splunk-soar-connectors/attivo (gh_stars: 0)
# -----------------------------------------
# Phantom sample App Connector python file
# -----------------------------------------
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
# Usage of the consts file is recommended
# from attivo_consts import *
import requests
import json
import socket
import time
import os
from datetime import datetime
from base64 import b64encode
class RetVal(tuple):
def __new__(cls, val1, val2=None):
return tuple.__new__(RetVal, (val1, val2))
class BSAPI:
BS_DEFAULT_PORT = 8443
TIMEOUT = 20
def __init__(self, bs_host, bs_port=BS_DEFAULT_PORT, verify_ssl=False, timeout=TIMEOUT):
self.bs_host = bs_host
self.bs_port = bs_port
self.timeout = timeout
self.session_key = None
self.base_url = "https://{host}:{port}/api".format(host=self.bs_host, port=self.bs_port)
self.verify_ssl = verify_ssl
def do_request(self, url, data=None, headers=None, files=None, method=None, content_type='application/json', json_dump=True):
# Guess the method if not provided
if not method:
if data:
method = 'post'
else:
method = 'get'
headers = {}
if self.session_key:
headers = {'sessionKey': self.session_key}
if content_type:
headers['content-type'] = content_type
url = self.base_url + url
# Convert data dictionary to a string
if data and json_dump:
data = json.dumps(data)
request_func = getattr(requests, method)
r = None
try:
r = request_func(url, headers=headers, data=data, files=files, verify=self.verify_ssl)
except requests.exceptions.SSLError as e:
return("SSL verification failed")
except requests.exceptions.ConnectionError as e:
return("Could not connect to {host} ({exception})".format(host=self.bs_host, exception=e))
        except Exception as e:
return("Generic Exception: {exception}\nType is: {exception_type}".format(exception=e, exception_type=e.__class__.__name__))
if r.status_code in (401, 404):
return (r.text)
elif r and r.content:
return r.json()
else:
return None
def login(self, bs_user, bs_pass):
url = "/auth/login"
        login_data = {'userName': b64encode(bs_user), 'password': b64encode(bs_pass)}
login_status = self.do_request(url, data=login_data)
if login_status and 'sessionKey' in login_status:
self.session_key = login_status['sessionKey']
return (login_status)
def logout(self):
url = "/auth/logout"
logout_status = self.do_request(url)
return (logout_status)
def deploy_decoys(self, target_ip, vlan=None, decoy_number=1):
url = "/autodeploy/config"
if vlan:
data = {"config": [{"ipAddress": target_ip, "vlanID": vlan, "numberOfIPsToAcquire": decoy_number}]}
else:
data = {"config": [{"ipAddress": target_ip, "numberOfIPsToAcquire": decoy_number}]}
deploy_status = self.do_request(url, data=data, content_type=None)
return (deploy_status)
def get_threatdirect_rules(self):
url = "/nwinterfaces/get"
td_decoys = self.do_request(url)
return (td_decoys)
def get_bs_health(self):
url = "/device/health"
health = self.do_request(url)
return health
def get_monitoring_rules(self):
url = "/interfaces/get"
monitoring_rules = self.do_request(url, data='{}', method='post', json_dump=None)
return (monitoring_rules)
def get_deceptive_objects(self, object_type, object_id):
if object_type == 'USERS':
if object_id == 'ALL':
url = "/obj_group_cfg/summary/user"
else:
url = "/obj_group_cfg/user/{}".format(object_id)
else:
response = "Unknown option: {}".format(object_type)
return (response)
deceptive_objects = self.do_request(url)
return (deceptive_objects)
def get_playbooks(self):
url = '/pb/getAll'
return self.do_request(url)
def run_playbook(self, playbook_id, attacker_ip):
'This simulates an internal playbook execution based on the attacker IP'
url = '/pb/runplaybook'
data = {'attacker_ip': attacker_ip, 'playbook_id': playbook_id}
return self.do_request(url, data=data)
def get_events(self, severity_start=None, severity_end=None, timestampStart=None, timestampEnd=None,
offset=None, acknowledged='unacknowledged', attackerIP=[], category=[],
device=[], service=[], targetOs=[], targetHost=[], targetIP=[],
targetVLAN=[], keywords=[], description=[], comments=[]):
url = "/eventsquery/alerts"
if attackerIP and attackerIP[0] is None:
attackerIP = []
if targetIP and targetIP[0] is None:
targetIP = []
if targetVLAN and targetVLAN[0] is None:
targetVLAN = []
query_data = {'severity_start': severity_start, 'severity_end': severity_end, 'timestampStart': timestampStart,
'timestampEnd': timestampEnd, 'offset': offset, 'acknowledged': acknowledged, 'attackerIp': attackerIP,
'category': category, 'device': device, 'service': service, 'targetOs': targetOs, 'targetHost': targetHost,
'targetIP': targetIP, 'targetVLAN': targetVLAN, 'keywords': keywords, 'description': description,
'comments': comments}
event_data = self.do_request(url, data=query_data)
return (event_data)
# def convert_kill_chain(self, attack_phase):
# # Reconnaissance
# # Weaponization
# # Delivery
# # Exploitation
# # Installation
# # Command & Control
# # Actions on Objectives
# conversion = {
# 'Access': '',
# 'C&C': 'Command & Control',
# 'Deceptive Credentials': 'Exploitation',
# 'Decoy Data': 'Actions on Objectives',
# 'Exploit': 'Exploitation',
# 'Information': '',
# 'MITM': '',
# 'Payload Drop': '',
# 'Recon': 'Reconnaissance',
# }
def convert_severity_phantom(self, severity):
default = 'low'
conversion = {
'Very High': 'high',
'High': 'high',
'Medium': 'medium'
}
if severity in conversion:
return conversion[severity]
else:
return default
def convert_severity(self, severity_string):
conversion = {
'Very High': 14,
'High': 11,
'Medium': 7,
'Low': 4,
'Very Low': 3,
'System Activity': 0
}
if severity_string in conversion:
return conversion[severity_string]
else:
return None
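# Hedged usage sketch of the BSAPI wrapper above (host and credentials are placeholders):
#   api = BSAPI('botsink.example.com', verify_ssl=False)
#   api.login('api_user', 'api_password')
#   events = api.get_events(severity_start='11', severity_end='15',
#                           timestampStart='0', timestampEnd='now')
#   api.logout()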
class AttivoConnector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(AttivoConnector, self).__init__()
self._state = {}
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
self.save_progress("Testing connectivity to BOTsink {botsink}".format(botsink=self.botsink))
login_status = attivo_api.login(self.botsink_user, self.botsink_password)
# self.save_progress("LOGIN STATUS = ".format(login_status))
if login_status and 'sessionKey' in login_status:
self.save_progress("Attivo Login successful (session key = {key})".format(key=(login_status['sessionKey'])))
logout_status = attivo_api.logout()
if logout_status and 'status' in logout_status and logout_status['status']:
self.save_progress("Terminating session")
else:
self.save_progress("Could not terminate session ({status})".format(status=logout_status))
return action_result.set_status(phantom.APP_SUCCESS)
else:
self.save_progress("Login to {botsink} failed".format(botsink=self.botsink))
self.save_progress("API Results: {}".format(login_status))
def valid_ip(self, host):
try:
socket.inet_aton(host)
return True
except:
return False
def _handle_list_hosts(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
attivo_api.login(self.botsink_user, self.botsink_password)
# all_hosts = []
num_hosts = 0
td_monitoring = attivo_api.get_threatdirect_rules()
bs_monitoring = attivo_api.get_monitoring_rules()
try:
if td_monitoring['forwarder_vm_monitoring_rules']['forwarderVmMonitoringRules']:
for rule in td_monitoring['forwarder_vm_monitoring_rules']['forwarderVmMonitoringRules']:
if rule['type'] == 'onNet':
td_type = "EP"
else:
td_type = "VM"
host_names = []
if 'dnsName' in rule and rule['dnsName']:
host_names.append(rule['dnsName'])
host_entry = {
'ip': rule['ip'],
'mac': rule['customized_mac'],
'vlan': rule['vlanID'],
'dhcp': rule['dhcpip'],
'td_name': rule['threatDirectName'],
'td_type': td_type,
'host': ', '.join(host_names)
}
self.save_progress("ThreatDirect host entry: {}".format(host_entry))
num_hosts += 1
action_result.add_data(host_entry)
# all_hosts.append(host_entry)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching threat direct rules. Error: {0}. Detailed error: {1}'.format(td_monitoring, str(e)))
try:
if bs_monitoring['cfg_monitoring_rules']['monitoringRules']:
for rule in bs_monitoring['cfg_monitoring_rules']['monitoringRules']:
vlan = rule['vlanID']
if vlan == -1:
vlan = None
host_names = []
if 'dnsName' in rule and rule['dnsName']:
host_names.append(rule['dnsName'])
if 'interfaceName' in rule and rule['interfaceName']:
host_names.append(rule['interfaceName'])
host_entry = {
'ip': rule['ipAddress'],
'mac': rule['externalMAC'],
'dhcp': rule['isDHCPIP'],
'vlan': vlan,
'user_defined': rule['userDefined'],
'host': ", ".join(host_names)
}
if td_monitoring is not None:
host_entry['td_name'] = ''
host_entry['td_type'] = ''
self.save_progress("BOTSink host entry: {}".format(host_entry))
action_result.add_data(host_entry)
num_hosts += 1
# all_hosts.append(host_entry)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching monitoring rules. Error: {0}. Detailed error: {1}'.format(bs_monitoring, str(e)))
# if td_monitoring['forwarder_vm_monitoring_rules']['forwarderVmMonitoringRules']:
# headers.append('TD Name')
# headers.append('TD Type')
attivo_api.logout()
message = "{} decoy hosts present in the Attivo deception environment".format(num_hosts)
# action_result.add_data(all_hosts)
return action_result.set_status(phantom.APP_SUCCESS, status_message=message)
def _handle_check_host(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
summary = action_result.update_summary({})
host = param.get('host')
summary['is_deceptive'] = False
summary['host'] = host
message = "Host {} is NOT part of the Attivo deception environment".format(host)
# Generate BOTsink session key
attivo_api.login(self.botsink_user, self.botsink_password)
if self.valid_ip(host):
ip_address = host
host_name = None
else:
host_name = host
ip_address = None
# Check native Monitoring Rules
bs_monitoring = attivo_api.get_monitoring_rules()
try:
if bs_monitoring is not None:
for rule in bs_monitoring['cfg_monitoring_rules']['monitoringRules']:
this_ip = rule['ipAddress']
mac = rule['externalMAC']
dhcp = rule['isDHCPIP']
vlan = rule['vlanID']
if vlan == -1:
vlan = None
user_defined = rule['userDefined']
this_host_name = []
if 'dnsName' in rule and rule['dnsName']:
this_host_name.append(rule['dnsName'])
if rule['interfaceName']:
this_host_name.append(rule['interfaceName'])
if ip_address and this_ip == ip_address:
summary['is_deceptive'] = True
message = "Host {} IS part of the Attivo deception environment".format(host)
self.save_progress("BOTSink IP MATCH ({ip}) ({name}) ({user_defined}) ({mac}) ({dhcp}) ({vlan})".format(
ip=this_ip, name=this_host_name, user_defined=user_defined, mac=mac, dhcp=dhcp, vlan=vlan)
)
break
elif host_name and this_host_name and host_name in this_host_name:
summary['is_deceptive'] = True
message = "Host {} IS part of the Attivo deception environment".format(host)
self.save_progress("BOTSink HOST MATCH ({ip}) ({name}) ({user_defined}) ({mac}) ({dhcp}) ({vlan})".format(
ip=this_ip, name=this_host_name, user_defined=user_defined, mac=mac, dhcp=dhcp, vlan=vlan)
)
break
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching Attivo monitoring rules. Error: {0}. Detailed error: {1}'.format(bs_monitoring, str(e)))
if not summary['is_deceptive']:
# Check ThreatDirect Monitoring Rules
td_monitoring = attivo_api.get_threatdirect_rules()
if td_monitoring is not None:
for rule in td_monitoring['forwarder_vm_monitoring_rules']['forwarderVmMonitoringRules']:
this_ip = rule['ip']
this_host_name = []
mac = rule['customized_mac']
vlan = rule['vlanID']
dhcp = rule['dhcpip']
td_name = rule['threatDirectName']
if rule['type'] == 'onNet':
td_type = "EP"
else:
td_type = "VM"
if 'dnsName' in rule and rule['dnsName']:
this_host_name.append(rule['dnsName'])
if ip_address and this_ip == ip_address:
summary['is_deceptive'] = True
message = "Host {} IS part of the Attivo deception environment".format(host)
self.save_progress("TD IP MATCH ({ip}) ({host}) (mac}) ({dhcp}) ({vlan}) (td_name}) (td_type)".format(
ip=this_ip, name=this_host_name, mac=mac, dhcp=dhcp, vlan=vlan, td_name=td_name, td_type=td_type)
)
break
elif host_name and this_host_name and host_name in this_host_name:
summary['is_deceptive'] = True
message = "Host {} IS part of the Attivo deception environment".format(host)
self.save_progress("TD HOST MATCH ({ip}) ({name}) ({user_defined}) ({mac}) ({dhcp}) ({vlan})".format(
ip=this_ip, name=this_host_name, user_defined=user_defined, mac=mac, dhcp=dhcp, vlan=vlan)
)
break
if summary['is_deceptive']:
summary['td_name'] = td_name
summary['td_type'] = td_type
else:
summary['td_name'] = ''
summary['td_type'] = ''
if summary['is_deceptive']:
summary['ip'] = this_ip
summary['host_name'] = this_host_name
summary['user_defined'] = user_defined
summary['mac'] = mac
summary['dhcp'] = dhcp
summary['vlan'] = vlan
attivo_api.logout()
action_result.add_data(summary)
return action_result.set_status(phantom.APP_SUCCESS, status_message=message)
def _handle_list_users(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
attivo_api.login(self.botsink_user, self.botsink_password)
user_groups = attivo_api.get_deceptive_objects('USERS', 'ALL')
users = {}
try:
for user_group in user_groups['objGroup']:
group_id = user_group['esid']
group_name = user_group['name']
users_in_group = attivo_api.get_deceptive_objects('USERS', group_id)
self.save_progress("USERS IN GROUP: {}".format(users_in_group))
for user_object in users_in_group['objGroup']['objects']:
user = user_object['username']
if user in users:
users[user].append(group_name)
else:
users[user] = [group_name]
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching user groups. Error: {0}. Detailed error: {1}'.format(user_groups, str(e)))
attivo_api.logout()
# all_users = []
for user in sorted(users.keys(), key=lambda x: x.lower()):
user_entry = {'user': user, 'groups': ", ".join(users[user])}
# all_users.append(user_entry)
action_result.add_data(user_entry)
# action_result.add_data(all_users)
message = "{} users retireved from Attivo"
return action_result.set_status(phantom.APP_SUCCESS, status_message=message)
def _handle_check_user(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
summary = action_result.update_summary({})
user = param.get('user')
summary['is_deceptive'] = False
summary['user'] = user
message = "User {} is NOT part of Attivo deception".format(user)
# Lookup user
self.save_progress("Checking to see if \'{user}\' is a deceptive credential".format(user=user))
attivo_api.login(self.botsink_user, self.botsink_password)
this_user = None
user_groups = attivo_api.get_deceptive_objects('USERS', 'ALL')
in_groups = []
try:
for user_group in user_groups['objGroup']:
group_id = user_group['esid']
# self.save_progress("GROUP ID {}".format(group_id))
users_in_group = attivo_api.get_deceptive_objects('USERS', group_id)
for user_object in users_in_group['objGroup']['objects']:
this_user = user_object['username']
if this_user == user:
self.save_progress("BOTSink USER MATCH ({user}) ({groups})".format(user=this_user, groups=user_group['name']))
summary['is_deceptive'] = True
message = "User {} IS part of Attivo deception".format(user)
in_groups.append(user_group['name'])
break
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching User Groups. Error: {0}. Detailed error: {1}'.format(user_groups, str(e)))
if summary['is_deceptive']:
summary['user_group'] = in_groups
attivo_api.logout()
action_result.add_data(summary)
return action_result.set_status(phantom.APP_SUCCESS, status_message=message)
def _handle_list_playbooks(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
# summary = action_result.update_summary({})
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
attivo_api.login(self.botsink_user, self.botsink_password)
all_playbooks = attivo_api.get_playbooks()
attivo_api.logout()
try:
brief_playbook = {}
for playbook in all_playbooks['pb']:
brief_playbook = {
'id': playbook['id'],
'name': playbook['name']
}
if len(playbook['investigate']) > 0:
investigate_names = []
for investigate in playbook['investigate']:
investigate_names.append(investigate['name'])
brief_playbook['investigate'] = ', '.join(investigate_names)
else:
brief_playbook['investigate'] = []
if len(playbook['analyze']) > 0:
analyze_names = []
for analyze in playbook['analyze']:
analyze_names.append(analyze['name'])
brief_playbook['analyze'] = ', '.join(analyze_names)
else:
brief_playbook['analyze'] = []
if len(playbook['manage']) > 0:
manage_names = []
for manage in playbook['manage']:
manage_names.append(manage['name'])
brief_playbook['manage'] = ', '.join(manage_names)
else:
brief_playbook['manage'] = []
if len(playbook['isolate']) > 0:
isolate_names = []
for isolate in playbook['isolate']:
isolate_names.append(isolate['name'])
brief_playbook['isolate'] = ', '.join(isolate_names)
else:
brief_playbook['isolate'] = []
self.save_progress("Attivo Playbooks: {}".format(brief_playbook))
action_result.add_data(brief_playbook)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching playbook. Error: {0}. Detailed error: {1}'.format(all_playbooks, str(e)))
message = "{} Attivo playbooks found".format(len(all_playbooks['pb']))
return action_result.set_status(phantom.APP_SUCCESS, status_message=message)
def _handle_run_playbook(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
summary = action_result.update_summary({})
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
playbook_name = param['playbook_name']
attacker_ip = param['attacker_ip']
playbook_id = None
# Generate BOTsink session key
attivo_api.login(self.botsink_user, self.botsink_password)
# Find playbook ID
all_playbooks = attivo_api.get_playbooks()
try:
for playbook in all_playbooks['pb']:
if playbook['name'] == playbook_name:
playbook_id = playbook['id']
break
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching playbooks. Error: {0}. Detailed error: {1}'.format(all_playbooks, str(e)))
if not playbook_id:
self.save_progress("ID not found for Attivo playbook named: {}".format(playbook_name))
else:
self.save_progress("Running playbook \'{name}\' ({id}) with attacker IP {attacker}".format(name=playbook_name, id=playbook_id, attacker=attacker_ip))
playbook_status = attivo_api.run_playbook(playbook_id, attacker_ip)
self.save_progress("Run Attivo playbook status = {}".format(playbook_status))
action_result.add_data(playbook_status)
attivo_api.logout()
if playbook_status is None:
playbook_status = {'error': 'Unknown Error'}
summary['status'] = "Failed"
return RetVal(action_result.set_status(phantom.APP_ERROR, playbook_status), None)
if 'error' in playbook_status:
summary['status'] = "Failed"
return RetVal(action_result.set_status(phantom.APP_ERROR, playbook_status), None)
elif 'status' in playbook_status:
summary['status'] = playbook_status['status']
if summary['status'] == 'submitted':
return action_result.set_status(phantom.APP_SUCCESS)
else:
return RetVal(action_result.set_status(phantom.APP_SUCCESS, playbook_status), None)
else:
summary['status'] = "Failed"
return RetVal(action_result.set_status(phantom.APP_ERROR, playbook_status), None)
def _handle_get_events(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
attivo_api.login(self.botsink_user, self.botsink_password)
summary = action_result.update_summary({})
attacker_ips = []
attacker_ip = param['attacker_ip']
attacker_ips.append(attacker_ip)
hours_back = param['hours_back']
severity_string = str(param['severity'])
severity = str(attivo_api.convert_severity(severity_string))
timestampEnd = str(int(time.time()) * 1000)
severity_end = "15"
self.save_progress("Getting events for source IP: {source_ip}, severity: {severity}, hours back: {hours_back}".format(
source_ip=attacker_ips[0], severity=severity, hours_back=hours_back)
)
seconds_back = int(hours_back) * 60 * 60
timestampStart = str((int(time.time()) - seconds_back) * 1000)
events = attivo_api.get_events(severity_start=severity, severity_end=severity_end, timestampStart=timestampStart, timestampEnd=timestampEnd, attackerIP=attacker_ips)
try:
if events is None:
events = []
self.save_progress("Total events retrieved: None")
else:
self.save_progress("Total events retrieved: {event_count}".format(event_count=len(events['eventdata'])))
# self.save_progress("EVENTS: {}".format(events))
attivo_api.logout()
# brief_events = []
for event in events['eventdata']:
attack_name = event['attackName']
severity = event['details']['Severity']
target_ip = event['details']['Target IP']
target_os = event['details']['Target OS']
timestamp = event['details']['Timestamp']
brief_event = {'attack_name': attack_name, 'target_ip': target_ip, 'target_os': target_os, 'timestamp': timestamp, 'severity': severity}
# brief_events.append(brief_event)
action_result.add_data(brief_event)
self.save_progress("Event: {time},{severity},{name},{target_ip},{target_os}".format(
time=timestamp, severity=severity, name=attack_name, target_ip=target_ip, target_os=target_os)
)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR,
'Error occurred while fetching events. Error: {0}. Detailed error: {1}'.format(events, str(e)))
summary['ip'] = attacker_ips[0]
summary['hours_back'] = hours_back
summary['severity'] = severity_string
summary['total_events'] = len(events['eventdata'])
# action_result.add_data(brief_events)
message = "Retrieved {} events from {}".format(len(events['eventdata']), attacker_ip)
return action_result.set_status(phantom.APP_SUCCESS, status_message=message)
def _handle_deploy_decoy(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
summary = action_result.update_summary({})
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
vulnerable_endpoint = param['vulnerable_endpoint']
decoy_number = param.get('decoy_number', '1')
self.save_progress("Generating {num} decoys based on {ip}".format(num=decoy_number, ip=vulnerable_endpoint))
attivo_api.login(self.botsink_user, self.botsink_password)
deploy_status = attivo_api.deploy_decoys(vulnerable_endpoint, decoy_number=decoy_number)
attivo_api.logout()
action_result.add_data(deploy_status)
if 'result' in deploy_status and 'success' in deploy_status['result'][0] and deploy_status['result'][0]['success']:
summary['status'] = deploy_status['result'][0]['success']
return action_result.set_status(phantom.APP_SUCCESS)
else:
summary['status'] = "Failed"
return RetVal(action_result.set_status(phantom.APP_ERROR, deploy_status), None)
def _handle_on_poll(self, param):
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
date_pattern = "%Y-%m-%dT%H:%M:%S.%fZ"
os.environ['TZ'] = 'UTC'
config = self.get_config()
attivo_api = BSAPI(self.botsink, verify_ssl=self.verify_ssl)
attivo_api.login(self.botsink_user, self.botsink_password)
# botsink = config['botsink']
ingest_severity = config['ingest_severity']
severity_start = attivo_api.convert_severity(ingest_severity)
severity_end = "15"
if not severity_start:
self.save_progress("Attivo on_poll: Unknown severity specified ('{}'), using 'High'".format(ingest_severity))
ingest_severity = "High"
severity_start = "11"
last_run = {}
try:
last_run = self._state['last_run']
except KeyError:
pass
if not last_run or 'timestamp' not in last_run or last_run['timestamp'] == 0:
self.save_progress("Attivo on_poll: No previous last_run time discovered")
one_day_seconds = 24 * 60 * 60
days_back = int(config.get('first_fetch', 0))
first_fetch_seconds = (int(time.time()) - (one_day_seconds * days_back)) * 1000
last_run_time = first_fetch_seconds
else:
last_run_time = last_run['timestamp']
self.save_progress("Attivo on_poll: Getting new events of severity '{}' since {}".format(ingest_severity, last_run_time))
events = attivo_api.get_events(severity_start=severity_start, severity_end=severity_end,
timestampStart=last_run_time, timestampEnd='now')
if 'error' in events:
self.save_progress("Attivo on_poll ERROR: {}".format(events['error']))
return
self.save_progress("Attivo on_poll: Total new events: {}".format(len(events['eventdata'])))
new_last_run_time = 0
for event in events['eventdata']:
attack_name = event['attackName']
alert_id = event['esID']
severity_string = event['details']['Severity']
destination_ip = event['details']['Target IP']
destination_os = event['details']['Target OS']
destination_hostname = event['destIpDomain']
source_ip = event['details']['Attacker']
source_hostname = event['sourceIPDomain']
attack_description = event['attackDesc']
# phase = event['details']['Attack Phase']
# service = event['details']['Service']
event_time = event['details']['Timestamp']
date_obj = datetime.strptime(event_time, date_pattern)
event_timestamp = int((date_obj - datetime(1970, 1, 1)).total_seconds()) * 1000 + date_obj.microsecond / 1000
new_last_run_time = max(new_last_run_time, event_timestamp)
# kill_chain = attivo_api.convert_kill_chain(phase)
phantom_severity = str(attivo_api.convert_severity_phantom(severity_string))
self.save_progress("New Event: {time} ({timestamp}),{severity},{name},{source_ip},{destination_ip},{destination_os}".format(
time=event_time, timestamp=event_timestamp, severity=severity_string, name=attack_name, source_ip=source_ip,
destination_ip=destination_ip, destination_os=destination_os)
)
cef = {
'sourceAddress': source_ip,
'destinationAddress': destination_ip,
'sourceHostName': source_hostname,
'destinationHostName': destination_hostname,
'message': attack_description
}
artifact = {
'name': attack_name,
'cef': cef,
'severity': phantom_severity,
'label': 'event',
# 'ingest_app_id': 'Attivo BOTsink',
'source_data_identifier': alert_id
}
container = {
'name': attack_name,
'severity': phantom_severity,
'source_data_identifier': alert_id,
'artifacts': [artifact],
'label': 'events'
}
            # Using the esID as the container ID. If there is a duplicate, it will not be added
ret_val, msg, cid = self.save_container(container)
self.save_progress("Attivo on_poll: CONTAINER result {}, {}, {}".format(ret_val, cid, msg))
if phantom.is_fail(ret_val):
return self.set_status(phantom.APP_ERROR, "Error saving container: {}".format(msg))
if len(events['eventdata']) > 0 and new_last_run_time > 0:
new_last_run_time += 1
self.save_progress("Attivo on_poll: Setting new last run time to {}".format(new_last_run_time))
last_run = {'timestamp': new_last_run_time}
self._state['last_run'] = last_run
return self.set_status(phantom.APP_SUCCESS)
def finalize(self):
self.save_state(self._state)
return phantom.APP_SUCCESS
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
# Get the action that we are supposed to execute for this App Run
action_id = self.get_action_identifier()
self.debug_print("action_id", self.get_action_identifier())
if action_id == 'test_connectivity':
ret_val = self._handle_test_connectivity(param)
elif action_id == 'deploy_decoy':
ret_val = self._handle_deploy_decoy(param)
elif action_id == 'get_events':
ret_val = self._handle_get_events(param)
elif action_id == 'list_playbooks':
ret_val = self._handle_list_playbooks(param)
elif action_id == 'run_playbook':
ret_val = self._handle_run_playbook(param)
elif action_id == 'list_hosts':
ret_val = self._handle_list_hosts(param)
elif action_id == 'check_host':
ret_val = self._handle_check_host(param)
elif action_id == 'list_users':
ret_val = self._handle_list_users(param)
elif action_id == 'check_user':
ret_val = self._handle_check_user(param)
elif action_id == 'on_poll':
ret_val = self._handle_on_poll(param)
return ret_val
def initialize(self):
self._state = self.load_state()
# get the asset config
config = self.get_config()
botsink = config['botsink']
botsink_user = config['botsink_user']
botsink_password = config['<PASSWORD>']
verify_ssl = config['verify_ssl']
self._base_url = "https://" + botsink + ":8443/api"
self.botsink = botsink
self.botsink_user = botsink_user
        self.botsink_password = botsink_password  # value read from the asset config above
self.verify_ssl = verify_ssl
return phantom.APP_SUCCESS
if __name__ == '__main__':
import pudb
import argparse
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument('input_test_json', help='Input Test JSON file')
argparser.add_argument('-u', '--username', help='username', required=False)
argparser.add_argument('-p', '--password', help='password', required=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
if (username is not None and password is None):
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if (username and password):
login_url = BaseConnector._get_phantom_base_url() + "login"
try:
print ("Accessing the Login page")
r = requests.get(login_url, verify=False)
csrftoken = r.cookies['csrftoken']
data = dict()
data['username'] = username
data['password'] = password
data['csrfmiddlewaretoken'] = csrftoken
headers = dict()
headers['Cookie'] = 'csrftoken=' + csrftoken
headers['Referer'] = login_url
print ("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=False, data=data, headers=headers)
session_id = r2.cookies['sessionid']
except Exception as e:
print ("Unable to get session id from the platfrom. Error: " + str(e))
exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = AttivoConnector()
connector.print_progress_message = True
if (session_id is not None):
in_json['user_session_token'] = session_id
connector._set_csrf_info(csrftoken, headers['Referer'])
ret_val = connector._handle_action(json.dumps(in_json), None)
print (json.dumps(json.loads(ret_val), indent=4))
exit(0)
|
import time
class Log(object):
"""
:param start_time: Time (seconds) at which the logging process was started
:param end_time: Time (seconds) at which the last variable was logged
:param end_itr: Iteration at which the last variable was logged
    :param objval_ave: function value at the manifold mean
    :param consens_error: deviation-from-mean (consensus) error
    :param opt_variable: optimal variable of the problem, if provided
    :param opt_objval: optimal function value of the problem, if provided
    :param opt_estimate_var: output variable (manifold mean estimate) of the algorithm
    :param opt_estimate_obj: output function value of the algorithm
    :param record_time: CPU time spent recording the consensus error
    :param effective_time: total CPU time minus record_time
:param consensus_it: communication rounds per iteration
:param grad_stepsize: gradient step size
:param est_var: local estimation variable
:param size: number of agents
:param graph: graph info
:param time_local_obj: CPU time of computing local gradient, obj val
:param time_local_ret: CPU time of computing local retraction
:param time_reduce: CPU time of MPI Reduce and Allreduce time
:param time_projection: CPU time of projection onto tangent space
"""
def __init__(self):
self.Algname = None
self.data_shape = None
self.start_time = None
self.end_time = None
self.end_iter = 0
self.iter_history = []
self.time_history = []
self.objval_ave = []
self.ave_grad_norm = []
self.consens_error = []
self.distance_to_opt = []
self.opt_variable = None
self.opt_objval = None
self.opt_estimate_var = None
self.opt_estimate_obj = None
self.record_time = 0
self.record_history = []
self.effective_time = 0
self.consensus_time = 0
self.consensus_it = 1
self.grad_stepsize = 0.1
self.est_var = None
self.size = 1
self.graph = None
self.time_local_obj = 0
self.time_local_ret = 0
self.reduce_time = 0
self.mean_obj_time = 0
self.time_projection = 0
def log(self, Algname=None, data_shape=None, Iter=None, objval_ave=None, opt_var=None, opt_objval=None, ave_grad_norm=None,
consen_error=None, distance_to_opt=None, opt_estimate_var=None, opt_estimate_obj=None, record_time=None, time_consensus=None,
consensus_it=None, grad_stepsize=None, est_var=None, size=None, graph=None, time_local_obj=None, time_local_ret=None,
reduce_time=None, mean_obj_time=None, time_projection=None):
""" Log the variables, grad norm, function value with an iteration and time. """
if Algname is not None:
self.Algname = Algname
if data_shape is not None:
self.data_shape = data_shape
if Iter is not None:
t_now = time.time() - self.start_time
self.iter_history.append(Iter)
self.time_history.append(t_now)
self.end_time = t_now
self.end_iter = Iter
if objval_ave is not None:
self.objval_ave.append(objval_ave)
if ave_grad_norm is not None:
self.ave_grad_norm.append(ave_grad_norm)
if consen_error is not None:
self.consens_error.append(consen_error)
if opt_var is not None:
self.opt_variable = opt_var
if opt_objval is not None:
self.opt_objval = opt_objval
if distance_to_opt is not None:
self.distance_to_opt.append(distance_to_opt)
if opt_estimate_var is not None:
self.opt_estimate_var = opt_estimate_var
if opt_estimate_obj is not None:
self.opt_estimate_obj = opt_estimate_obj
if record_time is not None:
self.record_time += record_time
self.record_history.append(self.record_time)
self.effective_time = self.end_time - self.record_time
if time_consensus is not None:
self.consensus_time = time_consensus
if consensus_it is not None:
self.consensus_it = consensus_it
if grad_stepsize is not None:
self.grad_stepsize = grad_stepsize
if est_var is not None:
self.est_var = est_var
if size is not None:
self.size = size
if graph is not None:
self.graph = graph
if time_local_obj is not None:
self.time_local_obj = time_local_obj
if time_local_ret is not None:
self.time_local_ret = time_local_ret
if reduce_time is not None:
self.reduce_time = reduce_time
if mean_obj_time is not None:
self.mean_obj_time = mean_obj_time
if time_projection is not None:
self.time_projection = time_projection
def print_rgd_value(self):
if self.effective_time is None:
self.effective_time = self.end_time
print('==================== Results ========================')
print(f'Epoch: {self.end_iter};\n'
f'Total CPU time(including compute average using All_Reduce): {self.end_time:.3f};\n'
f'Local total CPU time(exclude All_Reduce and computation on mean): {self.effective_time:.3f};\n'
f'Consensus time: {self.consensus_time:.3f};\n'
f'Local obj function time: {self.time_local_obj:.3f};\n'
f'Local retraction time: {self.time_local_ret:.3f};\n'
f'Projection time: {self.time_projection:.3f};\n'
f'MPI (All)Reduce time: {self.reduce_time:.3f};\n'
f'time of computation on mean : {self.mean_obj_time:.3f};\n')
if self.consens_error:
print(f'Consensus_error: {self.consens_error[-1]: .4e}.')
if self.ave_grad_norm:
print(f'Riemannian grad norm at manifold average: {self.ave_grad_norm[-1]:.3e}')
if self.opt_estimate_obj:
print(f'Objective val: {self.opt_estimate_obj}')
if self.distance_to_opt:
print(f'Distance to ground truth: '
f'{min(self.distance_to_opt):.3e}')
print('\n')
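# Minimal usage sketch (illustrative only, not part of the original module):
# record a few iterations with the Log object defined above and print a summary.
if __name__ == "__main__":
    log = Log()
    log.start_time = time.time()
    for it in range(3):
        log.log(Algname="DemoAlg", Iter=it, objval_ave=1.0 / (it + 1),
                consen_error=0.1 ** it, grad_stepsize=0.1)
    log.print_rgd_value()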
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QObject, pyqtSlot
from files_handler import FolderHandler
from image_handler import ImageHandler
from main_alg import MainAlg
class Ui_MainWindow(QObject):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setFixedSize(626, 781)
MainWindow.setMouseTracking(True)
MainWindow.setToolTipDuration(4)
MainWindow.setStyleSheet("#MainWindow {\n"
"background: gray;\n"
"}\n"
"\n"
"\n"
"#MainWindow {\n"
"border: 3px solid gray;\n"
"border-radius: 40px;\n"
"background: white;\n"
"}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setAutoFillBackground(True)
self.centralwidget.setObjectName("centralwidget")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setGeometry(QtCore.QRect(0, 300, 631, 160))
self.textBrowser.setObjectName("textBrowser")
self.Main = QtWidgets.QLabel(self.centralwidget)
self.Main.setGeometry(QtCore.QRect(0, 0, 651, 761))
self.Main.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Main.setObjectName("Main")
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 600, 631, 81))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.photos_file_path = QtWidgets.QLabel(self.centralwidget)
self.photos_file_path.setGeometry(QtCore.QRect(10, 540, 161, 31))
self.photos_file_path.setObjectName("photos_file_path")
self.line_photos_path = QtWidgets.QLineEdit(self.centralwidget)
self.line_photos_path.setGeometry(QtCore.QRect(195, 550, 261, 21))
self.line_photos_path.setObjectName("line_photos_path")
self.upload_photo_tag = QtWidgets.QLabel(self.centralwidget)
self.upload_photo_tag.setGeometry(QtCore.QRect(10, 510, 180, 31))
self.upload_photo_tag.setObjectName("upload_photo_tag")
self.line_upload_photo = QtWidgets.QLineEdit(self.centralwidget)
self.line_upload_photo.setGeometry(QtCore.QRect(195, 520, 261, 21))
self.line_upload_photo.setObjectName("line_upload_photo")
self.browse_photo = QtWidgets.QPushButton(self.centralwidget)
self.browse_photo.setGeometry(QtCore.QRect(460, 520, 71, 31))
self.browse_photo.setObjectName("browse_photo")
self.browse_photo.clicked.connect(self.browse_photo_slot)
self.browse_path = QtWidgets.QPushButton(self.centralwidget)
self.browse_path.setGeometry(QtCore.QRect(460, 550, 71, 31))
self.browse_path.setObjectName("browse_path")
self.browse_path.clicked.connect(self.browse_photos_file_slot)
self.upload = QtWidgets.QPushButton(self.centralwidget)
self.upload.setGeometry(QtCore.QRect(540, 520, 71, 31))
self.upload.setObjectName("upload")
self.upload.clicked.connect(self.upload_button)
self.line_friend_name = QtWidgets.QLineEdit(self.centralwidget)
self.line_friend_name.setGeometry(QtCore.QRect(195, 490, 261, 21))
self.line_friend_name.setObjectName("line_friend_name")
self.friend_name_tag = QtWidgets.QLabel(self.centralwidget)
self.friend_name_tag.setGeometry(QtCore.QRect(10, 490, 161, 31))
self.friend_name_tag.setObjectName("friend_name_tag")
self.debuging_text = QtWidgets.QTextBrowser(self.centralwidget)
self.debuging_text.setGeometry(QtCore.QRect(90, 610, 450, 61))
self.debuging_text.setObjectName("debuging_text")
self.start = QtWidgets.QPushButton(self.centralwidget)
self.start.setGeometry(QtCore.QRect(260, 690, 113, 32))
self.start.setObjectName("start")
self.start.clicked.connect(self.start_def)
self.debugging_string = ''
self.Main.raise_()
self.textBrowser.raise_()
self.frame.raise_()
self.photos_file_path.raise_()
self.line_photos_path.raise_()
self.upload_photo_tag.raise_()
self.line_upload_photo.raise_()
self.browse_photo.raise_()
self.browse_path.raise_()
self.upload.raise_()
self.line_friend_name.raise_()
self.friend_name_tag.raise_()
self.debuging_text.raise_()
self.start.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.current_photo_path = None
self.photos_folder_path = None
def get_browse_path(self):
"""
Get path for photos file
"""
title = "Choose a destination"
flags = QtWidgets.QFileDialog.ShowDirsOnly
folder_path = str(QtWidgets.QFileDialog.getExistingDirectory(None, title, '', flags))
self.photos_folder_path = folder_path
        self.line_photos_path.setText(folder_path)
        return folder_path
def get_photo(self):
"""
        Open a file dialog and return the path of the photo the user chose
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
folder_path, _ = QtWidgets.QFileDialog.getOpenFileName(
None,
"QFileDialog.getOpenFileName()",
"",
"All Files (*);;Python Files (*.py)",
options=options)
return folder_path
def write_debugging(self, text):
"""
Write text on debugging widget
:param text: the text we wants to write
"""
self.debugging_string = text + '\n' + self.debugging_string
self.debuging_text.hide()
self.debuging_text.setText(self.debugging_string)
self.debuging_text.show()
@pyqtSlot()
def upload_button(self):
"""
        Activate the algorithm that puts the photos the user wants to upload in the right folder
"""
if self.current_photo_path:
folder_obj = FolderHandler()
image_op = ImageHandler()
name = self.line_friend_name.text()
if not name:
self.write_debugging('Hey you! :) You must enter friend name first')
return
folder_path = folder_obj.create_folder(name)
if folder_path:
image_op.read_photo_and_write_it_on_right_folder(self.current_photo_path, folder_path)
else:
self.write_debugging("Something went wrong and we can't create a folder")
self.write_debugging('Photo {0} of {1} was uploaded'.format(self.current_photo_path.split('/')[-1], self.line_friend_name.text()))
@pyqtSlot()
def start_def(self):
"""
Starts the algorithm for face recognition
"""
        if self.photos_folder_path is None:
self.write_debugging('You must enter a photos path first :)')
return
else:
self.write_debugging('Great! it will take a while, be patient')
alg = MainAlg()
self.write_debugging('Preparing data')
prepare_text, none_photos = alg.prepare_data()
for line in none_photos:
self.write_debugging(line)
self.write_debugging(prepare_text)
training_text = alg.train_the_model()
self.write_debugging(training_text)
alg.start_prediction(self.photos_folder_path)
self.write_debugging('Finished! :)')
@pyqtSlot()
def browse_photo_slot(self):
"""
Handles user photos upload
:return:
"""
photo_path = self.get_photo()
if photo_path:
self.current_photo_path = photo_path
self.line_upload_photo.hide()
self.line_upload_photo.setText(photo_path)
self.line_upload_photo.show()
@pyqtSlot()
def browse_photos_file_slot(self):
"""
Handles users photos file location
"""
folder_path = self.get_browse_path()
if folder_path:
self.photos_folder_path = folder_path
self.line_photos_path.setText(folder_path)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Share My Photos "))
self.textBrowser.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">Wellcome ! :)</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">We all want to share those amazing moments with the people we love</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">and now it\'s so easy!</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">1. On Friend\'s name line, write the name of the friend to want to share the photos with</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">2. Upload at the Upload 5+ friends test photos line one photo each time. As more you photos upload, the system gets it will learn better</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">3. Choose the folder in which you have the photos you want the share</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#35408c;\">4. Finally, press let\'s do it! give it a few moments and don\'t worry, the system will create on the photos folder new folders to each friend with his photos</span></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.Main.setText(_translate("MainWindow", "<html><head/><body><p><img src=\"./beach-background-with-sunglasses-starfish_1101-313.jpg\"/></p></body></html>"))
self.photos_file_path.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#eee709; vertical-align:sub;\">Where are all the photos?</span></p><p><br/></p></body></html>"))
self.upload_photo_tag.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#eee709; vertical-align:sub;\">Upload 5+ friends test photos</span></p></body></html>"))
self.browse_photo.setText(_translate("MainWindow", "Browse"))
self.browse_path.setText(_translate("MainWindow", "Browse"))
self.upload.setText(_translate("MainWindow", "Upload"))
self.friend_name_tag.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#eee709; vertical-align:sub;\">Friend's name</span></p><p><br/></p></body></html>"))
self.debuging_text.setToolTip(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.start.setText(_translate("MainWindow", "Let's do it!"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
# ================================================================
# Created by <NAME> on 9/17/18.
# Copyright (c) 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================
import numpy as np
from utils.sampling import sample_at
def level_set_term_gradient(warped_live_field, epsilon=1e-5):
(live_gradient_x_field, live_gradient_y_field) = np.gradient(warped_live_field)
# TODO (hessian, maybe look at
# https://stackoverflow.com/questions/18991408/python-finite-difference-functions and
# https://stackoverflow.com/questions/31206443/numpy-second-derivative-of-a-ndimensional-array
# for clues)
def level_set_term_at_location(warped_live_field, x, y, epsilon=1e-5):
live_y_minus_one = sample_at(warped_live_field, x, y - 1)
# live_y_minus_two = sample_at(warped_live_field, x, y - 2)
live_x_minus_one = sample_at(warped_live_field, x - 1, y)
# live_x_minus_two = sample_at(warped_live_field, x - 2, y)
live_y_plus_one = sample_at(warped_live_field, x, y + 1)
# live_y_plus_two = sample_at(warped_live_field, x, y + 2)
live_x_plus_one = sample_at(warped_live_field, x + 1, y)
# live_x_plus_two = sample_at(warped_live_field, x + 2, y)
live_sdf = sample_at(warped_live_field, x, y)
live_x_minus_one_y_minus_one = sample_at(warped_live_field, x - 1, y - 1)
live_x_plus_one_y_minus_one = sample_at(warped_live_field, x + 1, y - 1)
live_x_minus_one_y_plus_one = sample_at(warped_live_field, x - 1, y + 1)
live_x_plus_one_y_plus_one = sample_at(warped_live_field, x + 1, y + 1)
x_grad = 0.5 * (live_x_plus_one - live_x_minus_one)
y_grad = 0.5 * (live_y_plus_one - live_y_minus_one)
    grad_xx = live_x_plus_one - 2 * live_sdf + live_x_minus_one
    grad_yy = live_y_plus_one - 2 * live_sdf + live_y_minus_one
# grad_xx = live_x_plus_two - 2*live_sdf + live_y_plus_two
# grad_yy = live_y_plus_two - 2*live_sdf + live_y_plus_two
grad_xy = 0.25 * (live_x_plus_one_y_plus_one - live_x_minus_one_y_plus_one -
live_x_plus_one_y_minus_one + live_x_minus_one_y_minus_one)
scale_factor = 10.0 # really should equal narrow-band half-width in voxels
gradient = np.array([[x_grad, y_grad]]).T * scale_factor
hessian = np.array([[grad_xx, grad_xy],
[grad_xy, grad_yy]]) * scale_factor
gradient_length = np.linalg.norm(gradient)
level_set_gradient = ((1.0 - gradient_length) / (gradient_length + epsilon) * hessian.dot(gradient)).reshape(-1)
local_energy_contribution = 0.5 * pow((gradient_length - 1.0), 2)
return level_set_gradient, local_energy_contribution
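# Hypothetical sanity check (not part of the original module): verify the
# central-difference stencils used above on a tiny hand-made field, using
# plain numpy indexing so the sample_at helper is not needed for the check itself.
if __name__ == "__main__":
    field = np.array([[0.0, 0.1, 0.3],
                      [0.1, 0.3, 0.6],
                      [0.3, 0.6, 1.0]], dtype=np.float32)
    x = y = 1
    grad_x = 0.5 * (field[y, x + 1] - field[y, x - 1])               # first derivative in x
    grad_xx = field[y, x + 1] - 2 * field[y, x] + field[y, x - 1]    # second derivative in x
    grad_xy = 0.25 * (field[y + 1, x + 1] - field[y + 1, x - 1]
                      - field[y - 1, x + 1] + field[y - 1, x - 1])   # mixed derivative
    print(grad_x, grad_xx, grad_xy)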
|
<filename>azurelinuxagent/ga/monitor.py<gh_stars>0
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import platform
import time
import threading
import uuid
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.event import add_event, WALAEventOperation
from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol import get_protocol_util
from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \
TelemetryEventList, \
TelemetryEvent, \
set_properties
from azurelinuxagent.common.utils.restutil import IOErrorCounter
from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_CODE_NAME, AGENT_LONG_VERSION, \
AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION
def parse_event(data_str):
try:
return parse_json_event(data_str)
except ValueError:
return parse_xml_event(data_str)
def parse_xml_param(param_node):
name = getattrib(param_node, "Name")
value_str = getattrib(param_node, "Value")
attr_type = getattrib(param_node, "T")
value = value_str
if attr_type == 'mt:uint64':
value = int(value_str)
elif attr_type == 'mt:bool':
value = bool(value_str)
elif attr_type == 'mt:float64':
value = float(value_str)
return TelemetryEventParam(name, value)
def parse_xml_event(data_str):
try:
xml_doc = parse_doc(data_str)
event_id = getattrib(find(xml_doc, "Event"), 'id')
provider_id = getattrib(find(xml_doc, "Provider"), 'id')
event = TelemetryEvent(event_id, provider_id)
param_nodes = findall(xml_doc, 'Param')
for param_node in param_nodes:
event.parameters.append(parse_xml_param(param_node))
return event
except Exception as e:
raise ValueError(ustr(e))
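# Illustrative shape of the XML this parser expects (placeholder values only):
#   <Event id="1">
#     <Provider id="00000000-0000-0000-0000-000000000000"/>
#     <Param Name="OSVersion" Value="Linux:example-1.0" T="mt:wstr"/>
#     <Param Name="RAM" Value="2048" T="mt:uint64"/>
#   </Event>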
def parse_json_event(data_str):
data = json.loads(data_str)
event = TelemetryEvent()
set_properties("TelemetryEvent", event, data)
return event
def get_monitor_handler():
return MonitorHandler()
class MonitorHandler(object):
def __init__(self):
self.osutil = get_osutil()
self.protocol_util = get_protocol_util()
self.sysinfo = []
self.event_thread = None
def run(self):
self.init_sysinfo()
self.start()
def is_alive(self):
return self.event_thread.is_alive()
def start(self):
self.event_thread = threading.Thread(target=self.daemon)
self.event_thread.setDaemon(True)
self.event_thread.start()
def init_sysinfo(self):
osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(),
DISTRO_NAME,
DISTRO_VERSION,
DISTRO_CODE_NAME,
platform.release())
self.sysinfo.append(TelemetryEventParam("OSVersion", osversion))
self.sysinfo.append(
TelemetryEventParam("GAVersion", CURRENT_AGENT))
try:
ram = self.osutil.get_total_mem()
processors = self.osutil.get_processor_cores()
self.sysinfo.append(TelemetryEventParam("RAM", ram))
self.sysinfo.append(TelemetryEventParam("Processors", processors))
except OSUtilError as e:
logger.warn("Failed to get system info: {0}", e)
try:
protocol = self.protocol_util.get_protocol()
vminfo = protocol.get_vminfo()
self.sysinfo.append(TelemetryEventParam("VMName",
vminfo.vmName))
self.sysinfo.append(TelemetryEventParam("TenantName",
vminfo.tenantName))
self.sysinfo.append(TelemetryEventParam("RoleName",
vminfo.roleName))
self.sysinfo.append(TelemetryEventParam("RoleInstanceName",
vminfo.roleInstanceName))
self.sysinfo.append(TelemetryEventParam("ContainerId",
vminfo.containerId))
except ProtocolError as e:
logger.warn("Failed to get system info: {0}", e)
def collect_event(self, evt_file_name):
try:
logger.verbose("Found event file: {0}", evt_file_name)
with open(evt_file_name, "rb") as evt_file:
# if fail to open or delete the file, throw exception
data_str = evt_file.read().decode("utf-8", 'ignore')
logger.verbose("Processed event file: {0}", evt_file_name)
os.remove(evt_file_name)
return data_str
except IOError as e:
msg = "Failed to process {0}, {1}".format(evt_file_name, e)
raise EventError(msg)
def collect_and_send_events(self):
event_list = TelemetryEventList()
event_dir = os.path.join(conf.get_lib_dir(), "events")
event_files = os.listdir(event_dir)
for event_file in event_files:
if not event_file.endswith(".tld"):
continue
event_file_path = os.path.join(event_dir, event_file)
try:
data_str = self.collect_event(event_file_path)
except EventError as e:
logger.error("{0}", e)
continue
try:
event = parse_event(data_str)
self.add_sysinfo(event)
event_list.events.append(event)
except (ValueError, ProtocolError) as e:
logger.warn("Failed to decode event file: {0}", e)
continue
if len(event_list.events) == 0:
return
try:
protocol = self.protocol_util.get_protocol()
protocol.report_event(event_list)
except ProtocolError as e:
logger.error("{0}", e)
def daemon(self):
period = datetime.timedelta(minutes=30)
protocol = self.protocol_util.get_protocol()
last_heartbeat = datetime.datetime.utcnow() - period
# Create a new identifier on each restart and reset the counter
heartbeat_id = str(uuid.uuid4()).upper()
counter = 0
while True:
if datetime.datetime.utcnow() >= (last_heartbeat + period):
last_heartbeat = datetime.datetime.utcnow()
incarnation = protocol.get_incarnation()
dropped_packets = self.osutil.get_firewall_dropped_packets(
protocol.endpoint)
msg = "{0};{1};{2};{3}".format(
incarnation, counter, heartbeat_id, dropped_packets)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HeartBeat,
is_success=True,
message=msg,
log_event=False)
counter += 1
io_errors = IOErrorCounter.get_and_reset()
hostplugin_errors = io_errors.get("hostplugin")
protocol_errors = io_errors.get("protocol")
other_errors = io_errors.get("other")
if hostplugin_errors > 0 \
or protocol_errors > 0 \
or other_errors > 0:
msg = "hostplugin:{0};protocol:{1};other:{2}"\
.format(hostplugin_errors,
protocol_errors,
other_errors)
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=True,
message=msg,
log_event=False)
try:
self.collect_and_send_events()
except Exception as e:
logger.warn("Failed to send events: {0}", e)
time.sleep(60)
def add_sysinfo(self, event):
sysinfo_names = [v.name for v in self.sysinfo]
        for param in list(event.parameters):  # iterate over a copy; items are removed below
if param.name in sysinfo_names:
logger.verbose("Remove existing event parameter: [{0}:{1}]",
param.name,
param.value)
event.parameters.remove(param)
event.parameters.extend(self.sysinfo)
|
# Boilerplate commons for django based web api application.
# Copyright (C) 2017 Logicify
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from collections.abc import Iterable
from django.conf import settings
from rest_framework.exceptions import ValidationError
from rest_framework.fields import empty, Field
from rest_framework.serializers import Serializer
class BaseDto(Serializer):
def to_dict(self):
if not hasattr(self, '_data'):
return self.initial_data
else:
return self.data
def __init__(self, data=empty, **kwargs):
self.initial_data = {}
super(BaseDto, self).__init__(None, data, **kwargs)
def __setattr__(self, key, value):
if key in self.get_declared_fields():
self.initial_data[key] = value
else:
super().__setattr__(key, value)
def __getattr__(self, key):
if key in self.get_declared_fields():
return self.initial_data.get(key)
else:
if key in dir(super(BaseDto, self)):
return getattr(super(), key)
else:
raise AttributeError("Object {} doesn't have attribute {}".format(self.__class__.__name__, key))
@classmethod
def from_dict(cls, dictionary=empty):
instance = cls(dictionary)
return instance
@classmethod
def get_declared_fields(cls):
if hasattr(cls, '_declared_fields'):
return getattr(cls, '_declared_fields')
else:
return []
class ApiResponseServiceSection(BaseDto):
def __init__(self):
self.error_code = 0
self.error_message = None
self.validation_errors = []
self.api_version = settings.API_VERSION
def is_successful(self):
return self.error_code == 0
def to_dict(self):
return {
"error_code": self.error_code,
"node_id": settings.HOSTNAME,
"error_message": self.error_message,
"validation_errors": self.validation_errors,
"successful": self.is_successful(),
"api_version": self.api_version
}
class ApiResponseDto(BaseDto):
def __init__(self, payload=None):
self.payload = payload
self.service = ApiResponseServiceSection()
def to_dict(self):
serialized_payload = self.payload
if isinstance(self.payload, BaseDto):
serialized_payload = self.payload.to_representation(self.payload)
elif isinstance(self.payload, Iterable):
serialized_payload = list([(p.to_representation(p) if isinstance(p, BaseDto) else p) for p in self.payload])
return {
"payload": serialized_payload,
"service": self.service.to_dict()
}
class RelatedDtoField(Field):
def __init__(self, dto_class, required: bool = None, allow_null=False, default=empty, initial=empty) -> None:
super().__init__(read_only=False, write_only=False, source=None, required=required, default=default,
initial=initial,
label=None, help_text=None, style=None, error_messages=None, validators=None,
allow_null=allow_null)
self.dto_class = dto_class
def to_representation(self, instance: BaseDto):
if isinstance(instance, BaseDto):
return instance.to_dict()
elif isinstance(instance, dict):
return instance
else:
raise ValueError("Unexpected value passed to to_representation method. " + str(instance))
def to_internal_value(self, data: dict):
dto = self.dto_class.from_dict(data)
dto.is_valid()
return dto
def run_validators(self, value):
if not value.is_valid():
raise ValidationError(value.errors)
class PaginationOptions(object):
""" Pagination options class, has offset and limit parameters."""
def __init__(self, offset: int = 0, limit: int = 50) -> None:
""" Return pagination options object with limit and offset.
:param offset: Pagination offset
:type offset: int
:param limit: Pagination limit
:type limit: int
:rtype: PaginationOptions
"""
self.offset = offset
self.limit = limit
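# Hypothetical usage sketch (not part of the original module): translate
# PaginationOptions into Python slice bounds, e.g. for slicing a queryset.
if __name__ == "__main__":
    opts = PaginationOptions(offset=40, limit=20)
    start, stop = opts.offset, opts.offset + opts.limit
    print(start, stop)  # 40 60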
|
from dataclasses import replace
from hanzi_font_deconstructor.common.TransformedStroke import TransformedStroke
from .generate_svg import generate_svg, get_stroke_attrs
from .transform_stroke import transform_stroke
from .svg_to_pil import svg_to_pil
from os import path
from pathlib import Path
import random
import re
import torch
from torchvision import transforms
PROJECT_ROOT = Path(__file__).parents[2]
GLYPH_SVGS_DIR = PROJECT_ROOT / "noto_glyphs"
MASK_THRESHOLD = 0.3
# https://en.wikipedia.org/wiki/Stroke_(CJK_character)
SINGLE_STROKE_CHARS = [
"一",
"乙",
"丨",
"丶",
"丿",
"乀",
"乁",
"乚",
"乛",
"亅",
# "𠃊", # This seems weird, I don't think this shows up in real chars
"𠃋",
"𠃌",
"𠃍",
"𠃑",
"𠄌",
"㇐",
"㇀",
"㇖",
"㇇",
"㇕",
"㇆",
"㇊",
"㇅",
"㇍",
"㇈",
"㇠",
"㇎",
"㇋",
"㇌",
"㇡",
"㇑",
"㇚",
"㇙",
# "㇗",
# "㇄",
# "㇘",
"㇟",
"㇞",
"㇉",
"㇒",
"㇓",
"㇢",
"㇜",
"㇛",
"㇔",
"㇏",
"㇝",
"㇂",
"㇃",
"㇁",
]
HORIZ_STROKE_CHARS = [
"一",
"㇐",
]
VERT_STROKE_CHARS = [
"丨",
"㇑",
]
# strokes going from top left to bottom right
BOXY_STROKE_CHARS = [
"𠃍",
"㇕",
"㇎",
]
STROKE_VIEW_BOX = (-10, 0, 1010, 1000)
VIEW_BOX_WIDTH = STROKE_VIEW_BOX[2]
VIEW_BOX_HEIGHT = STROKE_VIEW_BOX[3]
MISC_SINGLE_STROKE_PATHS = [
"M884 65l34 62c-131 40 -349 62 -523 72c-2 -18 -10 -44 -18 -61c173 -12 387 -36 507 -73z",
"M542 409 l-60 26c-14 -47 -46 -122 -74 -178l57 -22c30 56 63 127 77 174z",
"M703 378l-64 21c-8 -47 -32 -118 -53 -173l61 -18c23 54 47 124 56 170z",
"M849 191l65 19c-32 75 -76 163 -113 220c-13 -9 -43 -23 -60 -30c40 -56 81 -137 108 -209z",
"M253 417v359c21 9 43 27 78 46c63 34 150 40 258 40c113 0 269 -7 370 -19c-10 22 -22 60 -24 83 c-78 5 -248 10 -349 10c-116 0 -202 -10 -270 -46c-41 -22 -73 -50 -95 -50c-34 0 -82 52 -129 108l-53 -71c48 -46 99 -84 142 -100v-290h-128v-70h200z",
"M267 239l-62 45c-27 -45 -88 -111 -142 -158l58 -39c53 43 117 108 146 152z",
"M268 753l113 -80c6 22 18 51 26 66c-186 137 -214 160 -228 178c-8 -18 -28 -51 -43 -66c21 -14 58 -48 58 -92v-331h-145v-72h219v397z",
]
def get_file_for_char(char):
code = hex(ord(char))[2:]
return path.join(GLYPH_SVGS_DIR, f"{code}.svg")
path_extractor = re.compile(r'\bd="([^"]+)"')
def path_for_char(char):
char_file = get_file_for_char(char)
with open(char_file, "r") as contents:
char_svg = contents.read().replace("\n", "")
path_match = path_extractor.search(char_svg)
if not path_match:
raise Exception(f"No SVG path found in char svg: {char}")
return path_match[1]
SINGLE_STROKE_CHAR_PATHS = [path_for_char(char) for char in SINGLE_STROKE_CHARS]
BOXY_STROKE_CHAR_PATHS = [path_for_char(char) for char in BOXY_STROKE_CHARS]
HORIZ_STROKE_CHAR_PATHS = [path_for_char(char) for char in HORIZ_STROKE_CHARS]
VERT_STROKE_CHAR_PATHS = [path_for_char(char) for char in VERT_STROKE_CHARS]
SINGLE_STROKE_PATHS = MISC_SINGLE_STROKE_PATHS + SINGLE_STROKE_CHAR_PATHS
tensorify = transforms.ToTensor()
def img_to_greyscale_tensor(img):
return tensorify(img)[3, :, :]
def get_mask_bounds(mask):
"return a tuple of (min_x, max_x, min_y, max_y)"
horiz_max_vals = torch.max(mask, 0).values
vert_max_vals = torch.max(mask, 1).values
min_x = torch.argmax(horiz_max_vals).item()
max_x = len(horiz_max_vals) - torch.argmax(torch.flip(horiz_max_vals, [0])).item()
min_y = torch.argmax(vert_max_vals).item()
max_y = len(vert_max_vals) - torch.argmax(torch.flip(vert_max_vals, [0])).item()
return (min_x, max_x, min_y, max_y)
def get_mask_span(mask):
"return a tuple of (horizontal span, vertical span)"
min_x, max_x, min_y, max_y = get_mask_bounds(mask)
return (max_x - min_x, max_y - min_y)
def is_stroke_good(mask, existing_masks) -> bool:
# if this is the first stroke, then anything is fine
if len(existing_masks) == 0:
return True
# TODO: this is probably really slow, might need to speed this up somehow
mask_size = torch.sum(mask).item()
mask_span = get_mask_span(mask)
for existing_mask in existing_masks:
existing_mask_size = torch.sum(existing_mask).item()
overlaps = torch.where(existing_mask + mask >= 2, 1, 0)
overlaps_size = torch.sum(overlaps).item()
if overlaps_size == 0:
return True
# # if this is the second stroke, ensure there's an overlap
# # we should ensure there's at least 1 overlap per training sample
# return len(existing_masks) > 1
# if the overlap is a large amount of either stroke, this is a bad stroke
if overlaps_size / existing_mask_size > 0.25:
return False
if overlaps_size / mask_size > 0.25:
return False
overlaps_span = get_mask_span(overlaps)
existing_mask_span = get_mask_span(existing_mask)
# if the overlap is a large amount of the span of either stroke, this is a bad stroke
if max(overlaps_span) / max(existing_mask_span) > 0.4:
return False
if max(overlaps_span) / max(mask_span) > 0.4:
return False
return True
def get_mask_and_attrs(transformed_stroke, size_px):
stroke_attrs = get_stroke_attrs(transformed_stroke)
stroke_svg = generate_svg([stroke_attrs], STROKE_VIEW_BOX)
stroke_img = svg_to_pil(stroke_svg, size_px, size_px)
stroke_tensor = img_to_greyscale_tensor(stroke_img)
stroke_mask = torch.where(stroke_tensor > MASK_THRESHOLD, 1, 0)
return (stroke_mask, stroke_attrs)
def get_training_input_svg_and_masks(size_px):
"""
Create a single training example
"""
num_strokes = random.randint(3, 4)
with torch.no_grad():
strokes_attrs = []
stroke_masks = []
# for 5% of training examples, make sure there's a boxy shape involved
if random.random() <= 0.05:
strokes_attrs, stroke_masks = create_boxy_strokes(size_px)
while len(strokes_attrs) < num_strokes:
pathstr = random.choice(SINGLE_STROKE_PATHS)
stroke = transform_stroke(pathstr, STROKE_VIEW_BOX)
stroke_mask, stroke_attrs = get_mask_and_attrs(stroke, size_px)
if is_stroke_good(stroke_mask, stroke_masks):
strokes_attrs.append(stroke_attrs)
stroke_masks.append(stroke_mask)
input_svg = generate_svg(strokes_attrs, STROKE_VIEW_BOX)
return (input_svg, stroke_masks)
def create_boxy_strokes(size_px):
"""
boxy strokes like in 口 or 户 really confuse the algorithm and are unlikely to form by randomly placing strokes.
This function explicitly tries to generate samples like this
"""
horiz_stroke_path = random.choice(HORIZ_STROKE_CHAR_PATHS)
vert_stroke_path = random.choice(VERT_STROKE_CHAR_PATHS)
boxy_stroke_path = random.choice(BOXY_STROKE_CHAR_PATHS)
boxy_stroke = transform_stroke(
boxy_stroke_path, STROKE_VIEW_BOX, rotate_and_skew=False
)
vert_stroke = transform_stroke(
vert_stroke_path, STROKE_VIEW_BOX, rotate_and_skew=False
)
horiz_stroke = transform_stroke(
horiz_stroke_path, STROKE_VIEW_BOX, rotate_and_skew=False
)
boxy_mask, boxy_attrs = get_mask_and_attrs(boxy_stroke, size_px)
vert_mask, _ = get_mask_and_attrs(vert_stroke, size_px)
horiz_mask, _ = get_mask_and_attrs(horiz_stroke, size_px)
boxy_bounds = get_mask_bounds(boxy_mask)
vert_bounds = get_mask_bounds(vert_mask)
horiz_bounds = get_mask_bounds(horiz_mask)
x_ratio = VIEW_BOX_WIDTH / size_px
y_ratio = VIEW_BOX_HEIGHT / size_px
# try to align the vert stroke to the top left of the boxy stroke
vert_delta_x = (boxy_bounds[0] - vert_bounds[0]) * x_ratio + random.gauss(0, 20)
vert_delta_y = (boxy_bounds[2] - vert_bounds[2]) * y_ratio + random.gauss(0, 3)
updated_vert_stroke = replace(
vert_stroke,
translate=(
vert_stroke.translate[0] + vert_delta_x,
vert_stroke.translate[1] + vert_delta_y,
),
)
updated_vert_mask, updated_vert_attrs = get_mask_and_attrs(
updated_vert_stroke, size_px
)
# try to align the horizontal stroke with the bottom right of the boxy stroke
horiz_delta_x = (boxy_bounds[1] - horiz_bounds[1]) * x_ratio + random.gauss(0, 3)
horiz_delta_y = (boxy_bounds[3] - horiz_bounds[3]) * y_ratio + random.gauss(0, 20)
updated_horiz_stroke = replace(
horiz_stroke,
translate=(
horiz_stroke.translate[0] + horiz_delta_x,
horiz_stroke.translate[1] + horiz_delta_y,
),
)
updated_horiz_mask, updated_horiz_attrs = get_mask_and_attrs(
updated_horiz_stroke, size_px
)
stroke_masks = [boxy_mask, updated_vert_mask, updated_horiz_mask]
strokes_attrs = [boxy_attrs, updated_vert_attrs, updated_horiz_attrs]
return (strokes_attrs, stroke_masks)
def get_training_input_and_mask_tensors(size_px=256):
with torch.no_grad():
input_svg, stroke_masks = get_training_input_svg_and_masks(size_px)
input_img = svg_to_pil(input_svg, size_px, size_px)
input_tensor = img_to_greyscale_tensor(input_img)
mask_sums = torch.zeros(input_tensor.shape, dtype=torch.long)
for stroke_mask in stroke_masks:
mask_sums += stroke_mask
# collapse all overlaps of more than 2 items into a single "overlap" class
mask = torch.where(mask_sums > 2, 2, mask_sums)
return (input_tensor.unsqueeze(0), mask)
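# Hypothetical smoke test (not part of the original module). It assumes the
# noto_glyphs SVG assets are present and that this file is run as part of the
# package (the relative imports above require it).
if __name__ == "__main__":
    inp, mask = get_training_input_and_mask_tensors(size_px=128)
    print(inp.shape)               # expected: torch.Size([1, 128, 128])
    print(mask.unique().tolist())  # per-pixel classes, overlaps capped at 2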
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-24 20:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('helper', models.CharField(blank=True, max_length=1000, null=True)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=1000)),
('why', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Questionnaire',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='QuestionnaireEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='QuestionnaireUserData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SkinConcern',
fields=[
('option_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='facepackwizard.Option')),
],
bases=('facepackwizard.option',),
),
migrations.CreateModel(
name='SkinType',
fields=[
('option_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='facepackwizard.Option')),
],
bases=('facepackwizard.option',),
),
migrations.AddField(
model_name='questionnaireentry',
name='option',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facepackwizard.Option'),
),
migrations.AddField(
model_name='questionnaireentry',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facepackwizard.Question'),
),
migrations.AddField(
model_name='questionnaireentry',
name='wizard_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facepackwizard.QuestionnaireUserData'),
),
migrations.AddField(
model_name='questionnaire',
name='option',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='facepackwizard.Option'),
),
migrations.AddField(
model_name='questionnaire',
name='question',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='facepackwizard.Question'),
),
]
|
import numpy as np
import pygame
import sys
import time
class Grid_WorldSim:
def __init__(self,height,width,start_loc,finish_loc,actions,reward=-1,shift=None):
        self.shift = [0]*width if shift is None else shift
self.height = height
self.width = width
self.start_loc = start_loc
self.finish_loc = finish_loc
self.grid = self.make_grid()
self.r = reward
self.actions = actions
self.num_actions = len(self.actions)
self.reset_loc()
def reset_loc(self):
self.x_loc,self.y_loc = self.start_loc[0]+1,self.start_loc[1]+1
def ActionVal_init(self):
action_val = 0*np.random.uniform(low = 0,high = 1,size = [self.height+2,self.width+2,self.num_actions])
action_val[self.finish_loc[0]+1,self.finish_loc[1]+1] = 0
return action_val
def make_grid(self):
grid = np.zeros([self.height,self.width])
grid[self.finish_loc[0],self.finish_loc[1]]=-1
sudo_grid = np.ones([self.height+2,self.width+2])
sudo_grid[1:self.height+1,1:self.width+1] = grid
return sudo_grid
def is_finished(self,i,j):
return self.grid[i,j]==-1
def is_boundry(self,i,j):
return self.grid[i,j]==1
def apply_wind(self,x,y):
x_ = x
x_ -= self.shift[y-1]
if 0<x_<=self.height and 0<y<self.width:
x = x_
return x,y
def starting_state(self):
return self.start_loc[0]+1,self.start_loc[1]+1
def simulate(self,action):
action = self.actions[action]
x_temp,y_temp = self.apply_wind(self.x_loc,self.y_loc)
if not self.is_boundry(x_temp,y_temp):
self.x_loc,self.y_loc = x_temp,y_temp
x_temp,y_temp=self.x_loc+action[0],self.y_loc+action[1]
if not self.is_boundry(x_temp,y_temp):
self.x_loc,self.y_loc = x_temp,y_temp
return self.r,[self.x_loc,self.y_loc]
class TDZero:
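    """
    Tabular on-policy TD control (a SARSA-style update) for Grid_WorldSim:
    Q(s, a) += alpha * (r + gamma * Q(s', a') - Q(s, a)), where the next
    action a' is drawn from the same epsilon-greedy policy being improved.
    """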
def __init__(self,simulation,num_episodes,epsilon=0.1,alpha=0.5,gamma=1):
self.simulation = simulation
self.epsilon = epsilon
self.alpha = alpha
self.gamma = gamma
self.num_episodes = num_episodes
self.action_val = self.simulation.ActionVal_init()
self.policy = np.argmax(self.action_val,axis=2)
self.num_action = self.simulation.num_actions
def action(self,state):
if self.epsilon>0:
probs = self.epsilon/self.num_action
rand = np.random.uniform()
if rand<=self.epsilon:
action = np.random.choice(range(self.num_action))
else:
action = self.policy[state[0],state[1]]
if action==self.policy[state[0],state[1]]:
return action,1-self.epsilon+probs
else:
return action,probs
else:
return self.policy[state[0],state[1]],1
def Learn(self):
t = 0
for episode in range(self.num_episodes):
self.simulation.reset_loc()
state = self.simulation.starting_state()
action = self.action(state)[0]
while True:
r,new_state = self.simulation.simulate(action)
new_action = self.action(new_state)[0]
Q = self.action_val[state[0],state[1],action]
Q_next = self.action_val[new_state[0],new_state[1],new_action]
self.action_val[state[0],state[1],action]+=self.alpha*(r+self.gamma*Q_next-Q)
self.policy[state[0],state[1]] = np.argmax(self.action_val[state[0],state[1]])
state = new_state
action = new_action
t+=1
if self.simulation.is_finished(*state):
break
print("Episode:",episode,"Time Steps Taken",t)
def play(self,rand_start=True,pos=0):
global SCREEN, CLOCK, GRID, HEIGHT, WIDTH, blockSize, BLACK, WHITE, GREEN, RED
BLACK = (0, 0, 0)
WHITE = (200, 200, 200)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
GRID = self.simulation.grid.copy()
blockSize = 20
WINDOW_HEIGHT, WINDOW_WIDTH = GRID.shape[0]*blockSize, GRID.shape[1]*blockSize
SCREEN = pygame.display.set_mode((WINDOW_WIDTH,WINDOW_HEIGHT))
CLOCK = pygame.time.Clock()
SCREEN.fill(BLACK)
HEIGHT,WIDTH = GRID.shape[0], GRID.shape[1]
self.simulation.reset_loc()
state = self.simulation.starting_state()
count=0
while True:
GRID = self.simulation.grid.copy()
GRID[state[0],state[1]] = 10
self.main()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
SCREEN.fill(BLACK)
action = self.action(state)[0]
print(state,action)
_,state = self.simulation.simulate(action)
count+=1
if self.simulation.is_finished(*state):
print("FINISHED")
print("Steps Taken",count)
pygame.quit()
sys.exit()
break
def main(self):
time.sleep(.5)
for x in range(WIDTH):
for y in range(HEIGHT):
color=WHITE
rect = pygame.Rect(x*(blockSize), y*(blockSize),
blockSize, blockSize)
if GRID[y][x]==1:
color=GREEN
SCREEN.fill(color,rect)
if GRID[y][x]==10:
color=RED
SCREEN.fill(color,rect)
if GRID[y][x]==-1:
color = WHITE
SCREEN.fill(color,rect)
pygame.draw.rect(SCREEN, color, rect, 1)
s = "0 0 0 1 1 1 2 2 1 0"
shift = [int(x) for x in s.strip().split()]
action = [[i,j] for i in range(-1,2) for j in range(-1,2) if not abs(i)==abs(j)]
grid = Grid_WorldSim(height=7,width=10,start_loc=[3,0],finish_loc=[3,7],shift = shift,actions = action)
TD = TDZero(grid,1000,alpha=0.5)
TD.epsilon=0
TD.Learn()
TD.play()
|
# -*- coding: UTF-8 -*-
import re
from time import time
__all__ = ["DeauthMixin", "ScanMixin", "ConnectMixin", "STATION_REGEX"]
CONNECT_REGEX = re.compile(r"(?m)Device '(?P<iface>[a-z][a-z0-9]*)' success"
r"fully activated with '(?P<uid>[0-9a-f\-]+)'\.")
STATION_REGEX = re.compile(r"^\s*(?P<bssid>(?:[0-9A-F]{2}\:){5}[0-9A-F]{2})\s+"
r"(?P<station>(?:[0-9A-F]{2}\:){5}[0-9A-F]{2})\s+"
r"(?P<power>\-?\d+)\s+")
TARGET_REGEX = re.compile(r"^\s*(?P<bssid>(?:[0-9A-F]{2}\:){5}[0-9A-F]{2})\s+"
r"(?P<power>\-?\d+)\s+"
r"(?P<beacons>\d+)\s+"
r"(?P<data>\d+)\s+"
r"(?P<prate>\d+)\s+"
r"(?P<channel>\d+)\s+"
r"(?P<mb>\w+\.?)\s+"
r"(?P<enc>\w+)\s+"
r"(?P<cipher>\w*?)\s+"
r"(?P<auth>\w*?)\s+"
r"(?P<essid>[\w\-\.]+(?:\s+[\w\-\.]+)*)\s*$")
class ConnectMixin(object):
""" Mixin class for use with Command and Module """
requirements = {'system': ["nmcli"]}
def connect(self, essid, retry=True):
pswd = self.console.state['TARGETS'][essid].get('password')
cmd = ["nmcli", "device", "wifi", "connect", essid]
if pswd is not None:
cmd += ["password", pswd]
out = self.console._jobs.run(cmd)[0]
if "No network with SSID" in out:
self.logger.warning("No network with this SSID is running")
raise Exception("No network with SSID '{}'".format(essid))
elif "Error: NetworkManager is not running." in out:
if retry:
self.disconnect()
self.console._jobs.run("service network-manager restart")
return self.connect(essid, False)
else:
raise Exception("Network Manager is not running")
m = CONNECT_REGEX.search(out)
if m is not None:
iface = m.group("iface")
self.console._jobs.run("dhclient " + iface + " &", shell=True)
return iface
def disconnect(self, essid=None):
for iface, data in self.console.state['INTERFACES'].items():
if essid is not None and data[1] != essid:
continue
out = self.console._jobs.run(["nmcli", "device", "disconnect", "iface", iface])[0]
yield essid, "successfully disconnected." in out
self.console.root.interfaces
class DeauthMixin(object):
""" Mixin class for adding a .deauth() method """
requirements = {'system': ["aircrack-ng/aireplay-ng", "aircrack-ng/airodump-ng"]}
def deauth(self, bssid, station=None, n_packets=5, interval=0, timeout=None, capture=None, post_func=None,
silent=False):
t = self.console.state['TARGETS']
try:
k = self.config.option('ESSID').value
except KeyError:
k = self.config.option('TARGET').value
ch = t[k]['channel']
iface = self.console.root.mon_interfaces[0]
cmd = "sudo airodump-ng -c {}%s --bssid {} {}".format(ch, bssid, iface)
cmd = cmd % [" -w {}".format(capture), ""][capture is None]
tr = {}
i = 0
try:
for line in self.console._jobs.run_iter(cmd, timeout=timeout):
m = STATION_REGEX.search(line)
if m is not None:
s = m.group("station")
if s in self.console.root.self_mac_addresses:
continue
if station is None or station == s:
tr.setdefault(s, 0)
if interval == 0 or time() - tr[s] > interval:
if not silent:
self.logger.warning("Deauth station: {}".format(s))
cmd = "sudo aireplay-ng -0 {} -a {} -c {} {}".format(n_packets, bssid, s, iface)
self.console._jobs.background(cmd, subpool="deauth")
if i % 5 == 0:
self.console._jobs.free("deauth")
if interval == 0:
break
i += 1
tr[s] = time()
if post_func:
r = post_func(**locals())
if r is not None:
return r
finally:
self.console._jobs.terminate("deauth")
class ScanMixin(object):
""" Mixin class for use with Command and Module """
requirements = {'system': ["aircrack-ng/airodump-ng"]}
def scan(self, interface, timeout=300, silent=False):
if not silent:
self.logger.warning("Press Ctrl+C to interrupt")
s = self.console.state['STATIONS']
t = self.console.state['TARGETS']
p = self.console.state['PASSWORDS']
s.unlock()
t.unlock()
cmd = "sudo airodump-ng {}".format(interface)
try:
for line in self.console._jobs.run_iter(cmd, timeout=int(timeout)):
m = TARGET_REGEX.search(line)
if m is not None:
data = {}
for k in ["essid", "bssid", "channel", "power", "enc", "cipher", "auth"]:
v = m.group(k)
data[k] = int(v) if v.isdigit() and k != "essid" else v
e = data['essid']
data['password'] = p.get(e)
data['stations'] = []
if self._filter_func(e):
if e not in t.keys():
self.logger.info("Found {}".format(e))
else:
for k in ['password', 'stations']:
data[k] = t[e].get(k)
t[e] = data
continue
m = STATION_REGEX.search(line)
if m is not None:
e = [tgt for tgt, data in t.items() if data['bssid'] == m.group("bssid")]
if len(e) == 1:
e = e[0]
sta = m.group("station")
if sta in self.console.root.self_mac_addresses:
continue
if sta not in t[e]['stations']:
if sta in s.keys() and sta in t[s[sta]]['stations']:
t[s[sta]]['stations'].remove(sta)
t[e]['stations'].append(sta)
self.logger.info("Found {} connected to {}".format(sta, e))
s[sta] = e
except Exception as err:
self.logger.exception(err)
finally:
s.lock()
t.lock()
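# Hypothetical illustration (not part of the original module): the kind of
# airodump-ng station line STATION_REGEX is written to pick apart.
if __name__ == "__main__":
    sample = " AA:BB:CC:DD:EE:FF  11:22:33:44:55:66  -42   0 - 1      10        3  "
    match = STATION_REGEX.search(sample)
    if match is not None:
        print(match.group("bssid"), match.group("station"), match.group("power"))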
|
<reponame>HazeDT/DL-based-Intelligent-Diagnosis-Benchmark
#!/usr/bin/python
# -*- coding:utf-8 -*-
import argparse
import os
from datetime import datetime
from utils.logger import setlogger
import logging
from utils.train_utils_ae import train_utils
args = None
def parse_args():
parser = argparse.ArgumentParser(description='Train')
# basic parameters
parser.add_argument('--model_name', type=str, default='Vae1d', help='the name of the model')
parser.add_argument('--data_name', type=str, default='SEUCWT', help='the name of the data')
    parser.add_argument('--data_dir', type=str, default=r"D:\Data\Mechanical-datasets", help='the directory of the data')
parser.add_argument('--normlizetype', type=str, choices=['0-1', '1-1', 'mean-std'],default="0-1", help='data pre-processing ')
parser.add_argument('--processing_type', type=str, choices=['R_A', 'R_NA', 'O_A'], default='R_A',
help='R_A: random split with data augmentation, R_NA: random split without data augmentation, O_A: order split with data augmentation')
parser.add_argument('--cuda_device', type=str, default='0', help='assign device')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint', help='the directory to save the model')
parser.add_argument("--pretrained", type=bool, default=True, help='whether to load the pretrained model')
parser.add_argument('--batch_size', type=int, default=64, help='batchsize of the training process')
parser.add_argument('--num_workers', type=int, default=0, help='the number of training process')
# optimization information
parser.add_argument('--opt', type=str, choices=['sgd', 'adam'], default='adam', help='the optimizer')
parser.add_argument('--lr', type=float, default=0.001, help='the initial learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='the momentum for sgd')
parser.add_argument('--weight_decay', type=float, default=1e-5, help='the weight decay')
parser.add_argument('--lr_scheduler', type=str, choices=['step', 'exp', 'stepLR', 'fix'], default='fix', help='the learning rate schedule')
parser.add_argument('--gamma', type=float, default=0.1, help='learning rate scheduler parameter for step and exp')
parser.add_argument('--steps', type=str, default='10,20,30,40', help='the learning rate decay for step and stepLR')
parser.add_argument('--steps1', type=str, default='50,80',
help='the learning rate decay for step and stepLR')
# save, load and display information
parser.add_argument('--middle_epoch', type=int, default=50, help='middle number of epoch')
parser.add_argument('--max_epoch', type=int, default=100, help='max number of epoch')
parser.add_argument('--print_step', type=int, default=100, help='the interval of log training information')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_device.strip()
# Prepare the saving path for the model
sub_dir = args.model_name+'_'+args.data_name + '_' + datetime.strftime(datetime.now(), '%m%d-%H%M%S')
save_dir = os.path.join(args.checkpoint_dir, sub_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# set the logger
setlogger(os.path.join(save_dir, 'training.log'))
# save the args
for k, v in args.__dict__.items():
logging.info("{}: {}".format(k, v))
trainer = train_utils(args, save_dir)
trainer.setup()
trainer.train()
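# Example invocation (illustrative only -- the script name and the dataset path
# are assumptions; adjust them to the local checkout and data location):
#   python train_ae.py --model_name Vae1d --data_name SEUCWT \
#       --data_dir /path/to/Mechanical-datasets --normlizetype 0-1 \
#       --opt adam --lr 0.001 --batch_size 64 --max_epoch 100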
|
<reponame>Deril-Pana/wikiBlackcoinNL
# -*- coding: utf-8 -*-
#
# (c) Copyright 2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: <NAME>, <NAME>
#
#
#
# Std Lib
import sys
import os
from subprocess import Popen, PIPE
import grp
import fnmatch
import tempfile
import socket
import struct
import select
import time
import fcntl
import errno
import stat
import string
import glob
import subprocess  # replaces the deprecated 'commands' module from Python 2
import io
import re
import getpass
import locale
from .sixext.moves import html_entities
# Local
from .g import *
from .codes import *
from . import utils, tui
from . import logger
# System wide logger
log = logger.Logger('', logger.Logger.LOG_LEVEL_INFO, logger.Logger.LOG_TO_CONSOLE)
log.set_level('info')
def running_as_root():
return os.geteuid() == 0
def restart_cups():
if os.path.exists('/etc/init.d/cups'):
return '/etc/init.d/cups restart'
elif os.path.exists('/etc/init.d/cupsys'):
return '/etc/init.d/cupsys restart'
else:
return 'killall -HUP cupsd'
def restart(passwordObj):
ok = False
shutdown = utils.which('shutdown')
if shutdown and passwordObj:
cmd = "%s -r now" % (os.path.join(shutdown, "shutdown"))
cmd = passwordObj.getAuthCmd() % cmd
status, output = utils.run(cmd, passwordObj, "Need authentication to restart system")
ok = (status == 0)
return ok
def run_open_mdns_port(core, passwordObj, callback=None):
open_mdns_port_cmd = core.get_distro_ver_data('open_mdns_port')
log.debug(open_mdns_port_cmd)
if open_mdns_port_cmd and passwordObj:
x = 1
for cmd in open_mdns_port_cmd:
cmd = passwordObj.getAuthCmd() % cmd
status, output = utils.run(cmd, passwordObj, "Need authentication to open mdns port [%s]"%cmd)
if status != 0:
log.warn("An error occurred running '%s'" % cmd)
log.warn(output)
if callback is not None:
callback(cmd, "Open mDNS/Bonjour step %d" % x)
x += 1
def run_hp_tools(cmd):
if cmd is not None:
hpCommand = utils.which(cmd, True)
if not hpCommand:
hpCommand = cmd
log.debug(hpCommand)
status, output = utils.run(hpCommand)
return status == 0
else:
log.error("Command not found")
return False
def run_hp_tools_with_auth(cmd, passwordObj):
    if cmd is not None and passwordObj is not None:
        hpCommand = utils.which(cmd, True)
        if not hpCommand:  # fall back to the command as given (e.g. a local script such as ./setup.py)
            hpCommand = cmd
        hpCommand = passwordObj.getAuthCmd() % hpCommand
        log.debug(hpCommand)
        status, output = utils.run(hpCommand, passwordObj, "Need authentication to run %s command" % cmd)
return status == 0
else:
log.error("Command not found or password object is not valid")
return False
# start_service() starts a system service.
# Input:
#      service_name (string) --> name of the service to start.
#      passwordObj           --> base/password object (starting services requires root privileges).
# Output:
#      ret_val (bool) --> True if the service was started or is already running,
#                         False if the service could not be started.
# An illustrative call is shown after the function body.
def start_service(service_name, passwordObj):
    ret_Val = False
    if not service_name or not passwordObj:
        return ret_Val
    if utils.which('systemctl'):
        cmd_status = passwordObj.getAuthCmd() % ("systemctl status %s.service" % service_name)
        log.debug(cmd_status)
        sts, out = utils.run(cmd_status, passwordObj, "Need authentication to get %s service status" % service_name)
        if sts == 0:
            if 'stop' in out or 'inactive' in out:
                cmd_start = passwordObj.getAuthCmd() % ("systemctl start %s.service" % service_name)
                log.debug("cmd_start=%s" % cmd_start)
                sts, out = utils.run(cmd_start, passwordObj, "Need authentication to start/restart %s service" % service_name)
                if sts == 0:
                    ret_Val = True
            else:
                ret_Val = True
        else:
            log.error("Failed to start the %s service; please start it manually." % service_name)
    elif utils.which('service'):
        cmd_status = passwordObj.getAuthCmd() % ("service %s status" % service_name)
        log.debug(cmd_status)
        sts, out = utils.run(cmd_status, passwordObj, "Need authentication to get %s service status" % service_name)
        if sts == 0:
            if 'stop' in out or 'inactive' in out:
                cmd_start = passwordObj.getAuthCmd() % ("service %s start" % service_name)
                log.debug("cmd_start=%s" % cmd_start)
                sts, out = utils.run(cmd_start, passwordObj, "Need authentication to start/restart %s service" % service_name)
                if sts == 0:
                    ret_Val = True
            elif 'unrecognized service' in out:
                log.error("Failed to start %s: unrecognized service." % service_name)
            else:
                ret_Val = True
        else:
            log.error("Failed to start the %s service; please start it manually." % service_name)
    elif os.path.exists('/etc/init.d/%s' % service_name):
        cmd_status = passwordObj.getAuthCmd() % ('/etc/init.d/%s status' % service_name)
        log.debug(cmd_status)
        sts, out = utils.run(cmd_status, passwordObj, "Need authentication to get %s service status" % service_name)
        if sts == 0:
            if 'stop' in out or 'inactive' in out:
                cmd_start = passwordObj.getAuthCmd() % ('/etc/init.d/%s start' % service_name)
                log.debug("cmd_start=%s" % cmd_start)
                sts, out = utils.run(cmd_start, passwordObj, "Need authentication to start/restart %s service" % service_name)
                if sts == 0:
                    ret_Val = True
            else:
                ret_Val = True
        else:
            log.error("Failed to start the %s service; please start it manually." % service_name)
    else:
        if service_name == 'cups':
            cmd = 'lpstat -r'
            sts, out = utils.run(cmd, passwordObj, "Need authentication to get %s service status" % service_name)
            if sts == 0 and 'is running' in out:
                ret_Val = True
            else:
                log.error("The 'service' command was not found; please start the cups service manually.")
        else:
            log.error("Failed to start the %s service; please start it manually." % service_name)
    return ret_Val
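# Illustrative call (a minimal sketch; the import path of the password object is
# an assumption -- any object exposing getAuthCmd(), as used above, will work):
#   from base import password            # hypothetical import path
#   pwd_obj = password.Password()
#   if start_service('cups', pwd_obj):
#       log.info("cups service is running")
#   else:
#       log.error("cups service could not be started")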
def run_systray():
path = utils.which('hp-systray')
if path:
path = os.path.join(path, 'hp-systray')
else:
path = os.path.join(prop.home_dir, 'systray.py')
if not os.path.exists(path):
log.warn("Unable to start hp-systray")
log.debug("Running hp-systray: %s --force-startup" % path)
os.spawnlp(os.P_NOWAIT, path, 'hp-systray', '--force-startup', "--ignore-update-firsttime")
log.debug("Waiting for hp-systray to start...")
time.sleep(1)
def disable_SmartInstall():
path = utils.which('hp-SIDisable',True)
if path:
param = '-'
sicmd = "%s %s" % (path,param)
if run_hp_tools(sicmd):
log.debug("Smart Install is disabled\n")
else:
log.error("Smart Install could not be disabled\n")
else:
try:
from . import pkit
plugin = PLUGIN_REQUIRED
plugin_reason = PLUGIN_REASON_NONE
ok, sudo_ok = pkit.run_plugin_command(plugin == PLUGIN_REQUIRED, plugin_reason)
if not ok or not sudo_ok:
log.error("Failed to install plug-in.")
except ImportError:
log.warn("Import error\n")
def close_running_hp_processes():
    # check whether hp-systray is running
    status, output = utils.Is_Process_Running('hp-systray')
if status is True:
        ok, choice = tui.enter_choice("\nSome HPLIP applications are running. Press 'y' to close them or 'n' to quit the upgrade (y=yes*, n=no): ", ['y', 'n'], 'y')
        if not ok or choice == 'n':
log.info("Manually close HPLIP applications and run hp-upgrade again.")
return False
try:
# dBus
from dbus import SystemBus, lowlevel
except ImportError:
log.error("Unable to load DBus.")
pass
else:
try:
args = ['', '', EVENT_SYSTEMTRAY_EXIT, prop.username, 0, '', '']
msg = lowlevel.SignalMessage('/', 'com.hplip.StatusService', 'Event')
msg.append(signature='ssisiss', *args)
log.debug("Sending close message to hp-systray ...")
SystemBus().send_message(msg)
time.sleep(0.5)
except:
log.error("Failed to send DBus message to hp-systray/hp-toolbox.")
pass
toolbox_status,output = utils.Is_Process_Running('hp-toolbox')
if toolbox_status is True:
log.error("Failed to close either HP-Toolbox/HP-Systray. Manually close and run hp-upgrade again.")
return False
return True
|