Dataset schema, one row per source file (column | dtype | observed values; ⌀ marks columns that may be null):

column | dtype | values
---|---|---
hexsha | stringlengths | 40..40
size | int64 | 4..1.02M
ext | stringclasses | 8 values
lang | stringclasses | 1 value
max_stars_repo_path | stringlengths | 4..209
max_stars_repo_name | stringlengths | 5..121
max_stars_repo_head_hexsha | stringlengths | 40..40
max_stars_repo_licenses | sequencelengths | 1..10
max_stars_count | int64 | 1..191k ⌀
max_stars_repo_stars_event_min_datetime | stringlengths | 24..24 ⌀
max_stars_repo_stars_event_max_datetime | stringlengths | 24..24 ⌀
max_issues_repo_path | stringlengths | 4..209
max_issues_repo_name | stringlengths | 5..121
max_issues_repo_head_hexsha | stringlengths | 40..40
max_issues_repo_licenses | sequencelengths | 1..10
max_issues_count | int64 | 1..67k ⌀
max_issues_repo_issues_event_min_datetime | stringlengths | 24..24 ⌀
max_issues_repo_issues_event_max_datetime | stringlengths | 24..24 ⌀
max_forks_repo_path | stringlengths | 4..209
max_forks_repo_name | stringlengths | 5..121
max_forks_repo_head_hexsha | stringlengths | 40..40
max_forks_repo_licenses | sequencelengths | 1..10
max_forks_count | int64 | 1..105k ⌀
max_forks_repo_forks_event_min_datetime | stringlengths | 24..24 ⌀
max_forks_repo_forks_event_max_datetime | stringlengths | 24..24 ⌀
content | stringlengths | 4..1.02M
avg_line_length | float64 | 1.07..66.1k
max_line_length | int64 | 4..266k
alphanum_fraction | float64 | 0.01..1
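Each record below lists these fields in order, with the raw file content inline and the three derived statistics (avg_line_length, max_line_length, alphanum_fraction) closing the row. As a minimal sketch (not part of the dataset), the snippet below shows how rows keyed by these column names could be filtered in plain Python; the `rows` iterable and the thresholds are assumptions for illustration.

def select_clean_python_rows(rows):
    """Yield (repo, path, content) for rows that look like hand-written Python files."""
    for row in rows:
        if row["ext"] != "py" or row["lang"] != "Python":
            continue
        # Use the derived statistics to skip minified or generated-looking files.
        if row["max_line_length"] > 200 or row["alphanum_fraction"] < 0.25:
            continue
        yield row["max_stars_repo_name"], row["max_stars_repo_path"], row["content"]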
fc5a7f4ca0194617ccf4d78bfd82260295b067f8 | 219 | py | Python | mnemocards/any2dict/json_reader.py | guiferviz/mnemocard | 6b5ef0a6d4aa5f5c01f8c467c3e406b424106a43 | ["MIT"] | null | null | null | mnemocards/any2dict/json_reader.py | guiferviz/mnemocard | 6b5ef0a6d4aa5f5c01f8c467c3e406b424106a43 | ["MIT"] | null | null | null | mnemocards/any2dict/json_reader.py | guiferviz/mnemocard | 6b5ef0a6d4aa5f5c01f8c467c3e406b424106a43 | ["MIT"] | null | null | null |
import json
from .reader import Reader
from .register import add_reader
@add_reader
class JSON(Reader):
extensions = ["json"]
def loads(self, string, **options):
return json.loads(string, **options)
| 16.846154 | 44 | 0.694064 |
515b72a9bff95d0ecb29a5106b156b07b98b31b9 | 18,980 | py | Python | src/aiodynamo/expressions.py | uvve/aiodynamo | 39100748691ffe2c34caae4dcddaddb1dfa505a5 | ["Apache-2.0"] | null | null | null | src/aiodynamo/expressions.py | uvve/aiodynamo | 39100748691ffe2c34caae4dcddaddb1dfa505a5 | ["Apache-2.0"] | 1 | 2022-01-25T02:19:03.000Z | 2022-01-25T11:00:17.000Z | src/aiodynamo/expressions.py | Tinche/aiodynamo | 963a6baecb7782fb5820179e2ec0c041a527d02e | ["Apache-2.0"] | null | null | null |
from __future__ import annotations
import abc
import decimal
from dataclasses import dataclass, field
from itertools import chain, count
from typing import *
from .errors import CannotAddToNestedField
from .types import AttributeType, Numeric, ParametersDict
from .utils import low_level_serialize
_ParametersCache = Dict[Tuple[Any, Any], str]
Addable = Union[Numeric, Set[bytes], Set[str], Set[Numeric]]
class ProjectionExpression(metaclass=abc.ABCMeta):
@abc.abstractmethod
def encode(self, params: Parameters) -> str:
pass
@abc.abstractmethod
def __and__(self, other: F) -> ProjectionExpression:
pass
@dataclass(frozen=True)
class KeyPath:
root: str
parts: Sequence[Union[str, int]] = ()
class F(ProjectionExpression):
"""
This class represents a Field or Attribute in a DynamoDB Item.
For paths to items, provide them as separate arguments. For example,
to reference the second item in a list stored in the key "foo", use
`F("foo", 1)`.
It can be used to create Projection Expressions using the & operator,
for example `F("foo") & F("bar")` is the Projection expression to
return the two fields "foo" and "bar".
It is also used to create Condition Expressions. See the various
methods below.
Lastly, it is also used to create Update Expressions, see methods
at the end of the class.
"""
path: KeyPath
def __init__(self, root: str, *path: Union[str, int]):
self.path = KeyPath(root, path)
def __hash__(self) -> int:
return hash(self.path)
# Projection Expressions
def __and__(self, other: F) -> ProjectionExpression:
return FieldList([self, other])
def encode(self, params: Parameters) -> str:
return params.encode_path(self.path)
# Condition Expressions
def does_not_exist(self) -> Condition:
"""
Checks that a field does not exist.
"""
return DoesNotExist(self)
def exists(self) -> Condition:
"""
Checks that a field exists.
"""
return Exists(self)
def attribute_type(self, attribute_type: AttributeType) -> Condition:
"""
Checks the attribute type of a field.
"""
return AttributeTypeCondition(self, attribute_type)
def begins_with(self, substr: str) -> Condition:
"""
Checks that a field begins with a substring.
The substring must not be empty.
Fields matching the substring provided completely are not returned.
"""
if not substr:
raise ValueError("Substring may not be empty")
return BeginsWith(self, substr)
def between(self, low: Any, high: Any) -> Condition:
"""
Checks that a field is between two given values.
"""
return Between(self, low, high)
def contains(
self, value: Union[str, bytes, int, float, decimal.Decimal]
) -> Condition:
"""
Checks if a set or list contains a certain value.
If a string or bytes object is used as a value, they must not be empty.
"""
if isinstance(value, (bytes, str)) and not value:
raise ValueError("Value may not be empty")
return Contains(self, value)
def size(self) -> Size:
"""
Allows checking the item size. This does not return a condition directly;
to get a condition, call the appropriate comparison method on the
returned Size instance.
"""
return Size(self)
def is_in(self, values: Sequence[Any]) -> Condition:
"""
Checks if the field is in a sequence of values.
Between one and one hundred values may be provided.
"""
return In(self, values)
def gt(self, other: Any) -> Condition:
"""
Checks if a field is greater than a value.
"""
return Comparison(self, ">", other)
def gte(self, other: Any) -> Condition:
"""
Checks if a field is greater than or equal to a value.
"""
return Comparison(self, ">=", other)
def lt(self, other: Any) -> Condition:
"""
Checks if a field is less than a value.
"""
return Comparison(self, "<", other)
def lte(self, other: Any) -> Condition:
"""
Checks if a field is less than or equal to a value.
"""
return Comparison(self, "<=", other)
def equals(self, other: Any) -> Condition:
"""
Checks if a field is equal to a value.
"""
return Comparison(self, "=", other)
def not_equals(self, other: Any) -> Condition:
"""
Checks if a field is not equal to a value.
"""
return Comparison(self, "<>", other)
# Update Expressions
def set(self, value: Any) -> UpdateExpression:
"""
Set a field to a value.
"""
if isinstance(value, (bytes, str)) and not value:
return UpdateExpression(remove={self})
return UpdateExpression(set_updates={self: Value(value)})
def set_if_not_exists(self, value: Any) -> UpdateExpression:
"""
Set a field to a value if the field does not exist in the item yet.
"""
if isinstance(value, (bytes, str)) and not value:
return UpdateExpression()
return UpdateExpression(set_updates={self: IfNotExists(value)})
def change(self, diff: Numeric) -> UpdateExpression:
"""
Change a numeric field by a given value.
"""
return UpdateExpression(set_updates={self: Modify(diff)})
def append(self, value: List[Any]) -> UpdateExpression:
"""
Add items to a list field. Note that the value passed in should be a
list, not an individual item.
"""
return UpdateExpression(set_updates={self: Append(list(value))})
def remove(self) -> UpdateExpression:
"""
Remove a field.
"""
return UpdateExpression(remove={self})
def add(self, value: Addable) -> UpdateExpression:
"""
Add a value to a field. Only allowed for top level fields.
For numeric fields add a numeric value.
For set fields, this will set the field to the union of the existing
set and the set provided.
"""
if self.path.parts:
raise CannotAddToNestedField()
return UpdateExpression(add={self: value})
def delete(self, value: Set[Any]) -> UpdateExpression:
"""
Deletes all items in the set provided from the set stored in this field.
"""
return UpdateExpression(delete={self: value})
class KeyCondition(metaclass=abc.ABCMeta):
@abc.abstractmethod
def encode(self, params: Parameters) -> str:
pass
@dataclass(frozen=True)
class HashKey(KeyCondition):
"""
Used for Key Conditions. To also constrain the Key Condition by the
range key, create an instance of RangeKey, call a method on it and
combine the HashKey with the return value of that method call using
the & operator.
"""
name: str
value: Any
def encode(self, params: Parameters) -> str:
return f"{params.encode_path(KeyPath(self.name))} = {params.encode_value(self.value)}"
def __and__(self, other: Condition) -> KeyCondition:
return HashAndRangeKeyCondition(self, other)
@dataclass(frozen=True)
class RangeKey:
"""
Can be used to further constrain a Key Condition. Must be used
together with a HashKey instance.
The provided methods behave the same as their counterparts in the
F class.
"""
name: str
def begins_with(self, substr: str) -> Condition:
if not substr:
raise ValueError("Substring may not be empty")
return BeginsWith(F(self.name), substr)
def between(self, low: Any, high: Any) -> Condition:
return Between(F(self.name), low, high)
def contains(
self, value: Union[str, bytes, int, float, decimal.Decimal]
) -> Condition:
return Contains(F(self.name), value)
def size(self) -> Size:
return Size(F(self.name))
def is_in(self, values: Sequence[Any]) -> Condition:
return In(F(self.name), values)
def gt(self, other: Any) -> Condition:
return Comparison(F(self.name), ">", other)
def gte(self, other: Any) -> Condition:
return Comparison(F(self.name), ">=", other)
def lt(self, other: Any) -> Condition:
return Comparison(F(self.name), "<", other)
def lte(self, other: Any) -> Condition:
return Comparison(F(self.name), "<=", other)
def equals(self, other: Any) -> Condition:
return Comparison(F(self.name), "=", other)
def not_equals(self, other: Any) -> Condition:
return Comparison(F(self.name), "<>", other)
class Parameters:
def __init__(self) -> None:
self.names: Dict[str, str] = {}
self.values: Dict[str, Dict[str, Any]] = {}
self.names_gen: Iterator[int] = count()
self.values_gen: Iterator[int] = count()
self.names_cache: _ParametersCache = {}
self.values_cache: _ParametersCache = {}
def encode_name(self, name: Union[str, int]) -> str:
return self._encode(
"#n", name, self.names, self.names_gen, self.names_cache, name
)
def encode_value(self, value: Any) -> str:
tag, value = low_level_serialize(value)
return self._encode(
":v",
{tag: value},
self.values,
self.values_gen,
self.values_cache,
(tag, value),
)
def encode_path(self, path: KeyPath) -> str:
return "".join(
chain(
[self.encode_name(path.root)],
(
f"[{part}]"
if isinstance(part, int)
else f".{self.encode_name(part)}"
for part in path.parts
),
)
)
def to_request_payload(self) -> ParametersDict:
payload: ParametersDict = {}
if self.names:
payload["ExpressionAttributeNames"] = self.names
if self.values:
payload["ExpressionAttributeValues"] = {
key: value for key, value in self.values.items()
}
return payload
def _encode(
self,
prefix: str,
thing: Any,
data: Dict[str, Any],
index_gen: Iterator[int],
cache: _ParametersCache,
cache_key: Any,
) -> str:
try:
return cache[cache_key]
except KeyError:
can_cache = True
except TypeError:
can_cache = False
encoded = f"{prefix}{next(index_gen)}"
data[encoded] = thing
if can_cache:
cache[cache_key] = encoded
return encoded
@dataclass(frozen=True)
class HashAndRangeKeyCondition(KeyCondition):
hash_key: HashKey
range_key_condition: Condition
def encode(self, params: Parameters) -> str:
return f"{self.hash_key.encode(params)} AND {self.range_key_condition.encode(params)}"
class Condition(metaclass=abc.ABCMeta):
def __and__(self, other: Condition) -> Condition:
return AndCondition(self, other)
def __or__(self, other: Condition) -> Condition:
return OrCondition(self, other)
def __invert__(self) -> Condition:
return NotCondition(self)
@abc.abstractmethod
def encode(self, params: Parameters) -> str:
pass
@dataclass(frozen=True)
class NotCondition(Condition):
base: Condition
def encode(self, params: Parameters) -> str:
return f"(NOT {self.base.encode(params)})"
@dataclass(frozen=True)
class AndCondition(Condition):
lhs: Condition
rhs: Condition
def encode(self, params: Parameters) -> str:
return f"({self.lhs.encode(params)} AND {self.rhs.encode(params)})"
@dataclass(frozen=True)
class OrCondition(Condition):
lhs: Condition
rhs: Condition
def encode(self, params: Parameters) -> str:
return f"({self.lhs.encode(params)} OR {self.rhs.encode(params)})"
@dataclass(frozen=True)
class DoesNotExist(Condition):
field: F
def encode(self, params: Parameters) -> str:
return f"attribute_not_exists({params.encode_path(self.field.path)})"
@dataclass(frozen=True)
class Exists(Condition):
field: F
def encode(self, params: Parameters) -> str:
return f"attribute_exists({params.encode_path(self.field.path)})"
@dataclass(frozen=True)
class AttributeTypeCondition(Condition):
field: F
attribute_type: AttributeType
def encode(self, params: Parameters) -> str:
return f"attribute_type({params.encode_path(self.field.path)}, {self.attribute_type.value})"
@dataclass(frozen=True)
class BeginsWith(Condition):
field: F
substr: str
def encode(self, params: Parameters) -> str:
return f"begins_with({params.encode_path(self.field.path)}, {params.encode_value(self.substr)})"
@dataclass(frozen=True)
class Between(Condition):
field: F
low: Any
high: Any
def encode(self, params: Parameters) -> str:
return f"{params.encode_path(self.field.path)} BETWEEN {params.encode_value(self.low)} AND {params.encode_value(self.high)}"
@dataclass(frozen=True)
class Contains(Condition):
field: F
value: Union[str, bytes, int, float, decimal.Decimal]
def encode(self, params: Parameters) -> str:
return f"contains({params.encode_path(self.field.path)}, {params.encode_value(self.value)})"
@dataclass(frozen=True)
class In(Condition):
field: F
values: Sequence[Any]
def encode(self, params: Parameters) -> str:
encoded_values = [
encoded
for encoded in (params.encode_value(value) for value in self.values)
if encoded is not None
]
if len(encoded_values) < 1:
raise ValueError("IN Condition requires at least one value")
if len(encoded_values) > 100:
raise ValueError("IN Condition may not contain more than 100 values")
values = ",".join(encoded_values)
return f"{params.encode_path(self.field.path)} IN ({values})"
@dataclass(frozen=True)
class Comparison(Condition):
field: F
operator: str
other: Any
def encode(self, params: Parameters) -> str:
if isinstance(self.other, F):
other = params.encode_path(self.other.path)
else:
other = params.encode_value(self.other)
return f"{params.encode_path(self.field.path)} {self.operator} {other}"
@dataclass(frozen=True)
class Size:
field: F
def equals(self, other: Any) -> Condition:
return SizeCondition(self.field, "=", other)
def not_equals(self, other: Any) -> Condition:
return SizeCondition(self.field, "<>", other)
def gt(self, other: Any) -> Condition:
return SizeCondition(self.field, ">", other)
def gte(self, other: Any) -> Condition:
return SizeCondition(self.field, ">=", other)
def lte(self, other: Any) -> Condition:
return SizeCondition(self.field, "<=", other)
def lt(self, other: Any) -> Condition:
return SizeCondition(self.field, "<", other)
@dataclass(frozen=True)
class SizeCondition(Condition):
field: F
operator: str
value: Any
def encode(self, params: Parameters) -> str:
if isinstance(self.value, F):
value = params.encode_path(self.value.path)
else:
value = params.encode_value(self.value)
return f"size({params.encode_path(self.field.path)}) {self.operator} {value}"
class SetAction(metaclass=abc.ABCMeta):
@abc.abstractmethod
def encode(self, params: Parameters, field: F) -> str:
pass
@dataclass(frozen=True)
class Value(SetAction):
value: Any
def encode(self, params: Parameters, field: F) -> str:
return params.encode_value(self.value)
@dataclass(frozen=True)
class IfNotExists(SetAction):
value: Any
def encode(self, params: Parameters, field: F) -> str:
return f"if_not_exists({params.encode_path(field.path)}, {params.encode_value(self.value)})"
@dataclass(frozen=True)
class Modify(SetAction):
change: Numeric
def encode(self, params: Parameters, field: F) -> str:
if self.change < 0:
operator = "-"
value = self.change * -1
else:
operator = "+"
value = self.change
return (
f"{params.encode_path(field.path)} {operator} {params.encode_value(value)}"
)
@dataclass(frozen=True)
class Append(SetAction):
values: List[Any]
def encode(self, params: Parameters, field: F) -> str:
return f"list_append({params.encode_path(field.path)}, {params.encode_value(self.values)})"
@dataclass(frozen=True)
class UpdateExpression:
set_updates: Dict[F, SetAction] = field(default_factory=dict)
remove: Set[F] = field(default_factory=set)
add: Dict[F, Addable] = field(default_factory=dict)
delete: Dict[F, Union[Set[bytes], Set[Numeric], Set[str]]] = field(
default_factory=dict
)
def __and__(self, other: UpdateExpression) -> UpdateExpression:
return UpdateExpression(
set_updates={**self.set_updates, **other.set_updates},
remove=self.remove.union(other.remove),
add={**self.add, **other.add},
delete={**self.delete, **other.delete},
)
def encode(self, params: Parameters) -> Optional[str]:
bits = []
if self.set_updates:
set_expr = ", ".join(
f"{params.encode_path(field.path)} = {action.encode(params, field)}"
for field, action in self.set_updates.items()
)
bits.append(f"SET {set_expr}")
if self.remove:
remove_expr = ", ".join(
params.encode_path(field.path) for field in self.remove
)
bits.append(f"REMOVE {remove_expr}")
if self.add:
add_expr = ", ".join(
f"{params.encode_path(field.path)} {params.encode_value(value)}"
for field, value in self.add.items()
)
bits.append(f"ADD {add_expr}")
if self.delete:
del_expr = ", ".join(
f"{params.encode_path(field.path)} {params.encode_value(value)}"
for field, value in self.delete.items()
)
bits.append(f"DELETE {del_expr}")
if bits:
return " ".join(bits)
return None
@dataclass(frozen=True)
class FieldList(ProjectionExpression):
fields: List[F]
def __and__(self, field: F) -> ProjectionExpression:
return FieldList(self.fields + [field])
def encode(self, params: Parameters) -> str:
return ",".join(params.encode_path(field.path) for field in self.fields)
| 29.47205 | 132 | 0.6147 |
bab475c1be29c20cdeb89e8d4059d7f6c8d4512d | 3,121 | py | Python | api/tests/test_meta_records.py | jcuna/green-crn | bbb69a50801b68eb94e754d21be171e81e7a78a9 | ["MIT"] | null | null | null | api/tests/test_meta_records.py | jcuna/green-crn | bbb69a50801b68eb94e754d21be171e81e7a78a9 | ["MIT"] | 5 | 2021-06-08T20:28:10.000Z | 2022-02-26T18:56:05.000Z | api/tests/test_meta_records.py | jcuna/green-crn | bbb69a50801b68eb94e754d21be171e81e7a78a9 | ["MIT"] | null | null | null |
from flask.testing import FlaskClient
from tests import endpoint
def test_countries(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/countries'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
for country in resp.json:
if country['name'] == 'Republica Dominicana':
assert len(country['provinces']) == 32
else:
assert len(country['provinces']) == 59
from dal.customer import Country
countries = Country.query.all()
assert len(countries) == 2
for country in countries:
assert len(country.provinces) > 30
def test_source_projects(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/source-projects'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 3
assert 'ENESTAR' == resp.json[0]['label']
def test_project_types(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/project-types'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'COMERCIAL' == resp.json[0]['label']
def test_distributors(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/distributors'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert 'EDENORTE' == resp.json[0]['label']
def test_rates(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/rates'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert 'BTS1' == resp.json[0]['label']
def test_transformers(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/transformers'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'PROPIO' == resp.json[0]['label']
def test_tr_capacities(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/tr-capacities'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert '37.50' == resp.json[0]['label']
def test_phases(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/phases'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'MONOFASICO' == resp.json[0]['label']
def test_tensions(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/tensions'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert 120 == resp.json[0]['label']
def test_panel_models(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/panel-models'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'Q.PEAK L-G5.0.G 375' == resp.json[0]['label']
def test_inverter_models(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/inverter-models'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 9
assert 'SUNNY BOY 3.0-US-40 - 7.7-US-40' == resp.json[0]['label']
| 30.598039 | 77 | 0.684076 |
0ae7c9bff52cd83c1b9cf7a94eb006905e663ce6 | 2,383 | py | Python | app/main/service/event_participant_service.py | luke-truitt/room40-underground | 174f57e0aa4b04bb7c983e10aaf4f4af6589d478 | ["MIT"] | null | null | null | app/main/service/event_participant_service.py | luke-truitt/room40-underground | 174f57e0aa4b04bb7c983e10aaf4f4af6589d478 | ["MIT"] | null | null | null | app/main/service/event_participant_service.py | luke-truitt/room40-underground | 174f57e0aa4b04bb7c983e10aaf4f4af6589d478 | ["MIT"] | 1 | 2021-07-21T13:36:13.000Z | 2021-07-21T13:36:13.000Z |
import uuid
import datetime
from app.main import db
from app.main.model.event_participant import EventParticipant
def save_new_event_participant(data):
try:
new_event_participant = EventParticipant(
event_id=data['event_id'],
participant_id=data['participant_id'],
)
save_changes(new_event_participant)
response_object = {
'status': 'success',
'message': 'Successfully registered.',
}
return response_object, 201
except Exception as e:
response_object = {
'status': 'fail',
'message': 'Some error occurred. Please try again.'
}
return response_object, 401
def update_event_participant(event_participant_id, data):
try:
event_participant = get_a_event_participant(event_participant_id)
event_participant.event_id = data['event_id']
event_participant.participant_id = data['participant_id']
save_changes(event_participant)
response_object = {
'status': 'success',
'message': 'Successfully registered.',
}
return response_object, 201
except Exception as e:
print(e)
response_object = {
'status': 'fail',
'message': 'Some error occurred. Please try again.'
}
return response_object, 401
def delete_a_event_participant(event_participant_id):
try:
EventParticipant.query.filter_by(id=event_participant_id).delete()
db.session.commit()
response_object = {
'status': 'success',
'message': 'Successfully registered.',
}
return response_object, 201
except Exception as e:
print(e)
response_object = {
'status': 'fail',
'message': 'Some error occurred. Please try again.'
}
return response_object, 401
def get_all_event_participants():
return EventParticipant.query.all()
def get_participants_from_event(event_id):
return EventParticipant.query.filter_by(event_id=event_id).all()
def get_a_event_participant(event_participant_id):
return EventParticipant.query.filter_by(id=event_participant_id).first()
def save_changes(data):
db.session.add(data)
db.session.commit()
| 27.709302 | 76 | 0.625682 |
a2084cac35a16ddd379174ca165814644dcf8bcf | 46,133 | py | Python | mypy/server/update.py | Herst/mypy | 437d7467af51199697937b4943be391bfffd23be | ["PSF-2.0"] | null | null | null | mypy/server/update.py | Herst/mypy | 437d7467af51199697937b4943be391bfffd23be | ["PSF-2.0"] | null | null | null | mypy/server/update.py | Herst/mypy | 437d7467af51199697937b4943be391bfffd23be | ["PSF-2.0"] | null | null | null |
"""Update build by processing changes using fine-grained dependencies.
Use fine-grained dependencies to update targets in other modules that
may be affected by externally-visible changes in the changed modules.
This forms the core of the fine-grained incremental daemon mode. This
module is not used at all by the 'classic' (non-daemon) incremental
mode.
Here is some motivation for this mode:
* By keeping program state in memory between incremental runs, we
only have to process changed modules, not their dependencies. The
classic incremental mode has to deserialize the symbol tables of
all dependencies of changed modules, which can be slow for large
programs.
* Fine-grained dependencies allow processing only the relevant parts
of modules indirectly affected by a change. Say, if only one function
in a large module is affected by a change in another module, only this
function is processed. The classic incremental mode always processes
an entire file as a unit, which is typically much slower.
* It's possible to independently process individual modules within an
import cycle (SCC). Small incremental changes can be fast independent
of the size of the related SCC. In classic incremental mode, any change
within a SCC requires the entire SCC to be processed, which can slow
things down considerably.
Some terms:
* A *target* is a function/method definition or the top level of a module.
We refer to targets using their fully qualified name (e.g.
'mod.Cls.method'). Targets are the smallest units of processing during
fine-grained incremental checking.
* A *trigger* represents the properties of a part of a program, and it
gets triggered/fired when these properties change. For example,
'<mod.func>' refers to a module-level function. It gets triggered if
the signature of the function changes, or if the function is removed,
for example.
Some program state is maintained across multiple build increments in
memory:
* The full ASTs of all modules are stored in memory all the time (this
includes the type map).
* A fine-grained dependency map is maintained, which maps triggers to
affected program locations (these can be targets, triggers, or
classes). The latter determine what other parts of a program need to
be processed again due to a fired trigger.
Here's a summary of how a fine-grained incremental program update happens:
* Determine which modules have changes in their source code since the
previous update.
* Process changed modules one at a time. Perform a separate full update
for each changed module, but only report the errors after all modules
have been processed, since the intermediate states can generate bogus
errors due to only seeing a partial set of changes.
* Each changed module is processed in full. We parse the module, and
run semantic analysis to create a new AST and symbol table for the
module. Reuse the existing ASTs and symbol tables of modules that
have no changes in their source code. At the end of this stage, we have
two ASTs and symbol tables for the changed module (the old and the new
versions). The latter AST has not yet been type checked.
* Take a snapshot of the old symbol table. This is used later to determine
which properties of the module have changed and which triggers to fire.
* Merge the old AST with the new AST, preserving the identities of
externally visible AST nodes for which we can find a corresponding node
in the new AST. (Look at mypy.server.astmerge for the details.) This
way all external references to AST nodes in the changed module will
continue to point to the right nodes (assuming they still have a valid
target).
* Type check the new module.
* Take another snapshot of the symbol table of the changed module.
Look at the differences between the old and new snapshots to determine
which parts of the changed modules have changed. The result is a set of
fired triggers.
* Using the dependency map and the fired triggers, decide which other
targets have become stale and need to be reprocessed.
* Create new fine-grained dependencies for the changed module. We don't
garbage collect old dependencies, since extra dependencies are relatively
harmless (they take some memory and can theoretically slow things down
a bit by causing redundant work). This is implemented in
mypy.server.deps.
* Strip the stale AST nodes that we found above. This returns them to a
state resembling the end of semantic analysis pass 1. We'll run semantic
analysis again on the existing AST nodes, and since semantic analysis
is not idempotent, we need to revert some changes made during semantic
analysis. This is implemented in mypy.server.aststrip.
* Run semantic analyzer passes 2 and 3 on the stale AST nodes, and type
check them. We also need to do the symbol table snapshot comparison
dance to find any changes, and we need to merge ASTs to preserve AST node
identities.
* If some triggers have been fired, continue processing and repeat the
previous steps until no triggers are fired.
This module is tested using end-to-end fine-grained incremental mode
test cases (test-data/unit/fine-grained*.test).
"""
import os
import time
import os.path
from typing import (
Dict, List, Set, Tuple, Iterable, Union, Optional, Mapping, NamedTuple, Callable,
Sequence
)
from mypy.build import (
BuildManager, State, BuildSource, BuildResult, Graph, load_graph, module_not_found,
process_fresh_modules,
PRI_INDIRECT, DEBUG_FINE_GRAINED,
)
from mypy.checker import DeferredNode
from mypy.errors import Errors, CompileError
from mypy.nodes import (
MypyFile, FuncDef, TypeInfo, Expression, SymbolNode, Var, FuncBase, ClassDef, Decorator,
Import, ImportFrom, OverloadedFuncDef, SymbolTable, LambdaExpr
)
from mypy.options import Options
from mypy.types import Type
from mypy.fscache import FileSystemCache
from mypy.semanal import apply_semantic_analyzer_patches
from mypy.server.astdiff import (
snapshot_symbol_table, compare_symbol_table_snapshots, SnapshotItem
)
from mypy.server.astmerge import merge_asts
from mypy.server.aststrip import strip_target
from mypy.server.deps import get_dependencies_of_target
from mypy.server.target import module_prefix, split_target
from mypy.server.trigger import make_trigger, WILDCARD_TAG
from mypy.typestate import TypeState
MAX_ITER = 1000
class FineGrainedBuildManager:
def __init__(self, result: BuildResult) -> None:
"""Initialize fine-grained build based on a batch build.
Args:
result: Result from the initialized build.
The manager and graph will be taken over by this class.
manager: State of the build (mutated by this class)
graph: Additional state of the build (mutated by this class)
"""
manager = result.manager
self.manager = manager
self.graph = result.graph
self.previous_modules = get_module_to_path_map(self.graph)
self.deps = get_all_dependencies(manager, self.graph)
self.previous_targets_with_errors = manager.errors.targets()
self.previous_messages = result.errors[:]
# Module, if any, that had blocking errors in the last run as (id, path) tuple.
self.blocking_error = None # type: Optional[Tuple[str, str]]
# Module that we haven't processed yet but that are known to be stale.
self.stale = [] # type: List[Tuple[str, str]]
# Disable the cache so that load_graph doesn't try going back to disk
# for the cache.
self.manager.cache_enabled = False
# Some hints to the test suite about what is going on:
# Active triggers during the last update
self.triggered = [] # type: List[str]
# Modules passed to update during the last update
self.changed_modules = [] # type: List[Tuple[str, str]]
# Modules processed during the last update
self.updated_modules = [] # type: List[str]
def update(self,
changed_modules: List[Tuple[str, str]],
removed_modules: List[Tuple[str, str]]) -> List[str]:
"""Update previous build result by processing changed modules.
Also propagate changes to other modules as needed, but only process
those parts of other modules that are affected by the changes. Retain
the existing ASTs and symbol tables of unaffected modules.
Reuses original BuildManager and Graph.
Args:
changed_modules: Modules changed since the previous update/build; each is
a (module id, path) tuple. Includes modified and added modules.
Assume this is correct; it's not validated here.
removed_modules: Modules that have been deleted since the previous update
or removed from the build.
Returns:
A list of errors.
"""
changed_modules = changed_modules + removed_modules
removed_set = {module for module, _ in removed_modules}
self.changed_modules = changed_modules
if not changed_modules:
return self.previous_messages
# Reset find_module's caches for the new build.
self.manager.find_module_cache.clear()
self.triggered = []
self.updated_modules = []
changed_modules = dedupe_modules(changed_modules + self.stale)
initial_set = {id for id, _ in changed_modules}
self.manager.log_fine_grained('==== update %s ====' % ', '.join(
repr(id) for id, _ in changed_modules))
if self.previous_targets_with_errors and is_verbose(self.manager):
self.manager.log_fine_grained('previous targets with errors: %s' %
sorted(self.previous_targets_with_errors))
if self.blocking_error:
# Handle blocking errors first. We'll exit as soon as we find a
# module that still has blocking errors.
self.manager.log_fine_grained('existing blocker: %s' % self.blocking_error[0])
changed_modules = dedupe_modules([self.blocking_error] + changed_modules)
self.blocking_error = None
while True:
result = self.update_one(changed_modules, initial_set, removed_set)
changed_modules, (next_id, next_path), blocker_messages = result
if blocker_messages is not None:
self.blocking_error = (next_id, next_path)
self.stale = changed_modules
messages = blocker_messages
break
# It looks like we are done processing everything, so now
# reprocess all targets with errors. We are careful to
# support the possibility that reprocessing an errored module
# might trigger loading of a module, but I am not sure
# if this can really happen.
if not changed_modules:
# N.B: We just checked next_id, so manager.errors contains
# the errors from it. Thus we consider next_id up to date
# when propagating changes from the errored targets,
# which prevents us from reprocessing errors in it.
changed_modules = propagate_changes_using_dependencies(
self.manager, self.graph, self.deps, set(), {next_id},
self.previous_targets_with_errors)
changed_modules = dedupe_modules(changed_modules)
if not changed_modules:
# Preserve state needed for the next update.
self.previous_targets_with_errors = self.manager.errors.targets()
messages = self.manager.errors.new_messages()
break
self.previous_messages = messages[:]
return messages
def update_one(self,
changed_modules: List[Tuple[str, str]],
initial_set: Set[str],
removed_set: Set[str]) -> Tuple[List[Tuple[str, str]],
Tuple[str, str],
Optional[List[str]]]:
"""Process a module from the list of changed modules.
Returns:
Tuple with these items:
- Updated list of pending changed modules as (module id, path) tuples
- Module which was actually processed as (id, path) tuple
- If there was a blocking error, the error messages from it
"""
t0 = time.time()
next_id, next_path = changed_modules.pop(0)
if next_id not in self.previous_modules and next_id not in initial_set:
self.manager.log_fine_grained('skip %r (module not in import graph)' % next_id)
return changed_modules, (next_id, next_path), None
result = self.update_module(next_id, next_path, next_id in removed_set)
remaining, (next_id, next_path), blocker_messages = result
changed_modules = [(id, path) for id, path in changed_modules
if id != next_id]
changed_modules = dedupe_modules(remaining + changed_modules)
t1 = time.time()
self.manager.log_fine_grained(
"update once: {} in {:.3f}s - {} left".format(
next_id, t1 - t0, len(changed_modules)))
return changed_modules, (next_id, next_path), blocker_messages
def update_module(self,
module: str,
path: str,
force_removed: bool) -> Tuple[List[Tuple[str, str]],
Tuple[str, str],
Optional[List[str]]]:
"""Update a single modified module.
If the module contains imports of previously unseen modules, only process one of
the new modules and return the remaining work to be done.
Args:
module: Id of the module
path: File system path of the module
force_removed: If True, consider module removed from the build even if path
exists (used for removing an existing file from the build)
Returns:
Tuple with these items:
- Remaining modules to process as (module id, path) tuples
- Module which was actually processed as (id, path) tuple
- If there was a blocking error, the error messages from it
"""
self.manager.log_fine_grained('--- update single %r ---' % module)
self.updated_modules.append(module)
manager = self.manager
previous_modules = self.previous_modules
graph = self.graph
# If this is an already existing module, make sure that we have
# its tree loaded so that we can snapshot it for comparison.
ensure_trees_loaded(manager, graph, [module])
# Record symbol table snapshot of old version the changed module.
old_snapshots = {} # type: Dict[str, Dict[str, SnapshotItem]]
if module in manager.modules:
snapshot = snapshot_symbol_table(module, manager.modules[module].names)
old_snapshots[module] = snapshot
manager.errors.reset()
result = update_module_isolated(module, path, manager, previous_modules, graph,
force_removed)
if isinstance(result, BlockedUpdate):
# Blocking error -- just give up
module, path, remaining, errors = result
self.previous_modules = get_module_to_path_map(graph)
return remaining, (module, path), errors
assert isinstance(result, NormalUpdate) # Work around #4124
module, path, remaining, tree = result
# TODO: What to do with stale dependencies?
triggered = calculate_active_triggers(manager, old_snapshots, {module: tree})
if is_verbose(self.manager):
filtered = [trigger for trigger in triggered
if not trigger.endswith('__>')]
self.manager.log_fine_grained('triggered: %r' % sorted(filtered))
self.triggered.extend(triggered | self.previous_targets_with_errors)
collect_dependencies([module], self.deps, graph)
remaining += propagate_changes_using_dependencies(
manager, graph, self.deps, triggered,
{module},
targets_with_errors=set())
# Preserve state needed for the next update.
self.previous_targets_with_errors.update(manager.errors.targets())
self.previous_modules = get_module_to_path_map(graph)
return remaining, (module, path), None
def find_unloaded_deps(manager: BuildManager, graph: Dict[str, State],
initial: Sequence[str]) -> List[str]:
"""Find all the deps of the nodes in initial that haven't had their tree loaded.
The key invariant here is that if a module is loaded, so are all
of its dependencies. This means that when we encounter a loaded
module, we don't need to explore its dependencies. (This
invariant is slightly violated when dependencies are added, which
can be handled by calling find_unloaded_deps directly on the new
dependencies.)
"""
worklist = list(initial)
seen = set() # type: Set[str]
unloaded = []
while worklist:
node = worklist.pop()
if node in seen or node not in graph:
continue
seen.add(node)
if node not in manager.modules:
ancestors = graph[node].ancestors or []
worklist.extend(graph[node].dependencies + ancestors)
unloaded.append(node)
return unloaded
def ensure_trees_loaded(manager: BuildManager, graph: Dict[str, State],
initial: Sequence[str]) -> None:
"""Ensure that the modules in initial and their deps have loaded trees."""
to_process = find_unloaded_deps(manager, graph, initial)
if to_process:
if is_verbose(manager):
manager.log_fine_grained("Calling process_fresh_modules on set of size {} ({})".format(
len(to_process), sorted(to_process)))
process_fresh_modules(graph, to_process, manager)
def get_all_dependencies(manager: BuildManager, graph: Dict[str, State]) -> Dict[str, Set[str]]:
"""Return the fine-grained dependency map for an entire build."""
# Deps for each module were computed during build() or loaded from the cache.
deps = {} # type: Dict[str, Set[str]]
collect_dependencies(graph, deps, graph)
TypeState.add_all_protocol_deps(deps)
return deps
# The result of update_module_isolated when no blockers, with these items:
#
# - Id of the changed module (can be different from the module argument)
# - Path of the changed module
# - New AST for the changed module (None if module was deleted)
# - Remaining changed modules that are not processed yet as (module id, path)
# tuples (non-empty if the original changed module imported other new
# modules)
NormalUpdate = NamedTuple('NormalUpdate', [('module', str),
('path', str),
('remaining', List[Tuple[str, str]]),
('tree', Optional[MypyFile])])
# The result of update_module_isolated when there is a blocking error. Items
# are similar to NormalUpdate (but there are fewer).
BlockedUpdate = NamedTuple('BlockedUpdate', [('module', str),
('path', str),
('remaining', List[Tuple[str, str]]),
('messages', List[str])])
UpdateResult = Union[NormalUpdate, BlockedUpdate]
def update_module_isolated(module: str,
path: str,
manager: BuildManager,
previous_modules: Dict[str, str],
graph: Graph,
force_removed: bool) -> UpdateResult:
"""Build a new version of one changed module only.
Don't propagate changes elsewhere in the program. Raise CompileError on
encountering a blocking error.
Args:
module: Changed module (modified, created or deleted)
path: Path of the changed module
manager: Build manager
graph: Build graph
force_removed: If True, consider the module removed from the build even if the
file exists
Returns a named tuple describing the result (see above for details).
"""
if module not in graph:
manager.log_fine_grained('new module %r' % module)
if not manager.fscache.isfile(path) or force_removed:
delete_module(module, path, graph, manager)
return NormalUpdate(module, path, [], None)
sources = get_sources(manager.fscache, previous_modules, [(module, path)])
if module in manager.missing_modules:
manager.missing_modules.remove(module)
orig_module = module
orig_state = graph.get(module)
orig_tree = manager.modules.get(module)
def restore(ids: List[str]) -> None:
# For each of the modules in ids, restore that id's old
# manager.modules and graphs entries. (Except for the original
# module, this means deleting them.)
for id in ids:
if id == orig_module and orig_tree:
manager.modules[id] = orig_tree
elif id in manager.modules:
del manager.modules[id]
if id == orig_module and orig_state:
graph[id] = orig_state
elif id in graph:
del graph[id]
new_modules = [] # type: List[State]
try:
if module in graph:
del graph[module]
load_graph(sources, manager, graph, new_modules)
except CompileError as err:
# Parse error somewhere in the program -- a blocker
assert err.module_with_blocker
restore([module] + [st.id for st in new_modules])
return BlockedUpdate(err.module_with_blocker, path, [], err.messages)
# Reparsing the file may have brought in dependencies that we
# didn't have before. Make sure that they are loaded to restore
# the invariant that a module having a loaded tree implies that
# its dependencies do as well.
ensure_trees_loaded(manager, graph, graph[module].dependencies)
# Find any other modules brought in by imports.
changed_modules = [(st.id, st.xpath) for st in new_modules]
# If there are multiple modules to process, only process one of them and return
# the remaining ones to the caller.
if len(changed_modules) > 1:
# As an optimization, look for a module that imports no other changed modules.
module, path = find_relative_leaf_module(changed_modules, graph)
changed_modules.remove((module, path))
remaining_modules = changed_modules
# The remaining modules haven't been processed yet so drop them.
restore([id for id, _ in remaining_modules])
manager.log_fine_grained('--> %r (newly imported)' % module)
else:
remaining_modules = []
state = graph[module]
# Process the changed file.
state.parse_file()
# TODO: state.fix_suppressed_dependencies()?
try:
state.semantic_analysis()
except CompileError as err:
# There was a blocking error, so module AST is incomplete. Restore old modules.
restore([module])
return BlockedUpdate(module, path, remaining_modules, err.messages)
state.semantic_analysis_pass_three()
state.semantic_analysis_apply_patches()
# Merge old and new ASTs.
assert state.tree is not None, "file must be at least parsed"
new_modules_dict = {module: state.tree} # type: Dict[str, Optional[MypyFile]]
replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)
# Perform type checking.
state.type_checker().reset()
state.type_check_first_pass()
state.type_check_second_pass()
state.compute_fine_grained_deps()
state.finish_passes()
graph[module] = state
return NormalUpdate(module, path, remaining_modules, state.tree)
def find_relative_leaf_module(modules: List[Tuple[str, str]], graph: Graph) -> Tuple[str, str]:
"""Find a module in a list that directly imports no other module in the list.
If no such module exists, return the lexicographically first module from the list.
Always return one of the items in the modules list.
NOTE: If both 'abc' and 'typing' have changed, an effect of the above rule is that
we prefer 'abc', even if both are in the same SCC. This works around a false
positive in 'typing', at least in tests.
Args:
modules: List of (module, path) tuples (non-empty)
graph: Program import graph that contains all modules in the module list
"""
assert modules
# Sort for repeatable results.
modules = sorted(modules)
module_set = {module for module, _ in modules}
for module, path in modules:
state = graph[module]
if len(set(state.dependencies) & module_set) == 0:
# Found it!
return module, path
# Could not find any. Just return the first module (by lexicographic order).
return modules[0]
def delete_module(module_id: str,
path: str,
graph: Graph,
manager: BuildManager) -> None:
manager.log_fine_grained('delete module %r' % module_id)
# TODO: Remove deps for the module (this only affects memory use, not correctness)
if module_id in graph:
del graph[module_id]
if module_id in manager.modules:
del manager.modules[module_id]
components = module_id.split('.')
if len(components) > 1:
# Delete reference to module in parent module.
parent_id = '.'.join(components[:-1])
# If parent module is ignored, it won't be included in the modules dictionary.
if parent_id in manager.modules:
parent = manager.modules[parent_id]
if components[-1] in parent.names:
del parent.names[components[-1]]
# If the module is removed from the build but still exists, then
# we mark it as missing so that it will get picked up by import from still.
if manager.fscache.isfile(path):
manager.missing_modules.add(module_id)
def dedupe_modules(modules: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
seen = set() # type: Set[str]
result = []
for id, path in modules:
if id not in seen:
seen.add(id)
result.append((id, path))
return result
def get_module_to_path_map(graph: Graph) -> Dict[str, str]:
return {module: node.xpath
for module, node in graph.items()}
def get_sources(fscache: FileSystemCache,
modules: Dict[str, str],
changed_modules: List[Tuple[str, str]]) -> List[BuildSource]:
sources = []
for id, path in changed_modules:
if fscache.isfile(path):
sources.append(BuildSource(path, id, None))
return sources
def collect_dependencies(new_modules: Iterable[str],
deps: Dict[str, Set[str]],
graph: Dict[str, State]) -> None:
for id in new_modules:
if id not in graph:
continue
for trigger, targets in graph[id].fine_grained_deps.items():
deps.setdefault(trigger, set()).update(targets)
# Merge also the newly added protocol deps.
TypeState.update_protocol_deps(deps)
def calculate_active_triggers(manager: BuildManager,
old_snapshots: Dict[str, Dict[str, SnapshotItem]],
new_modules: Dict[str, Optional[MypyFile]]) -> Set[str]:
"""Determine activated triggers by comparing old and new symbol tables.
For example, if only the signature of function m.f is different in the new
symbol table, return {'<m.f>'}.
"""
names = set() # type: Set[str]
for id in new_modules:
snapshot1 = old_snapshots.get(id)
if snapshot1 is None:
names.add(id)
snapshot1 = {}
new = new_modules[id]
if new is None:
snapshot2 = snapshot_symbol_table(id, SymbolTable())
names.add(id)
else:
snapshot2 = snapshot_symbol_table(id, new.names)
diff = compare_symbol_table_snapshots(id, snapshot1, snapshot2)
package_nesting_level = id.count('.')
for item in diff.copy():
if (item.count('.') <= package_nesting_level + 1
and item.split('.')[-1] not in ('__builtins__',
'__file__',
'__name__',
'__package__',
'__doc__')):
# Activate catch-all wildcard trigger for top-level module changes (used for
# "from m import *"). This also gets triggered by changes to module-private
# entries, but as these unneeded dependencies only result in extra processing,
# it's a minor problem.
#
# TODO: Some __* names cause mistriggers. Fix the underlying issue instead of
# special casing them here.
diff.add(id + WILDCARD_TAG)
if item.count('.') > package_nesting_level + 1:
# These are for changes within classes, used by protocols.
diff.add(item.rsplit('.', 1)[0] + WILDCARD_TAG)
names |= diff
return {make_trigger(name) for name in names}
def replace_modules_with_new_variants(
manager: BuildManager,
graph: Dict[str, State],
old_modules: Dict[str, Optional[MypyFile]],
new_modules: Dict[str, Optional[MypyFile]]) -> None:
"""Replace modules with newly builds versions.
Retain the identities of externally visible AST nodes in the
old ASTs so that references to the affected modules from other
modules will still be valid (unless something was deleted or
replaced with an incompatible definition, in which case there
will be dangling references that will be handled by
propagate_changes_using_dependencies).
"""
for id in new_modules:
preserved_module = old_modules.get(id)
new_module = new_modules[id]
if preserved_module and new_module is not None:
merge_asts(preserved_module, preserved_module.names,
new_module, new_module.names)
manager.modules[id] = preserved_module
graph[id].tree = preserved_module
def propagate_changes_using_dependencies(
manager: BuildManager,
graph: Dict[str, State],
deps: Dict[str, Set[str]],
triggered: Set[str],
up_to_date_modules: Set[str],
targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
"""Transitively rechecks targets based on triggers and the dependency map.
Returns a list of (module id, path) tuples representing modules that contain
a target that needs to be reprocessed but that has not been parsed yet."""
num_iter = 0
remaining_modules = [] # type: List[Tuple[str, str]]
# Propagate changes until nothing visible has changed during the last
# iteration.
while triggered or targets_with_errors:
num_iter += 1
if num_iter > MAX_ITER:
raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)
todo, unloaded, stale_protos = find_targets_recursive(manager, graph,
triggered, deps, up_to_date_modules)
# TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
# Also process targets that used to have errors, as otherwise some
# errors might be lost.
for target in targets_with_errors:
id = module_prefix(graph, target)
if id is not None and id not in up_to_date_modules:
if id not in todo:
todo[id] = set()
manager.log_fine_grained('process target with error: %s' % target)
more_nodes, _ = lookup_target(manager, target)
todo[id].update(more_nodes)
triggered = set()
# First invalidate subtype caches in all stale protocols.
# We need to do this to avoid false negatives if the protocol itself is
# unchanged, but was marked stale because its sub- (or super-) type changed.
for info in stale_protos:
TypeState.reset_subtype_caches_for(info)
# Then fully reprocess all targets.
# TODO: Preserve order (set is not optimal)
for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
assert id not in up_to_date_modules
triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
# Changes elsewhere may require us to reprocess modules that were
# previously considered up to date. For example, there may be a
# dependency loop that loops back to an originally processed module.
up_to_date_modules = set()
targets_with_errors = set()
if is_verbose(manager):
manager.log_fine_grained('triggered: %r' % list(triggered))
return remaining_modules
def find_targets_recursive(
manager: BuildManager,
graph: Graph,
triggers: Set[str],
deps: Dict[str, Set[str]],
up_to_date_modules: Set[str]) -> Tuple[Dict[str, Set[DeferredNode]],
Set[str], Set[TypeInfo]]:
"""Find names of all targets that need to reprocessed, given some triggers.
Returns: A tuple containing a:
* Dictionary from module id to a set of stale targets.
* A set of module ids for unparsed modules with stale targets.
"""
result = {} # type: Dict[str, Set[DeferredNode]]
worklist = triggers
processed = set() # type: Set[str]
stale_protos = set() # type: Set[TypeInfo]
unloaded_files = set() # type: Set[str]
# Find AST nodes corresponding to each target.
#
# TODO: Don't rely on a set, since the items are in an unpredictable order.
while worklist:
processed |= worklist
current = worklist
worklist = set()
for target in current:
if target.startswith('<'):
worklist |= deps.get(target, set()) - processed
else:
module_id = module_prefix(graph, target)
if module_id is None:
# Deleted module.
continue
if module_id in up_to_date_modules:
# Already processed.
continue
if (module_id not in manager.modules
or manager.modules[module_id].is_cache_skeleton):
# We haven't actually parsed and checked the module, so we don't have
# access to the actual nodes.
# Add it to the queue of files that need to be processed fully.
unloaded_files.add(module_id)
continue
if module_id not in result:
result[module_id] = set()
manager.log_fine_grained('process: %s' % target)
deferred, stale_proto = lookup_target(manager, target)
if stale_proto:
stale_protos.add(stale_proto)
result[module_id].update(deferred)
return result, unloaded_files, stale_protos
def reprocess_nodes(manager: BuildManager,
graph: Dict[str, State],
module_id: str,
nodeset: Set[DeferredNode],
deps: Dict[str, Set[str]]) -> Set[str]:
"""Reprocess a set of nodes within a single module.
Return fired triggers.
"""
if module_id not in graph:
manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
module_id)
return set()
file_node = manager.modules[module_id]
old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
old_symbols = {name: names.copy() for name, names in old_symbols.items()}
old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
def key(node: DeferredNode) -> int:
# Unlike modules which are sorted by name within SCC,
# nodes within the same module are sorted by line number, because
# this is how they are processed in normal mode.
return node.node.line
nodes = sorted(nodeset, key=key)
options = graph[module_id].options
manager.errors.set_file_ignored_lines(
file_node.path, file_node.ignored_lines, options.ignore_errors)
targets = set()
for node in nodes:
target = target_from_node(module_id, node.node)
if target is not None:
targets.add(target)
manager.errors.clear_errors_in_targets(file_node.path, targets)
# Strip semantic analysis information.
for deferred in nodes:
strip_target(deferred.node)
semantic_analyzer = manager.semantic_analyzer
patches = [] # type: List[Tuple[int, Callable[[], None]]]
# Second pass of semantic analysis. We don't redo the first pass, because it only
# does local things that won't go stale.
for deferred in nodes:
with semantic_analyzer.file_context(
file_node=file_node,
fnam=file_node.path,
options=options,
active_type=deferred.active_typeinfo):
manager.semantic_analyzer.refresh_partial(deferred.node, patches)
# Third pass of semantic analysis.
for deferred in nodes:
with semantic_analyzer.file_context(
file_node=file_node,
fnam=file_node.path,
options=options,
active_type=deferred.active_typeinfo,
scope=manager.semantic_analyzer_pass3.scope):
manager.semantic_analyzer_pass3.refresh_partial(deferred.node, patches)
with semantic_analyzer.file_context(
file_node=file_node,
fnam=file_node.path,
options=options,
active_type=None):
apply_semantic_analyzer_patches(patches)
# Merge symbol tables to preserve identities of AST nodes. The file node will remain
# the same, but other nodes may have been recreated with different identities, such as
# NamedTuples defined using assignment statements.
new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
for name in old_symbols:
if name in new_symbols:
merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])
# Type check.
checker = graph[module_id].type_checker()
checker.reset()
# We seem to need additional passes in fine-grained incremental mode.
checker.pass_num = 0
checker.last_pass = 3
more = checker.check_second_pass(nodes)
while more:
more = False
if graph[module_id].type_checker().check_second_pass():
more = True
new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
# Check if any attribute types were changed and need to be propagated further.
changed = compare_symbol_table_snapshots(file_node.fullname(),
old_symbols_snapshot,
new_symbols_snapshot)
new_triggered = {make_trigger(name) for name in changed}
# Dependencies may have changed.
update_deps(module_id, nodes, graph, deps, options)
# Report missing imports.
graph[module_id].verify_dependencies()
return new_triggered
def find_symbol_tables_recursive(prefix: str, symbols: SymbolTable) -> Dict[str, SymbolTable]:
"""Find all nested symbol tables.
Args:
prefix: Full name prefix (used for return value keys and to filter result so that
cross references to other modules aren't included)
symbols: Root symbol table
Returns a dictionary from full name to corresponding symbol table.
"""
result = {}
result[prefix] = symbols
for name, node in symbols.items():
if isinstance(node.node, TypeInfo) and node.node.fullname().startswith(prefix + '.'):
more = find_symbol_tables_recursive(prefix + '.' + name, node.node.names)
result.update(more)
return result
def update_deps(module_id: str,
nodes: List[DeferredNode],
graph: Dict[str, State],
deps: Dict[str, Set[str]],
options: Options) -> None:
for deferred in nodes:
node = deferred.node
type_map = graph[module_id].type_map()
tree = graph[module_id].tree
assert tree is not None, "Tree must be processed at this stage"
new_deps = get_dependencies_of_target(module_id, tree, node, type_map,
options.python_version)
for trigger, targets in new_deps.items():
deps.setdefault(trigger, set()).update(targets)
# Merge also the newly added protocol deps (if any).
TypeState.update_protocol_deps(deps)
def lookup_target(manager: BuildManager,
target: str) -> Tuple[List[DeferredNode], Optional[TypeInfo]]:
"""Look up a target by fully-qualified name.
The first item in the return tuple is a list of deferred nodes that
needs to be reprocessed. If the target represents a TypeInfo corresponding
to a protocol, return it as a second item in the return tuple, otherwise None.
"""
def not_found() -> None:
manager.log_fine_grained(
"Can't find matching target for %s (stale dependency?)" % target)
modules = manager.modules
items = split_target(modules, target)
if items is None:
not_found() # Stale dependency
return [], None
module, rest = items
if rest:
components = rest.split('.')
else:
components = []
node = modules[module] # type: Optional[SymbolNode]
file = None # type: Optional[MypyFile]
active_class = None
active_class_name = None
for c in components:
if isinstance(node, TypeInfo):
active_class = node
active_class_name = node.name()
if isinstance(node, MypyFile):
file = node
if (not isinstance(node, (MypyFile, TypeInfo))
or c not in node.names):
not_found() # Stale dependency
return [], None
node = node.names[c].node
if isinstance(node, TypeInfo):
# A ClassDef target covers the body of the class and everything defined
# within it. To get the body we include the entire surrounding target,
# typically a module top-level, since we don't support processing class
        # bodies as separate entities for simplicity.
assert file is not None
if node.fullname() != target:
# This is a reference to a different TypeInfo, likely due to a stale dependency.
# Processing them would spell trouble -- for example, we could be refreshing
# a deserialized TypeInfo with missing attributes.
not_found()
return [], None
result = [DeferredNode(file, None, None)]
stale_info = None # type: Optional[TypeInfo]
if node.is_protocol:
stale_info = node
for name, symnode in node.names.items():
node = symnode.node
if isinstance(node, FuncDef):
method, _ = lookup_target(manager, target + '.' + name)
result.extend(method)
return result, stale_info
if isinstance(node, Decorator):
# Decorator targets actually refer to the function definition only.
node = node.func
if not isinstance(node, (FuncDef,
MypyFile,
OverloadedFuncDef)):
# The target can't be refreshed. It's possible that the target was
# changed to another type and we have a stale dependency pointing to it.
not_found()
return [], None
if node.fullname() != target:
# Stale reference points to something unexpected. We shouldn't process since the
# context will be wrong and it could be a partially initialized deserialized node.
not_found()
return [], None
return [DeferredNode(node, active_class_name, active_class)], None
def is_verbose(manager: BuildManager) -> bool:
return manager.options.verbosity >= 1 or DEBUG_FINE_GRAINED
def target_from_node(module: str,
node: Union[FuncDef, MypyFile, OverloadedFuncDef, LambdaExpr]
) -> Optional[str]:
"""Return the target name corresponding to a deferred node.
Args:
module: Must be module id of the module that defines 'node'
Returns the target name, or None if the node is not a valid target in the given
module (for example, if it's actually defined in another module).
"""
if isinstance(node, MypyFile):
if module != node.fullname():
# Actually a reference to another module -- likely a stale dependency.
return None
return module
elif isinstance(node, (OverloadedFuncDef, FuncDef)):
if node.info is not None:
return '%s.%s' % (node.info.fullname(), node.name())
else:
return '%s.%s' % (module, node.name())
else:
assert False, "Lambda expressions can't be deferred in fine-grained incremental mode"
| 42.834726 | 99 | 0.645482 |
91392ce0a72738fb127b69128b82c3b92c9fc56d | 16,688 | py | Python | sdno-mpls-optimizer/tunnel_handler.py | openov2/sdno-optimize | a169b0d31541126db61af77ea3acb66d4a91072f | [
"CC-BY-4.0"
] | null | null | null | sdno-mpls-optimizer/tunnel_handler.py | openov2/sdno-optimize | a169b0d31541126db61af77ea3acb66d4a91072f | [
"CC-BY-4.0"
] | null | null | null | sdno-mpls-optimizer/tunnel_handler.py | openov2/sdno-optimize | a169b0d31541126db61af77ea3acb66d4a91072f | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tornado.web
import json
import time
from db_util import mysql_utils
ip2num = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])])
num2ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
def get_mask_int(mask):
sum=0
for i in range(mask):
sum = sum*2+1
sum = sum << (32-mask)
return sum
class ms_tunnel_handler(tornado.web.RequestHandler):
def initialize(self):
super(ms_tunnel_handler, self).initialize()
self.resp_func = {'ms_tunnel_get_lsp_by_flow': self.get_lsp_by_flow, 'ms_tunnel_get_lsp':self.get_lsp, \
'ms_tunnel_add_lsp':self.add_lsp, 'ms_tunnel_del_lsp':self.del_lsp, \
'ms_tunnel_update_lsp':self.update_lsp, 'ms_tunnel_add_flow':self.add_flow, \
'ms_tunnel_del_flow':self.del_flow, 'ms_tunnel_update_flow':self.update_flow,
'ms_tunnel_get_flow':self.get_flow,
'ms_tunnel_get_customer_by_lsp':self.get_customer_by_lsp, 'ms_tunnel_get_lsp_by_cust':self.get_lsp_by_customer, \
'ms_tunnel_add_customer_to_lsp':self.add_customer_to_lsp, 'ms_tunnel_del_customer_from_lsp':self.del_customer_from_lsp,
'ms_tunnel_get_cust_by_lsp':self.get_customer_by_lsp,
'ms_tunnel_get_lsp_by_uids':self.get_lsp_by_uids
}
self.log = 0
pass
def form_response(self, req):
resp = {}
resp['response'] = req['request']
#resp['ts'] = req['ts']
resp['ts'] = time.strftime("%Y%m%d%H%M%S")
resp['trans_id'] = req['trans_id']
resp['err_code'] = 0
resp['msg'] = ''
self.set_header('Content-Type', 'application/json')
return resp
def post(self):
ctnt = self.request.body
if self.log == 1:
print 'The request:'
print str(ctnt)
req = json.loads(str(ctnt))
resp = self.form_response(req)
result = self.resp_func[req['request']](req['args'])
resp['result'] = result
if self.log == 1:
print 'response:'
print json.dumps(resp)
self.write(json.dumps(resp))
pass
def get_lsp_by_flow(self,args):
flow_uids = args['flow_uids']
flow_lsps = {}
db = mysql_utils('tunnel')
for fid in flow_uids:
sql_str = 'select * from t_assigned_flow inner join t_lsp on t_assigned_flow.lsp_id = t_lsp.id ' + \
'and t_assigned_flow.flow_id=%s' % fid
# sql_str = 'select * from t_assigned_flow where t_assigned_flow.flow_id=%s' % fid
# print sql_str
results = db.exec_sql(sql_str)
lsps = []
if results:
for res in results:
one_lsp = {}
one_lsp['lsp_uid'] = str(res[1])
one_lsp['flow_user_data'] = str(res[9])
# one_lsp['ip'] = results[0][3] + '/' + str(results[0][4])
# one_lsp['customer_id'] = str(results[0][6])
one_lsp['lsp_name'] = res[12]
# one_lsp['bandwidth'] = results[0][18]
# one_lsp['delay'] = results[0][19]
# one_lsp['priority'] = results[0][20]
# one_lsp['status'] = results[0][23]
one_lsp['lsp_user_data'] = res[25]
one_lsp['hop_list'] = res[26]
# one_lsp['path'] = results[0][26]
lsps.append(one_lsp)
else:
pass
flow_lsps[fid] = lsps
db.close()
return flow_lsps
def get_lsp(self, args):
if 'lsp_uids' in args:
uids = args['lsp_uids']
p_list = '(' + ','.join(str(uid) for uid in uids) + ')'
sql_str = 'select * from t_lsp where t_lsp.status < 2 and t_lsp.id in %s' % p_list
elif 'from_router_uid' in args:
nid = args['from_router_uid']
sql_str = 'select * from t_lsp where t_lsp.status < 2 and t_lsp.from_router_id = \'%s\'' % str(nid)
else:
sql_str = 'select * from t_lsp where t_lsp.status < 2'
db = mysql_utils('tunnel')
results = db.exec_sql(sql_str)
db.close()
lsps = {}
lsp = []
if results is not None:
for result in results:
one_lsp = {}
one_lsp['uid'] = str(result[0])
one_lsp['name'] = result[1]
one_lsp['from_router_uid'] = result[2]
one_lsp['to_router_uid'] = result[5]
one_lsp['bandwidth'] = result[8]
one_lsp['delay'] = result[9]
one_lsp['priority'] = result[10]
one_lsp['control_type'] = result[11]
one_lsp['path_type'] = result[12]
one_lsp['status'] = result[13]
one_lsp['user_data'] = None if result[14] is None or not result[14].startswith('{') else json.loads(result[14])
one_lsp['hop_list'] = [] if result[15] is None else [x for x in result[15].split(',')]
one_lsp['path'] = [] if result[16] is None else [x for x in result[16].split(',')]
lsp.append(one_lsp)
lsps['lsps'] = lsp
return lsps
def add_lsp(self, args):
lsp = args
added_lsp = []
        # 'hop_list' (not 'host_list') matches the key joined and stored below.
        str_keys = ['from_port_uid', 'to_port_uid','from_router_ip', 'to_router_ip','user_data', 'hop_list', 'path', 'from_router_name', 'to_router_name']
num_keys = ['delay', 'priority', 'control_type', 'path_type', 'status']
for k in str_keys:
if k not in lsp or lsp[k] is None:
lsp[k] = ''
for k in num_keys:
if k not in lsp or lsp[k] is None:
lsp[k] = 'null'
lsp['hop_list'] = ','.join(lsp['hop_list'])
lsp['path'] = ','.join(lsp['path'])
#insert into t_lsp values (1, 'lsp-1', 1, '1.1.1.1', 1, 2, '2.2.2.2', 1, 100.0, 20.0, 1, 1, 0);
sql_str = 'insert into t_lsp(name,from_router_id,from_router_ip,from_port_id,to_router_id,to_router_ip,' \
+ 'to_port_id, bandwidth,delay,priority,control_type,path_type,status,user_data,hop_list,path) values (\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',%s,%s,%s,%s,%s,%s,\'%s\',\'%s\',\'%s\')' \
% (lsp['name'],lsp['from_router_uid'],lsp['from_router_ip'],lsp['from_port_uid'],lsp['to_router_uid'],lsp['to_router_ip'],lsp['to_port_uid'],\
lsp['bandwidth'],lsp['delay'],lsp['priority'],lsp['control_type'],lsp['path_type'],lsp['status'],lsp['user_data'],lsp['hop_list'],lsp['path'])
# print sql_str
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
db.commit()
lsp_id = db.exec_sql('SELECT LAST_INSERT_ID()')[0][0]
db.close()
return {'uid':str(lsp_id)}
def del_lsp(self, args):
if 'uids' in args:
uids = args['uids']
elif 'uid' in args:
uids = [args['uid']]
sql_str = 'delete from t_lsp where t_lsp.id in (%s)' % (",".join(str(uid) for uid in uids))
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return result
def update_lsp(self, args):
lsp = args
lsp_old = self.get_lsp_by_uid(lsp['uid'])['lsps'][0]
for k in lsp_old:
if k not in lsp:
lsp[k]=lsp_old[k]
if lsp[k] is None:
lsp[k] = ''
num_keys = ['delay', 'priority', 'control_type', 'path_type', 'status']
for k in num_keys:
if k in lsp and lsp[k] == '':
lsp[k] = 'null'
sql_str = 'update t_lsp set name=\'%s\',from_router_id=\'%s\',to_router_id=\'%s\',bandwidth=%s,delay=%s,priority=%s,status=%s,user_data=\'%s\',path=\'%s\' where t_lsp.id=%s' \
% (lsp['name'],lsp['from_router_uid'],lsp['to_router_uid'],lsp['bandwidth'],lsp['delay'],lsp['priority'],lsp['status'],json.dumps(lsp['user_data']),
','.join((str(lsp['from_router_uid']), str(lsp['to_router_uid']))) if 'path' not in lsp or len(lsp['path']) == 0 else ','.join(lsp['path']),lsp['uid'])
# print sql_str
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
print result
pass
def add_flow(self, args):
flow_uid = args['flow_uid']
lsp_uid = args['lsp_uid']
status = args['status'] if 'status' in args else 0
user_data = args['user_data'] if 'user_data' in args else ''
#insert into t_assigned_flow values (1, 1, 16843265, 16843266, '1.1.2.1', '1.1.2.2');
# ip = customer['ips'].split('/')[0]
# print ip
# mask = int(customer['ips'].split('/')[1])
# print mask
sql_str = 'insert into t_assigned_flow(lsp_id,flow_id,status, user_data) values (%s,%s,%s,\'%s\')' \
% (lsp_uid,flow_uid, status, user_data)
print sql_str
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return {}
def del_flow(self, args):
flow_uid = args['flow_uid']
lsp_uid = args['lsp_uid']
sql_str = 'delete from t_assigned_flow where flow_id=%s and lsp_id=%s' % (str(flow_uid), str(lsp_uid))
db = mysql_utils('tunnel')
db.exec_sql(sql_str)
db.commit()
db.close()
return {}
def get_flow(self, args):
# Get flow details (status, user_data...) of a specific LSP.
if 'lsp_uid' in args:
if 'flow_uids' in args :
flow_uids = args['flow_uids']
else:
flow_uids = None
lsp_uid = args['lsp_uid']
else:
print 'Get_flow: lsp_uid must be specified'
return {}
flow = {}
if flow_uids:
sql_str = 'SELECT * from t_assigned_flow WHERE flow_id in (%s) and lsp_id = %s' % (','.join(flow_uids), lsp_uid)
else:
sql_str = 'SELECT * from t_assigned_flow WHERE lsp_id = %s' % lsp_uid
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
db.close()
flows = {}
if result is not None:
for f in result:
flow = {}
flow['flow_uid'] = f[10]
flow['lsp_uid'] = f[1]
flow['status'] = f[8]
if f[9] and len(f[9]) > 1:
flow['user_data'] = json.loads(f[9])
else:
flow['user_data'] = None
flows[f[10]] = flow
return flows
def update_flow(self,args):
if 'flow_uid' in args and 'lsp_uid' in args:
flow_uid = args['flow_uid']
lsp_uid = args['lsp_uid']
else:
print 'Update_flow: flow_uid and lsp_uid must be specified'
return {}
sql_str = 'SELECT * from t_assigned_flow WHERE flow_id = %s and lsp_id = %s' % (flow_uid, lsp_uid)
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
print 'Update_flow: Can not find the flow'
db.close()
return {}
flow = result[0]
status = args['status'] if 'status' in args else flow[8]
user_data = json.dumps(args['user_data']) if 'user_data' in args else flow[9]
sql_str = 'UPDATE t_assigned_flow SET status = %s, user_data = \'%s\' WHERE flow_id = %s and lsp_id=%s'\
% (status, user_data, flow_uid, lsp_uid)
db.exec_sql(sql_str)
db.commit()
db.close()
return {}
def get_lsp_by_uid(self, uid):
lsps = {}
lsp = []
sql_str = 'select * from t_lsp where t_lsp.id = %s' % uid
print sql_str
db = mysql_utils('tunnel')
results = db.exec_sql(sql_str)
db.close()
for result in results:
one_lsp = {}
one_lsp['uid'] = uid
one_lsp['name'] = result[1]
one_lsp['from_router_uid'] = result[2]
one_lsp['to_router_uid'] = result[5]
one_lsp['bandwidth'] = result[8]
one_lsp['delay'] = result[9]
one_lsp['priority'] = result[10]
lsp.append(one_lsp)
lsps['lsps'] = lsp
return lsps
def get_lsp_by_customer(self, args):
customers = args['cust_uids']
lsps = {}
sql_str = 'select * from t_assigned_flow inner join t_lsp on t_assigned_flow.lsp_id = t_lsp.id ' + \
'and t_assigned_flow.customer_id in (%s)' % ','.join(str(c) for c in customers)
# print sql_str
db = mysql_utils('tunnel')
results = db.exec_sql(sql_str)
db.close()
# print results
for result in results:
cust_uid = str(result[6])
if cust_uid not in lsps:
lsps[cust_uid] = []
one_lsp = {}
one_lsp['lsp_uid'] = str(result[1])
one_lsp['customer_id'] = str(result[6])
one_lsp['lsp_name'] = result[11]
one_lsp['bandwidth'] = result[18]
one_lsp['delay'] = result[19]
one_lsp['priority'] = result[20]
one_lsp['status'] = result[23]
one_lsp['user_data'] = result[24]
one_lsp['hop_list'] = result[25]
one_lsp['path'] = result[26]
lsps[cust_uid].append(one_lsp)
return lsps
def get_customer_by_lsp(self, args):
if 'lsp_uids' in args:
lsps = args['lsp_uids']
sql_str = 'select * from t_assigned_flow where t_assigned_flow.lsp_id in (%s) and t_assigned_flow.customer_id is not null'\
% ','.join(str(p) for p in lsps)
else:
            sql_str = 'select * from t_assigned_flow where t_assigned_flow.customer_id is not null'
db = mysql_utils('tunnel')
results = db.exec_sql(sql_str)
db.close()
customers = {}
if results is not None:
for result in results:
lsp_uid = str(result[1])
cust_uid = str(result[6])
if lsp_uid not in customers:
customers[lsp_uid] = []
customers[lsp_uid].append(cust_uid)
return customers
def add_customer_to_lsp(self, args):
cust_uid = args['cust_uid']
lsp_uid = args['lsp_uid']
sql_str = 'insert into t_assigned_flow (customer_id, lsp_id) VALUES (%s,%s)' % (cust_uid, lsp_uid)
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return {}
def del_customer_from_lsp(self, args):
cust_uid = args['cust_uid']
lsp_uid = args['lsp_uid']
sql_str = 'delete from t_assigned_flow where t_assigned_flow.customer_id=%s and t_assigned_flow.lsp_id=%s' % (cust_uid, lsp_uid)
db = mysql_utils('tunnel')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return {}
def get_lsp_by_uids(self, args):
uids = args['lsp_uids']
lsps = {}
sql_str = 'select * from t_lsp where t_lsp.id in (%s)' % ','.join(uids)
db = mysql_utils('tunnel')
results = db.exec_sql(sql_str)
db.close()
if results is not None:
for result in results:
uid = result[0]
one_lsp = {}
one_lsp['uid'] = uid
one_lsp['name'] = result[1]
one_lsp['from_router_uid'] = result[2]
one_lsp['to_router_uid'] = result[5]
one_lsp['bandwidth'] = result[8]
one_lsp['delay'] = result[9]
one_lsp['priority'] = result[10]
one_lsp['status'] = result[13]
lsps[uid] = one_lsp
return lsps
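# --- Illustrative sketch (not part of the service): the JSON envelope that
# ms_tunnel_handler.post() expects. 'request' selects the operation from
# resp_func and 'args' carries its parameters; the values below are made up.
if __name__ == '__main__':
    example_request = {
        'request': 'ms_tunnel_get_lsp_by_uids',
        'trans_id': 1001,
        'ts': '20170101120000',
        'args': {'lsp_uids': ['1', '2']},
    }
    # A client would POST this body; the handler echoes 'request' back as
    # 'response' and returns the operation's output under 'result'.
    print(json.dumps(example_request))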
| 36.357298 | 206 | 0.543444 |
2e5d5af639cb85da74232b1130cd832343351b86 | 3,896 | py | Python | rlpyt/envs/po_fourrooms/po_fourrooms_env.py | DavidSlayback/rlpyt | 445adbd3917842caae0cae0d06e4b2866c8f1258 | [
"MIT"
] | null | null | null | rlpyt/envs/po_fourrooms/po_fourrooms_env.py | DavidSlayback/rlpyt | 445adbd3917842caae0cae0d06e4b2866c8f1258 | [
"MIT"
] | null | null | null | rlpyt/envs/po_fourrooms/po_fourrooms_env.py | DavidSlayback/rlpyt | 445adbd3917842caae0cae0d06e4b2866c8f1258 | [
"MIT"
] | null | null | null | import gym
import gym_classics
from gym_classics.envs.four_rooms import FourRooms, NoisyGridworld
from rlpyt.envs.gym import GymEnvWrapper, GymSpaceWrapper
import numpy as np
all_squares = [(i,j) for i in range(11) for j in range(11)]
oc_initial_goal = [(8,4)] # Goal in east doorway
# Base FourRooms from gym-classics starts in (0,0) i.e. bottom-left, goal is (10,10) i.e., top-right. Probably want a stochastic version
class StochasticFourRooms(FourRooms):
""" Classic FourRooms, but with parameterizable start and end
Args:
possible_starts: Set of possible (x,y) start locations, or None for default. Drawn from each episode
possible_goals: Set of possible (x,y) end locations, or None for default. Drawn at initialization only
"""
def __init__(self, possible_starts=None, possible_goals=None, goal_seed=None):
blocks = frozenset({(5,0), (5,2), (5,3), (5,4), (5,5), (5,6), (5,7), (5,9), (5,10),
(0,5), (2,5), (3,5), (4,5),
(6,4), (7,4), (9,4), (10,4)})
if possible_starts is None: possible_starts = {(0,0)}
possible_starts = possible_starts - blocks
goal = (10,10) # Default
if possible_goals is not None:
possible_goals = list(possible_goals - blocks)
np.random.seed(goal_seed)
goal = possible_goals[np.random.randint(0, len(possible_goals))]
possible_starts = possible_starts - {goal}
self._goal = goal
super(NoisyGridworld, self).__init__(dims=(11, 11), starts=possible_starts, blocks=blocks)
def transfer(self, goal=None):
""" Transfer original goal to a different one. Return goal (in case it doesn't change due to walls)"""
if goal is not None:
if isinstance(goal, (int, float)):
goal = self._decode(int(goal))
self._goal = goal if goal not in self._blocks else self._goal
return self._encode(self._goal)
# Partially-observable four rooms
# Default state space: n=104 discrete state space
# Revised state space: One paper proposes being able to observe walls around you. Chris says maybe direction of goal?
# First stab: Merely the existence of 4 surrounding walls
class FourRoomsFourWallsWrapper(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
n_spaces_wall = (2 ** 4) # Number of states for walls
self.observation_space = gym.spaces.Discrete(n_spaces_wall)
self._dir = np.array([[0,1], [1,0], [0,-1], [-1,0]]) # Directions
def observation(self, observation):
o = 0 # Base observation, no adjacent walls
dims = np.array(self.env._dims)
obs_decode = np.array(self.env._decode(observation)) # Convert to (x,y)
obs_adj = self._dir + obs_decode # Coordinates of adjacent squares (can assume these are all valid grid coordinates)
blocks = self.env._blocks # Coordinates of blockers
for i, arr in enumerate(obs_adj):
            # Set bit i when the i-th neighbouring square is off-grid or blocked,
            # giving one of the 2**4 observations declared above (the original
            # `i * 2` weight could not distinguish all wall configurations).
            o += (2 ** i) * (np.any((arr < 0) | (arr >= dims)) or (tuple(arr) in blocks))
return o
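# --- Illustrative sketch (not part of the environment): the wrapper above packs
# the four "is there a wall next to me?" booleans into one Discrete(16)
# observation, one bit per direction. A standalone version of that encoding:
def _encode_walls_sketch(blocked_dirs):
    # blocked_dirs: iterable of four booleans, in the same order as self._dir.
    return sum(2 ** i for i, blocked in enumerate(blocked_dirs) if blocked)
# e.g. _encode_walls_sketch([False] * 4) == 0 and _encode_walls_sketch([True] * 4) == 15.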
def make_po_fourrooms(fomdp=False, max_episode_steps=2000):
""" max_episode_steps is a possible kwargs"""
TL = gym.wrappers.TimeLimit
e = StochasticFourRooms(possible_starts=set(all_squares), possible_goals=set(oc_initial_goal))
if not fomdp: e = FourRoomsFourWallsWrapper(e)
e = TL(e, max_episode_steps)
return GymEnvWrapper(e)
if __name__ == "__main__":
e = StochasticFourRooms({(0,0), (1,1), (2,2)}, possible_goals={(10,10), (9,9)})
t = e.transfer()
t2 = e.transfer(64)
t3 = e.transfer((9,9))
e = FourRoomsFourWallsWrapper(e)
s = e.reset()
s_d = list(e.env._state)
print("Initial_State_{}".format(s_d))
for t in range(200):
o, r, d, info = e.step(e.action_space.sample())
print("Observation_{}".format(o)) | 48.7 | 136 | 0.650154 |
f4d74ad68f69215b379539fd519707ea6c453928 | 3,069 | py | Python | forceatlas.py | tpoisot/nxfa2 | 155497b4c7db73b2449b17bb12862b2d2f41b8c9 | [
"WTFPL"
] | 19 | 2015-04-08T15:23:47.000Z | 2021-09-29T15:41:47.000Z | forceatlas.py | tpoisot/nxfa2 | 155497b4c7db73b2449b17bb12862b2d2f41b8c9 | [
"WTFPL"
] | 1 | 2015-06-15T12:35:01.000Z | 2015-06-15T13:27:55.000Z | forceatlas.py | tpoisot/nxfa2 | 155497b4c7db73b2449b17bb12862b2d2f41b8c9 | [
"WTFPL"
] | 8 | 2015-01-26T23:07:43.000Z | 2016-12-13T22:53:43.000Z | #! /usr/bin/python
import networkx as nx
from scipy.sparse import spdiags, coo_matrix
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
## Now the layout function
def forceatlas2_layout(G, iterations=10, linlog=False, pos=None, nohubs=False,
kr=0.001, k=None, dim=2):
"""
Options values are
g The graph to layout
iterations Number of iterations to do
linlog Whether to use linear or log repulsion
random_init Start with a random position
If false, start with FR
avoidoverlap Whether to avoid overlap of points
degreebased Degree based repulsion
"""
# We add attributes to store the current and previous convergence speed
for n in G:
G.node[n]['prevcs'] = 0
G.node[n]['currcs'] = 0
# To numpy matrix
# This comes from the spares FR layout in nx
A = nx.to_scipy_sparse_matrix(G, dtype='f')
nnodes, _ = A.shape
try:
A = A.tolil()
except Exception as e:
A = (coo_matrix(A)).tolil()
if pos is None:
pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype)
else:
pos = pos.astype(A.dtype)
if k is None:
k = np.sqrt(1.0 / nnodes)
# Iterations
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt = t / float(iterations + 1)
displacement = np.zeros((dim, nnodes))
for iteration in range(iterations):
displacement *= 0
# loop over rows
for i in range(A.shape[0]):
# difference between this row's node position and all others
delta = (pos[i] - pos).T
# distance between points
distance = np.sqrt((delta ** 2).sum(axis=0))
# enforce minimum distance of 0.01
distance = np.where(distance < 0.01, 0.01, distance)
# the adjacency matrix row
Ai = np.asarray(A.getrowview(i).toarray())
# displacement "force"
Dist = k * k / distance ** 2
if nohubs:
Dist = Dist / float(Ai.sum(axis=1) + 1)
if linlog:
Dist = np.log(Dist + 1)
displacement[:, i] += \
(delta * (Dist - Ai * distance / k)).sum(axis=1)
# update positions
length = np.sqrt((displacement ** 2).sum(axis=0))
length = np.where(length < 0.01, 0.01, length)
pos += (displacement * t / length).T
# cool temperature
t -= dt
# Return the layout
return dict(zip(G, pos))
if __name__ == "__main__":
## Read a food web with > 100 nodes
FW = nx.read_edgelist('web.edges', create_using=nx.DiGraph())
positions = forceatlas2_layout(FW, linlog=False, nohubs=False,
iterations=100)
nx.draw(FW, positions)
plt.show()
| 34.1 | 78 | 0.572499 |
b34f797694715dc966830d71bf1b516711540753 | 1,475 | py | Python | result_to_plot.py | WeiWeic6222848/CT3 | 83eca467391b21c9edfc835623b4038369ffbbbb | [
"MIT"
] | null | null | null | result_to_plot.py | WeiWeic6222848/CT3 | 83eca467391b21c9edfc835623b4038369ffbbbb | [
"MIT"
] | null | null | null | result_to_plot.py | WeiWeic6222848/CT3 | 83eca467391b21c9edfc835623b4038369ffbbbb | [
"MIT"
] | null | null | null | # this file is added for clean plots and tables - CT3
from tabulate import tabulate
import pandas as pd
import matplotlib.pyplot as plt
resultB = pd.read_json("results/2b_eval_on_test.json")
resultC = pd.read_json("results/3_different_size.json")
scores = resultB.keys()
for algorithm in resultB.columns:
for key in resultB[algorithm].keys():
if key != 'example':
resultB[algorithm][key] = resultB[algorithm][key]['mean']
Table2 = resultB.drop(["example"])
print(tabulate(Table2, headers='keys', tablefmt='psql'))
Table3 = resultB.loc["example"].to_frame().T
examples = Table3[::1]
Table3 = Table3.drop(["example"])
for i in range(10):
rank = i + 1
items = {}
for key in examples:
items[key] = examples[key][0][i]['title']
Table3 = Table3.append(pd.Series(items, name="Rank {}".format(str(rank))))
print(tabulate(Table3, headers='keys', tablefmt='psql'))
recall100 = resultC.loc["RecallAt100"]
recall50 = resultC.loc["RecallAt50"]
recall25 = resultC.loc["RecallAt25"]
y = resultC.columns
l1 = plt.plot(y, recall100, 'bo--', label="recall@100")
l2 = plt.plot(y, recall50, 'gs--', label="recall@50")
l3 = plt.plot(y, recall25, 'x--', label="recall@25", color='orange')
plt.legend()
plt.xlabel("# of annotated movie pairs for training")
plt.ylabel("mean score")
plt.show()
ranking = resultC.loc["SumOfRanks"]
plt.plot(y, ranking, 'r+--', label="sum of ranks")
plt.xlabel("# of annotated movie pairs for training")
plt.ylabel("sum of ranks")
plt.show()
| 32.065217 | 78 | 0.68678 |
94dd8e2171b63e90e8b5d28aae4245460826fa6c | 4,607 | py | Python | tests/unit/conftest.py | SteveMayze/leawood_gateway | 4eee019f14ebc0c72dd8323acd5fc8c8981cc2fd | [
"MIT"
] | null | null | null | tests/unit/conftest.py | SteveMayze/leawood_gateway | 4eee019f14ebc0c72dd8323acd5fc8c8981cc2fd | [
"MIT"
] | null | null | null | tests/unit/conftest.py | SteveMayze/leawood_gateway | 4eee019f14ebc0c72dd8323acd5fc8c8981cc2fd | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Callable, TypedDict
from leawood.domain.messages import Message, Telegram
from leawood.domain import messages
from leawood.domain.model import Node
from leawood.domain.hardware import Gateway, Modem, Sensor
from leawood.services.messagebus import LocalMessageBus
from leawood.services import messagebus
from leawood.config import Config
import logging
import pytest
import time
from leawood.services.repository import Repository
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
MAX_WAIT = 1
def wait_for_empty_queue(message_bus, state):
start_time = time.time()
while True:
try:
assert message_bus.empty() == state
return
except (AssertionError) as error:
if time.time() - start_time > MAX_WAIT:
raise error
time.sleep( 0.5)
def wait_for_runnning_state(worker, state):
start_time = time.time()
while True:
try:
assert worker.is_running() == state
return
except (AssertionError) as error:
if time.time() - start_time > MAX_WAIT:
raise error
time.sleep( 0.5)
@dataclass
class FakeMessage():
"""
This represents the raw message sent and received by the Modem.
The payload should contain at least the serial_id and operation
    plus an optional data or metadata section for the additional
information to support the operation.
The modem implementation should transform this into a Message
object.
"""
addr64bit:str
payload: bytearray
class FakeModem(Modem):
def __init__(self):
self.spy = {}
self._receive_message_callback = None
def send_message(self, message: Message):
logger.info(f'Send message: {message}')
assert isinstance(message.serial_id, str)
self.spy[message.serial_id] = message
def register_receive_callback(self, callback: Callable):
self._receive_message_callback = callback
def receive_message(self, in_message: FakeMessage):
logger.info(f'Received message: {in_message}')
message = messages.create_message_from_data(in_message.addr64bit, in_message.payload)
self._receive_message_callback(message)
def open(self):
pass
def close(self):
pass
class FakeRepository(Repository):
def __init__(self) -> None:
super().__init__()
self.repository_cache = {}
self.spy = {}
def _add_node(self, node: Node):
self.spy['_add_node'] = node
self.repository_cache[node.serial_id] = node
def _get_node(self, serial_id: str) -> Node:
self.spy['_get_node'] = serial_id
if serial_id in self.repository_cache:
return self.repository_cache[serial_id]
return None
def _post_sensor_data(self, node: Node, message: Message):
self.spy['_post_sensor_data'] = message
pass
class ModemMessageBuilder():
def create_modem_message(self, addr64bit: str, payload: TypedDict):
# TODO - convert the data into a bytearray!
telegram = Telegram(payload.pop('serial_id'), payload.pop('operation'), payload)
data = messages.transform_telegram_to_bytearray(telegram)
return FakeMessage(addr64bit, data)
@pytest.fixture
def modem_message_builder():
return ModemMessageBuilder()
@pytest.fixture
def config():
args = ["--serial-port", "COM1", "--baud", "9600", "--sleeptime", "0"]
return Config(args)
@pytest.fixture(scope='function')
def modem() -> Modem:
modem = FakeModem()
yield modem
modem.spy = {}
@pytest.fixture(scope='function')
def repository() -> Repository:
repository = FakeRepository()
yield repository
repository.repository_cache = {}
repository.spy = {}
@pytest.fixture(scope='function')
def message_bus():
message_bus = LocalMessageBus()
return message_bus
@pytest.fixture(scope='function')
def gateway(message_bus, repository, modem):
gateway = Gateway(message_bus, repository, modem)
messagebus.activate(message_bus)
logger.info(f'Waiting for the message bus to start')
wait_for_runnning_state(message_bus, True)
yield gateway
logger.info(f'Waiting for the message bus to shut down')
messagebus.shutdown(message_bus)
wait_for_runnning_state(message_bus, False)
@pytest.fixture(scope='function')
def sensor():
sensor = Sensor(None, None)
sensor.serial_id = '0102030405060708'
sensor.addr64bit = '090a0b0c0d0e0f10'
return sensor
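# --- Illustrative sketch (not a test in this suite): how the fixtures above are
# typically consumed. Only behaviour defined in this file is exercised; the
# function name avoids the 'test_' prefix so pytest will not collect it.
def _example_repository_usage(repository, sensor):
    repository._add_node(sensor)
    assert repository._get_node('0102030405060708') is sensor
    assert repository.spy['_add_node'] is sensor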
| 28.091463 | 93 | 0.685696 |
4079571aa7e6adf628a05b0bbbe409e69922f3de | 715 | py | Python | tests/test_annulus.py | Michael-4/Annulus | 1a1db2b2ddf289528ebfcef44b5ad4c43aebca49 | [
"MIT"
] | 3 | 2018-03-15T07:59:49.000Z | 2019-07-30T19:47:39.000Z | tests/test_annulus.py | Michael-4/Annulus | 1a1db2b2ddf289528ebfcef44b5ad4c43aebca49 | [
"MIT"
] | null | null | null | tests/test_annulus.py | Michael-4/Annulus | 1a1db2b2ddf289528ebfcef44b5ad4c43aebca49 | [
"MIT"
] | null | null | null | import pytest
import annulus
import cv2
import os.path
@pytest.fixture
def image():
path = os.path.abspath(os.path.dirname(__file__)) + "/../examples/image.png"
image = cv2.imread(path)
return image
def test_annulus(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.blur(gray, (5, 5))
binary = annulus.binarize(gray, block_size = 65)
detector = annulus.AnnulusDetection()
annuli = detector.detect(gray, binary, high_quality = True)
    assert annuli is not None and len(annuli) > 0
grid = annulus.Grid(outer_circle_diamater = 0.02, marker_spacing = 0.03)
H, idx, grid, pixel = grid.find_numbered_grid(annuli, binary)
assert H is not None
| 25.535714 | 80 | 0.682517 |
2fcd4c2a60b4b63582aa9eb0f1aa385019a7e30a | 627 | py | Python | hello/manage.py | manper32/WiseR | 253811568749b370bbe26db6de85eba83ec46e8a | [
"0BSD"
] | null | null | null | hello/manage.py | manper32/WiseR | 253811568749b370bbe26db6de85eba83ec46e8a | [
"0BSD"
] | null | null | null | hello/manage.py | manper32/WiseR | 253811568749b370bbe26db6de85eba83ec46e8a | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ClaroCV.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.5 | 73 | 0.682616 |
4e84bedce724f87ada728b1c871f1068e7e1c00c | 484 | py | Python | parsec/commands/histories/show_dataset.py | simonbray/parsec | c0e123cbf7cb1289ec722357a6262f716575e4d9 | [
"Apache-2.0"
] | null | null | null | parsec/commands/histories/show_dataset.py | simonbray/parsec | c0e123cbf7cb1289ec722357a6262f716575e4d9 | [
"Apache-2.0"
] | null | null | null | parsec/commands/histories/show_dataset.py | simonbray/parsec | c0e123cbf7cb1289ec722357a6262f716575e4d9 | [
"Apache-2.0"
] | null | null | null | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output
@click.command('show_dataset')
@click.argument("history_id", type=str)
@click.argument("dataset_id", type=str)
@pass_context
@custom_exception
@dict_output
def cli(ctx, history_id, dataset_id):
"""Get details about a given history dataset.
Output:
Information about the dataset
"""
return ctx.gi.histories.show_dataset(history_id, dataset_id)
| 24.2 | 64 | 0.77686 |
07de2eb1dc471ac6912d1181ae70d36915f61f21 | 55,860 | py | Python | sanic/app.py | jmarcet/sanic | 7384047f958ce9b6e6df531f9172429b87ac7ad2 | [
"MIT"
] | null | null | null | sanic/app.py | jmarcet/sanic | 7384047f958ce9b6e6df531f9172429b87ac7ad2 | [
"MIT"
] | null | null | null | sanic/app.py | jmarcet/sanic | 7384047f958ce9b6e6df531f9172429b87ac7ad2 | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import logging
import logging.config
import re
import sys
from asyncio import (
AbstractEventLoop,
CancelledError,
Task,
ensure_future,
get_event_loop_policy,
get_running_loop,
wait_for,
)
from asyncio.futures import Future
from collections import defaultdict, deque
from contextlib import suppress
from functools import partial
from inspect import isawaitable
from socket import socket
from traceback import format_exc
from types import SimpleNamespace
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Awaitable,
Callable,
Coroutine,
Deque,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import urlencode, urlunparse
from warnings import filterwarnings
from sanic_routing.exceptions import ( # type: ignore
FinalizationError,
NotFound,
)
from sanic_routing.route import Route # type: ignore
from sanic.application.ext import setup_ext
from sanic.application.state import ApplicationState, Mode, ServerStage
from sanic.asgi import ASGIApp
from sanic.base.root import BaseSanic
from sanic.blueprint_group import BlueprintGroup
from sanic.blueprints import Blueprint
from sanic.compat import OS_IS_WINDOWS, enable_windows_color_support
from sanic.config import SANIC_PREFIX, Config
from sanic.exceptions import (
InvalidUsage,
SanicException,
ServerError,
URLBuildError,
)
from sanic.handlers import ErrorHandler
from sanic.http import Stage
from sanic.log import (
LOGGING_CONFIG_DEFAULTS,
deprecation,
error_logger,
logger,
)
from sanic.mixins.listeners import ListenerEvent
from sanic.mixins.runner import RunnerMixin
from sanic.models.futures import (
FutureException,
FutureListener,
FutureMiddleware,
FutureRegistry,
FutureRoute,
FutureSignal,
FutureStatic,
)
from sanic.models.handler_types import ListenerType, MiddlewareType
from sanic.models.handler_types import Sanic as SanicVar
from sanic.request import Request
from sanic.response import BaseHTTPResponse, HTTPResponse, ResponseStream
from sanic.router import Router
from sanic.server.websockets.impl import ConnectionClosed
from sanic.signals import Signal, SignalRouter
from sanic.touchup import TouchUp, TouchUpMeta
if TYPE_CHECKING: # no cov
try:
from sanic_ext import Extend # type: ignore
from sanic_ext.extensions.base import Extension # type: ignore
except ImportError:
Extend = TypeVar("Extend") # type: ignore
if OS_IS_WINDOWS: # no cov
enable_windows_color_support()
filterwarnings("once", category=DeprecationWarning)
class Sanic(BaseSanic, RunnerMixin, metaclass=TouchUpMeta):
"""
The main application instance
"""
__touchup__ = (
"handle_request",
"handle_exception",
"_run_response_middleware",
"_run_request_middleware",
)
__slots__ = (
"_asgi_app",
"_asgi_client",
"_blueprint_order",
"_delayed_tasks",
"_ext",
"_future_exceptions",
"_future_listeners",
"_future_middleware",
"_future_registry",
"_future_routes",
"_future_signals",
"_future_statics",
"_state",
"_task_registry",
"_test_client",
"_test_manager",
"blueprints",
"config",
"configure_logging",
"ctx",
"error_handler",
"go_fast",
"listeners",
"named_request_middleware",
"named_response_middleware",
"request_class",
"request_middleware",
"response_middleware",
"router",
"signal_router",
"sock",
"strict_slashes",
"websocket_enabled",
"websocket_tasks",
)
_app_registry: Dict[str, "Sanic"] = {}
_uvloop_setting = None # TODO: Remove in v22.6
test_mode = False
def __init__(
self,
name: str = None,
config: Optional[Config] = None,
ctx: Optional[Any] = None,
router: Optional[Router] = None,
signal_router: Optional[SignalRouter] = None,
error_handler: Optional[ErrorHandler] = None,
env_prefix: Optional[str] = SANIC_PREFIX,
request_class: Optional[Type[Request]] = None,
strict_slashes: bool = False,
log_config: Optional[Dict[str, Any]] = None,
configure_logging: bool = True,
register: Optional[bool] = None,
dumps: Optional[Callable[..., AnyStr]] = None,
) -> None:
super().__init__(name=name)
# logging
if configure_logging:
dict_config = log_config or LOGGING_CONFIG_DEFAULTS
logging.config.dictConfig(dict_config) # type: ignore
if config and env_prefix != SANIC_PREFIX:
raise SanicException(
"When instantiating Sanic with config, you cannot also pass "
"env_prefix"
)
# First setup config
self.config: Config = config or Config(env_prefix=env_prefix)
# Then we can do the rest
self._asgi_client: Any = None
self._blueprint_order: List[Blueprint] = []
self._delayed_tasks: List[str] = []
self._future_registry: FutureRegistry = FutureRegistry()
self._state: ApplicationState = ApplicationState(app=self)
self._task_registry: Dict[str, Task] = {}
self._test_client: Any = None
self._test_manager: Any = None
self.asgi = False
self.auto_reload = False
self.blueprints: Dict[str, Blueprint] = {}
self.configure_logging: bool = configure_logging
self.ctx: Any = ctx or SimpleNamespace()
self.error_handler: ErrorHandler = error_handler or ErrorHandler()
self.listeners: Dict[str, List[ListenerType[Any]]] = defaultdict(list)
self.named_request_middleware: Dict[str, Deque[MiddlewareType]] = {}
self.named_response_middleware: Dict[str, Deque[MiddlewareType]] = {}
self.request_class: Type[Request] = request_class or Request
self.request_middleware: Deque[MiddlewareType] = deque()
self.response_middleware: Deque[MiddlewareType] = deque()
self.router: Router = router or Router()
self.signal_router: SignalRouter = signal_router or SignalRouter()
self.sock: Optional[socket] = None
self.strict_slashes: bool = strict_slashes
self.websocket_enabled: bool = False
self.websocket_tasks: Set[Future[Any]] = set()
# Register alternative method names
self.go_fast = self.run
if register is not None:
deprecation(
"The register argument is deprecated and will stop working "
"in v22.6. After v22.6 all apps will be added to the Sanic "
"app registry.",
22.6,
)
self.config.REGISTER = register
if self.config.REGISTER:
self.__class__.register_app(self)
self.router.ctx.app = self
self.signal_router.ctx.app = self
if dumps:
BaseHTTPResponse._dumps = dumps # type: ignore
@property
def loop(self):
"""
Synonymous with asyncio.get_event_loop().
.. note::
Only supported when using the `app.run` method.
"""
if self.state.stage is ServerStage.STOPPED and self.asgi is False:
raise SanicException(
"Loop can only be retrieved after the app has started "
"running. Not supported with `create_server` function"
)
return get_event_loop_policy().get_event_loop()
# -------------------------------------------------------------------- #
# Registration
# -------------------------------------------------------------------- #
def register_listener(
self, listener: ListenerType[SanicVar], event: str
) -> ListenerType[SanicVar]:
"""
Register the listener for a given event.
:param listener: callable i.e. setup_db(app, loop)
:param event: when to register listener i.e. 'before_server_start'
:return: listener
"""
try:
_event = ListenerEvent[event.upper()]
except (ValueError, AttributeError):
valid = ", ".join(
map(lambda x: x.lower(), ListenerEvent.__members__.keys())
)
raise InvalidUsage(f"Invalid event: {event}. Use one of: {valid}")
if "." in _event:
self.signal(_event.value)(
partial(self._listener, listener=listener)
)
else:
self.listeners[_event.value].append(listener)
return listener
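    # Usage sketch (illustrative, not part of this implementation): listeners are
    # normally registered through the ``listener`` decorator, which ends up in
    # ``register_listener`` above, e.g.::
    #
    #     @app.listener("before_server_start")
    #     async def setup_db(app, loop):
    #         app.ctx.db = await connect_db()   # connect_db is a made-up helper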
def register_middleware(
self, middleware: MiddlewareType, attach_to: str = "request"
) -> MiddlewareType:
"""
Register an application level middleware that will be attached
to all the API URLs registered under this application.
This method is internally invoked by the :func:`middleware`
decorator provided at the app level.
:param middleware: Callback method to be attached to the
middleware
:param attach_to: The state at which the middleware needs to be
invoked in the lifecycle of an *HTTP Request*.
**request** - Invoke before the request is processed
**response** - Invoke before the response is returned back
:return: decorated method
"""
if attach_to == "request":
if middleware not in self.request_middleware:
self.request_middleware.append(middleware)
if attach_to == "response":
if middleware not in self.response_middleware:
self.response_middleware.appendleft(middleware)
return middleware
def register_named_middleware(
self,
middleware: MiddlewareType,
route_names: Iterable[str],
attach_to: str = "request",
):
"""
Method for attaching middleware to specific routes. This is mainly an
internal tool for use by Blueprints to attach middleware to only its
specific routes. But, it could be used in a more generalized fashion.
:param middleware: the middleware to execute
:param route_names: a list of the names of the endpoints
:type route_names: Iterable[str]
:param attach_to: whether to attach to request or response,
defaults to "request"
:type attach_to: str, optional
"""
if attach_to == "request":
for _rn in route_names:
if _rn not in self.named_request_middleware:
self.named_request_middleware[_rn] = deque()
if middleware not in self.named_request_middleware[_rn]:
self.named_request_middleware[_rn].append(middleware)
if attach_to == "response":
for _rn in route_names:
if _rn not in self.named_response_middleware:
self.named_response_middleware[_rn] = deque()
if middleware not in self.named_response_middleware[_rn]:
self.named_response_middleware[_rn].appendleft(middleware)
return middleware
def _apply_exception_handler(
self,
handler: FutureException,
route_names: Optional[List[str]] = None,
):
"""Decorate a function to be registered as a handler for exceptions
:param exceptions: exceptions
:return: decorated function
"""
for exception in handler.exceptions:
if isinstance(exception, (tuple, list)):
for e in exception:
self.error_handler.add(e, handler.handler, route_names)
else:
self.error_handler.add(exception, handler.handler, route_names)
return handler.handler
def _apply_listener(self, listener: FutureListener):
return self.register_listener(listener.listener, listener.event)
def _apply_route(self, route: FutureRoute) -> List[Route]:
params = route._asdict()
websocket = params.pop("websocket", False)
subprotocols = params.pop("subprotocols", None)
if websocket:
self.enable_websocket()
websocket_handler = partial(
self._websocket_handler,
route.handler,
subprotocols=subprotocols,
)
websocket_handler.__name__ = route.handler.__name__ # type: ignore
websocket_handler.is_websocket = True # type: ignore
params["handler"] = websocket_handler
ctx = params.pop("route_context")
routes = self.router.add(**params)
if isinstance(routes, Route):
routes = [routes]
for r in routes:
r.ctx.websocket = websocket
r.ctx.static = params.get("static", False)
r.ctx.__dict__.update(ctx)
return routes
def _apply_static(self, static: FutureStatic) -> Route:
return self._register_static(static)
def _apply_middleware(
self,
middleware: FutureMiddleware,
route_names: Optional[List[str]] = None,
):
if route_names:
return self.register_named_middleware(
middleware.middleware, route_names, middleware.attach_to
)
else:
return self.register_middleware(
middleware.middleware, middleware.attach_to
)
def _apply_signal(self, signal: FutureSignal) -> Signal:
return self.signal_router.add(*signal)
def dispatch(
self,
event: str,
*,
condition: Optional[Dict[str, str]] = None,
context: Optional[Dict[str, Any]] = None,
fail_not_found: bool = True,
inline: bool = False,
reverse: bool = False,
) -> Coroutine[Any, Any, Awaitable[Any]]:
return self.signal_router.dispatch(
event,
context=context,
condition=condition,
inline=inline,
reverse=reverse,
fail_not_found=fail_not_found,
)
async def event(
self, event: str, timeout: Optional[Union[int, float]] = None
):
signal = self.signal_router.name_index.get(event)
if not signal:
if self.config.EVENT_AUTOREGISTER:
self.signal_router.reset()
self.add_signal(None, event)
signal = self.signal_router.name_index[event]
self.signal_router.finalize()
else:
raise NotFound("Could not find signal %s" % event)
return await wait_for(signal.ctx.event.wait(), timeout=timeout)
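    # Usage sketch (illustrative, not part of this implementation): a signal can
    # be registered, dispatched, and awaited elsewhere, e.g.::
    #
    #     @app.signal("user.registration.created")
    #     async def send_welcome(**context):
    #         ...
    #
    #     await app.dispatch("user.registration.created", context={"email": email})
    #     await app.event("user.registration.created")
    #
    # The event name and ``email`` value are illustrative only.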
def enable_websocket(self, enable=True):
"""Enable or disable the support for websocket.
Websocket is enabled automatically if websocket routes are
added to the application.
"""
if not self.websocket_enabled:
# if the server is stopped, we want to cancel any ongoing
# websocket tasks, to allow the server to exit promptly
self.listener("before_server_stop")(self._cancel_websocket_tasks)
self.websocket_enabled = enable
def blueprint(
self,
blueprint: Union[
Blueprint, List[Blueprint], Tuple[Blueprint], BlueprintGroup
],
**options: Any,
):
"""Register a blueprint on the application.
:param blueprint: Blueprint object or (list, tuple) thereof
:param options: option dictionary with blueprint defaults
:return: Nothing
"""
if isinstance(blueprint, (list, tuple, BlueprintGroup)):
for item in blueprint:
params = {**options}
if isinstance(blueprint, BlueprintGroup):
if blueprint.url_prefix:
merge_from = [
options.get("url_prefix", ""),
blueprint.url_prefix,
]
if not isinstance(item, BlueprintGroup):
merge_from.append(item.url_prefix or "")
merged_prefix = "/".join(
u.strip("/") for u in merge_from
).rstrip("/")
params["url_prefix"] = f"/{merged_prefix}"
for _attr in ["version", "strict_slashes"]:
if getattr(item, _attr) is None:
params[_attr] = getattr(
blueprint, _attr
) or options.get(_attr)
if item.version_prefix == "/v":
if blueprint.version_prefix == "/v":
params["version_prefix"] = options.get(
"version_prefix"
)
else:
params["version_prefix"] = blueprint.version_prefix
self.blueprint(item, **params)
return
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, (
'A blueprint with the name "%s" is already registered. '
"Blueprint names must be unique." % (blueprint.name,)
)
else:
self.blueprints[blueprint.name] = blueprint
self._blueprint_order.append(blueprint)
if (
self.strict_slashes is not None
and blueprint.strict_slashes is None
):
blueprint.strict_slashes = self.strict_slashes
blueprint.register(self, options)
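    # Usage sketch (illustrative, not part of this implementation)::
    #
    #     bp_v1 = Blueprint("v1", url_prefix="/v1")
    #     bp_v2 = Blueprint("v2", url_prefix="/v2")
    #     app.blueprint(Blueprint.group(bp_v1, bp_v2, url_prefix="/api"))
    #
    # registers the routes of both blueprints under ``/api/v1/...`` and
    # ``/api/v2/...``.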
def url_for(self, view_name: str, **kwargs):
"""Build a URL based on a view name and the values provided.
In order to build a URL, all request parameters must be supplied as
keyword arguments, and each parameter must pass the test for the
specified parameter type. If these conditions are not met, a
`URLBuildError` will be thrown.
Keyword arguments that are not request parameters will be included in
the output URL's query string.
There are several _special_ keyword arguments that will alter how the
URL will be returned:
1. **_anchor**: ``str`` - Adds an ``#anchor`` to the end
2. **_scheme**: ``str`` - Should be either ``"http"`` or ``"https"``,
default is ``"http"``
3. **_external**: ``bool`` - Whether to return the path or a full URL
with scheme and host
4. **_host**: ``str`` - Used when one or more hosts are defined for a
route to tell Sanic which to use
(only applies with ``_external=True``)
5. **_server**: ``str`` - If not using ``_host``, this will be used
for defining the hostname of the URL
(only applies with ``_external=True``),
defaults to ``app.config.SERVER_NAME``
If you want the PORT to appear in your URL, you should set it in:
.. code-block::
app.config.SERVER_NAME = "myserver:7777"
`See user guide re: routing
<https://sanicframework.org/guide/basics/routing.html#generating-a-url>`__
:param view_name: string referencing the view name
:param kwargs: keys and values that are used to build request
parameters and query string arguments.
:return: the built URL
Raises:
URLBuildError
"""
# find the route by the supplied view name
kw: Dict[str, str] = {}
# special static files url_for
if "." not in view_name:
view_name = f"{self.name}.{view_name}"
if view_name.endswith(".static"):
name = kwargs.pop("name", None)
if name:
view_name = view_name.replace("static", name)
kw.update(name=view_name)
route = self.router.find_route_by_view_name(view_name, **kw)
if not route:
raise URLBuildError(
f"Endpoint with name `{view_name}` was not found"
)
uri = route.path
if getattr(route.ctx, "static", None):
filename = kwargs.pop("filename", "")
# it's static folder
if "__file_uri__" in uri:
folder_ = uri.split("<__file_uri__:", 1)[0]
if folder_.endswith("/"):
folder_ = folder_[:-1]
if filename.startswith("/"):
filename = filename[1:]
kwargs["__file_uri__"] = filename
if (
uri != "/"
and uri.endswith("/")
and not route.strict
and not route.raw_path[:-1]
):
uri = uri[:-1]
if not uri.startswith("/"):
uri = f"/{uri}"
out = uri
# _method is only a placeholder now, don't know how to support it
kwargs.pop("_method", None)
anchor = kwargs.pop("_anchor", "")
# _external need SERVER_NAME in config or pass _server arg
host = kwargs.pop("_host", None)
external = kwargs.pop("_external", False) or bool(host)
scheme = kwargs.pop("_scheme", "")
if route.ctx.hosts and external:
if not host and len(route.ctx.hosts) > 1:
raise ValueError(
f"Host is ambiguous: {', '.join(route.ctx.hosts)}"
)
elif host and host not in route.ctx.hosts:
raise ValueError(
f"Requested host ({host}) is not available for this "
f"route: {route.ctx.hosts}"
)
elif not host:
host = list(route.ctx.hosts)[0]
if scheme and not external:
raise ValueError("When specifying _scheme, _external must be True")
netloc = kwargs.pop("_server", None)
if netloc is None and external:
netloc = host or self.config.get("SERVER_NAME", "")
if external:
if not scheme:
if ":" in netloc[:8]:
scheme = netloc[:8].split(":", 1)[0]
else:
scheme = "http"
if "://" in netloc[:8]:
netloc = netloc.split("://", 1)[-1]
# find all the parameters we will need to build in the URL
# matched_params = re.findall(self.router.parameter_pattern, uri)
route.finalize()
for param_info in route.params.values():
# name, _type, pattern = self.router.parse_parameter_string(match)
# we only want to match against each individual parameter
try:
supplied_param = str(kwargs.pop(param_info.name))
except KeyError:
raise URLBuildError(
f"Required parameter `{param_info.name}` was not "
"passed to url_for"
)
# determine if the parameter supplied by the caller
# passes the test in the URL
if param_info.pattern:
pattern = (
param_info.pattern[1]
if isinstance(param_info.pattern, tuple)
else param_info.pattern
)
passes_pattern = pattern.match(supplied_param)
if not passes_pattern:
if param_info.cast != str:
msg = (
f'Value "{supplied_param}" '
f"for parameter `{param_info.name}` does "
"not match pattern for type "
f"`{param_info.cast.__name__}`: "
f"{pattern.pattern}"
)
else:
msg = (
f'Value "{supplied_param}" for parameter '
f"`{param_info.name}` does not satisfy "
f"pattern {pattern.pattern}"
)
raise URLBuildError(msg)
# replace the parameter in the URL with the supplied value
replacement_regex = f"(<{param_info.name}.*?>)"
out = re.sub(replacement_regex, supplied_param, out)
# parse the remainder of the keyword arguments into a querystring
query_string = urlencode(kwargs, doseq=True) if kwargs else ""
# scheme://netloc/path;parameters?query#fragment
out = urlunparse((scheme, netloc, out, "", query_string, anchor))
return out
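    # Usage sketch (illustrative, not part of this implementation): for a handler
    # registered as ``@app.route("/users/<user_id:int>", name="user")``,
    #
    #     app.url_for("user", user_id=5, q="x")
    #         -> "/users/5?q=x"
    #     app.url_for("user", user_id=5, _external=True,
    #                 _server="example.com", _scheme="https")
    #         -> "https://example.com/users/5"
    #
    # The route and parameter values are made up for illustration.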
# -------------------------------------------------------------------- #
# Request Handling
# -------------------------------------------------------------------- #
async def handle_exception(
self, request: Request, exception: BaseException
): # no cov
"""
A handler that catches specific exceptions and outputs a response.
:param request: The current request object
:param exception: The exception that was raised
:raises ServerError: response 500
"""
await self.dispatch(
"http.lifecycle.exception",
inline=True,
context={"request": request, "exception": exception},
)
if (
request.stream is not None
and request.stream.stage is not Stage.HANDLER
):
error_logger.exception(exception, exc_info=True)
logger.error(
"The error response will not be sent to the client for "
f'the following exception:"{exception}". A previous response '
"has at least partially been sent."
)
# ----------------- deprecated -----------------
handler = self.error_handler._lookup(
exception, request.name if request else None
)
if handler:
deprecation(
"An error occurred while handling the request after at "
"least some part of the response was sent to the client. "
"Therefore, the response from your custom exception "
f"handler {handler.__name__} will not be sent to the "
"client. Beginning in v22.6, Sanic will stop executing "
"custom exception handlers in this scenario. Exception "
"handlers should only be used to generate the exception "
"responses. If you would like to perform any other "
"action on a raised exception, please consider using a "
"signal handler like "
'`@app.signal("http.lifecycle.exception")`\n'
"For further information, please see the docs: "
"https://sanicframework.org/en/guide/advanced/"
"signals.html",
22.6,
)
try:
response = self.error_handler.response(request, exception)
if isawaitable(response):
response = await response
except BaseException as e:
logger.error("An error occurred in the exception handler.")
error_logger.exception(e)
# ----------------------------------------------
return
# -------------------------------------------- #
# Request Middleware
# -------------------------------------------- #
response = await self._run_request_middleware(
request, request_name=None
)
# No middleware results
if not response:
try:
response = self.error_handler.response(request, exception)
if isawaitable(response):
response = await response
except Exception as e:
if isinstance(e, SanicException):
response = self.error_handler.default(request, e)
elif self.debug:
response = HTTPResponse(
(
f"Error while handling error: {e}\n"
f"Stack: {format_exc()}"
),
status=500,
)
else:
response = HTTPResponse(
"An error occurred while handling an error", status=500
)
if response is not None:
try:
request.reset_response()
response = await request.respond(response)
except BaseException:
# Skip response middleware
if request.stream:
request.stream.respond(response)
await response.send(end_stream=True)
raise
else:
if request.stream:
response = request.stream.response
# Marked for cleanup and DRY with handle_request/handle_exception
        # when ResponseStream is no longer supported
if isinstance(response, BaseHTTPResponse):
await self.dispatch(
"http.lifecycle.response",
inline=True,
context={
"request": request,
"response": response,
},
)
await response.send(end_stream=True)
elif isinstance(response, ResponseStream):
resp = await response(request)
await self.dispatch(
"http.lifecycle.response",
inline=True,
context={
"request": request,
"response": resp,
},
)
await response.eof()
else:
raise ServerError(
f"Invalid response type {response!r} (need HTTPResponse)"
)
async def handle_request(self, request: Request): # no cov
"""Take a request from the HTTP Server and return a response object
        to be sent back. The HTTP Server only expects a response object, so
        exception handling must be done here.
:param request: HTTP Request object
:return: Nothing
"""
await self.dispatch(
"http.lifecycle.handle",
inline=True,
context={"request": request},
)
# Define `response` var here to remove warnings about
# allocation before assignment below.
response = None
try:
await self.dispatch(
"http.routing.before",
inline=True,
context={"request": request},
)
# Fetch handler from router
route, handler, kwargs = self.router.get(
request.path,
request.method,
request.headers.getone("host", None),
)
request._match_info = {**kwargs}
request.route = route
await self.dispatch(
"http.routing.after",
inline=True,
context={
"request": request,
"route": route,
"kwargs": kwargs,
"handler": handler,
},
)
if (
request.stream
and request.stream.request_body
and not route.ctx.ignore_body
):
if hasattr(handler, "is_stream"):
# Streaming handler: lift the size limit
request.stream.request_max_size = float("inf")
else:
# Non-streaming handler: preload body
await request.receive_body()
# -------------------------------------------- #
# Request Middleware
# -------------------------------------------- #
response = await self._run_request_middleware(
request, request_name=route.name
)
# No middleware results
if not response:
# -------------------------------------------- #
# Execute Handler
# -------------------------------------------- #
if handler is None:
raise ServerError(
(
"'None' was returned while requesting a "
"handler from the router"
)
)
# Run response handler
response = handler(request, **request.match_info)
if isawaitable(response):
response = await response
if request.responded:
if response is not None:
error_logger.error(
"The response object returned by the route handler "
"will not be sent to client. The request has already "
"been responded to."
)
if request.stream is not None:
response = request.stream.response
elif response is not None:
response = await request.respond(response)
elif not hasattr(handler, "is_websocket"):
response = request.stream.response # type: ignore
# Marked for cleanup and DRY with handle_request/handle_exception
            # when ResponseStream is no longer supported
if isinstance(response, BaseHTTPResponse):
await self.dispatch(
"http.lifecycle.response",
inline=True,
context={
"request": request,
"response": response,
},
)
await response.send(end_stream=True)
elif isinstance(response, ResponseStream):
resp = await response(request)
await self.dispatch(
"http.lifecycle.response",
inline=True,
context={
"request": request,
"response": resp,
},
)
await response.eof()
else:
if not hasattr(handler, "is_websocket"):
raise ServerError(
f"Invalid response type {response!r} "
"(need HTTPResponse)"
)
except CancelledError:
raise
except Exception as e:
# Response Generation Failed
await self.handle_exception(request, e)
async def _websocket_handler(
self, handler, request, *args, subprotocols=None, **kwargs
):
if self.asgi:
ws = request.transport.get_websocket_connection()
await ws.accept(subprotocols)
else:
protocol = request.transport.get_protocol()
ws = await protocol.websocket_handshake(request, subprotocols)
# schedule the application handler
# its future is kept in self.websocket_tasks in case it
# needs to be cancelled due to the server being stopped
fut = ensure_future(handler(request, ws, *args, **kwargs))
self.websocket_tasks.add(fut)
cancelled = False
try:
await fut
except Exception as e:
self.error_handler.log(request, e)
except (CancelledError, ConnectionClosed):
cancelled = True
finally:
self.websocket_tasks.remove(fut)
if cancelled:
ws.end_connection(1000)
else:
await ws.close()
# -------------------------------------------------------------------- #
# Testing
# -------------------------------------------------------------------- #
@property
def test_client(self): # noqa
if self._test_client:
return self._test_client
elif self._test_manager:
return self._test_manager.test_client
from sanic_testing.testing import SanicTestClient # type: ignore
self._test_client = SanicTestClient(self)
return self._test_client
@property
def asgi_client(self): # noqa
"""
A testing client that uses ASGI to reach into the application to
        execute handlers.
:return: testing client
:rtype: :class:`SanicASGITestClient`
"""
if self._asgi_client:
return self._asgi_client
elif self._test_manager:
return self._test_manager.asgi_client
from sanic_testing.testing import SanicASGITestClient # type: ignore
self._asgi_client = SanicASGITestClient(self)
return self._asgi_client
# -------------------------------------------------------------------- #
# Execution
# -------------------------------------------------------------------- #
async def _run_request_middleware(
self, request, request_name=None
): # no cov
# The if improves speed. I don't know why
named_middleware = self.named_request_middleware.get(
request_name, deque()
)
applicable_middleware = self.request_middleware + named_middleware
# request.request_middleware_started is meant as a stop-gap solution
# until RFC 1630 is adopted
if applicable_middleware and not request.request_middleware_started:
request.request_middleware_started = True
for middleware in applicable_middleware:
await self.dispatch(
"http.middleware.before",
inline=True,
context={
"request": request,
"response": None,
},
condition={"attach_to": "request"},
)
response = middleware(request)
if isawaitable(response):
response = await response
await self.dispatch(
"http.middleware.after",
inline=True,
context={
"request": request,
"response": None,
},
condition={"attach_to": "request"},
)
if response:
return response
return None
async def _run_response_middleware(
self, request, response, request_name=None
): # no cov
named_middleware = self.named_response_middleware.get(
request_name, deque()
)
applicable_middleware = self.response_middleware + named_middleware
if applicable_middleware:
for middleware in applicable_middleware:
await self.dispatch(
"http.middleware.before",
inline=True,
context={
"request": request,
"response": response,
},
condition={"attach_to": "response"},
)
_response = middleware(request, response)
if isawaitable(_response):
_response = await _response
await self.dispatch(
"http.middleware.after",
inline=True,
context={
"request": request,
"response": _response if _response else response,
},
condition={"attach_to": "response"},
)
if _response:
response = _response
if isinstance(response, BaseHTTPResponse):
response = request.stream.respond(response)
break
return response
def _build_endpoint_name(self, *parts):
parts = [self.name, *parts]
return ".".join(parts)
@classmethod
def _cancel_websocket_tasks(cls, app, loop):
for task in app.websocket_tasks:
task.cancel()
@staticmethod
async def _listener(
app: Sanic, loop: AbstractEventLoop, listener: ListenerType
):
try:
maybe_coro = listener(app) # type: ignore
except TypeError:
maybe_coro = listener(app, loop) # type: ignore
if maybe_coro and isawaitable(maybe_coro):
await maybe_coro
# -------------------------------------------------------------------- #
# Task management
# -------------------------------------------------------------------- #
@classmethod
def _prep_task(
cls,
task,
app,
loop,
):
if callable(task):
try:
task = task(app)
except TypeError:
task = task()
return task
@classmethod
def _loop_add_task(
cls,
task,
app,
loop,
*,
name: Optional[str] = None,
register: bool = True,
) -> Task:
if not isinstance(task, Future):
prepped = cls._prep_task(task, app, loop)
if sys.version_info < (3, 8): # no cov
task = loop.create_task(prepped)
if name:
error_logger.warning(
"Cannot set a name for a task when using Python 3.7. "
"Your task will be created without a name."
)
task.get_name = lambda: name
else:
task = loop.create_task(prepped, name=name)
if name and register and sys.version_info > (3, 7):
app._task_registry[name] = task
return task
@staticmethod
async def dispatch_delayed_tasks(app, loop):
for name in app._delayed_tasks:
await app.dispatch(name, context={"app": app, "loop": loop})
app._delayed_tasks.clear()
@staticmethod
async def run_delayed_task(app, loop, task):
prepped = app._prep_task(task, app, loop)
await prepped
def add_task(
self,
task: Union[Future[Any], Coroutine[Any, Any, Any], Awaitable[Any]],
*,
name: Optional[str] = None,
register: bool = True,
) -> Optional[Task]:
"""
Schedule a task to run later, after the loop has started.
Different from asyncio.ensure_future in that it does not
also return a future, and the actual ensure_future call
is delayed until before server start.
`See user guide re: background tasks
<https://sanicframework.org/guide/basics/tasks.html#background-tasks>`__
        :param task: future, coroutine or awaitable
"""
try:
loop = self.loop # Will raise SanicError if loop is not started
return self._loop_add_task(
task, self, loop, name=name, register=register
)
except SanicException:
task_name = f"sanic.delayed_task.{hash(task)}"
if not self._delayed_tasks:
self.after_server_start(partial(self.dispatch_delayed_tasks))
if name:
raise RuntimeError(
"Cannot name task outside of a running application"
)
self.signal(task_name)(partial(self.run_delayed_task, task=task))
self._delayed_tasks.append(task_name)
return None
def get_task(
self, name: str, *, raise_exception: bool = True
) -> Optional[Task]:
try:
return self._task_registry[name]
except KeyError:
if raise_exception:
raise SanicException(
f'Registered task named "{name}" not found.'
)
return None
async def cancel_task(
self,
name: str,
msg: Optional[str] = None,
*,
raise_exception: bool = True,
) -> None:
task = self.get_task(name, raise_exception=raise_exception)
if task and not task.cancelled():
args: Tuple[str, ...] = ()
if msg:
if sys.version_info >= (3, 9):
args = (msg,)
else: # no cov
raise RuntimeError(
"Cancelling a task with a message is only supported "
"on Python 3.9+."
)
task.cancel(*args)
try:
await task
except CancelledError:
...
def purge_tasks(self):
for key, task in self._task_registry.items():
if task.done() or task.cancelled():
self._task_registry[key] = None
self._task_registry = {
k: v for k, v in self._task_registry.items() if v is not None
}
def shutdown_tasks(
self, timeout: Optional[float] = None, increment: float = 0.1
):
for task in self.tasks:
if task.get_name() != "RunServer":
task.cancel()
if timeout is None:
timeout = self.config.GRACEFUL_SHUTDOWN_TIMEOUT
while len(self._task_registry) and timeout:
with suppress(RuntimeError):
running_loop = get_running_loop()
running_loop.run_until_complete(asyncio.sleep(increment))
self.purge_tasks()
timeout -= increment
@property
def tasks(self):
return iter(self._task_registry.values())
# -------------------------------------------------------------------- #
# ASGI
# -------------------------------------------------------------------- #
async def __call__(self, scope, receive, send):
"""
To be ASGI compliant, our instance must be a callable that accepts
three arguments: scope, receive, send. See the ASGI reference for more
details: https://asgi.readthedocs.io/en/latest
"""
self.asgi = True
if scope["type"] == "lifespan":
self.motd("")
self._asgi_app = await ASGIApp.create(self, scope, receive, send)
asgi_app = self._asgi_app
await asgi_app()
_asgi_single_callable = True # We conform to ASGI 3.0 single-callable
# -------------------------------------------------------------------- #
# Configuration
# -------------------------------------------------------------------- #
def update_config(self, config: Union[bytes, str, dict, Any]):
"""
Update app.config. Full implementation can be found in the user guide.
`See user guide re: configuration
<https://sanicframework.org/guide/deployment/configuration.html#basics>`__
"""
self.config.update_config(config)
@property
def asgi(self):
return self.state.asgi
@asgi.setter
def asgi(self, value: bool):
self.state.asgi = value
@property
def debug(self):
return self.state.is_debug
@debug.setter
def debug(self, value: bool):
deprecation(
"Setting the value of a Sanic application's debug value directly "
"is deprecated and will be removed in v22.9. Please set it using "
"the CLI, app.run, app.prepare, or directly set "
"app.state.mode to Mode.DEBUG.",
22.9,
)
mode = Mode.DEBUG if value else Mode.PRODUCTION
self.state.mode = mode
@property
def auto_reload(self):
return self.config.AUTO_RELOAD
@auto_reload.setter
def auto_reload(self, value: bool):
self.config.AUTO_RELOAD = value
@property
def state(self):
return self._state
@property
def is_running(self):
deprecation(
"Use of the is_running property is no longer used by Sanic "
"internally. The property is now deprecated and will be removed "
"in version 22.9. You may continue to set the property for your "
"own needs until that time. If you would like to check whether "
"the application is operational, please use app.state.stage. More "
"information is available at ___.",
22.9,
)
return self.state.is_running
@is_running.setter
def is_running(self, value: bool):
deprecation(
"Use of the is_running property is no longer used by Sanic "
"internally. The property is now deprecated and will be removed "
"in version 22.9. You may continue to set the property for your "
"own needs until that time. If you would like to check whether "
"the application is operational, please use app.state.stage. More "
"information is available at ___.",
22.9,
)
self.state.is_running = value
@property
def is_stopping(self):
deprecation(
"Use of the is_stopping property is no longer used by Sanic "
"internally. The property is now deprecated and will be removed "
"in version 22.9. You may continue to set the property for your "
"own needs until that time. If you would like to check whether "
"the application is operational, please use app.state.stage. More "
"information is available at ___.",
22.9,
)
return self.state.is_stopping
@is_stopping.setter
def is_stopping(self, value: bool):
deprecation(
"Use of the is_stopping property is no longer used by Sanic "
"internally. The property is now deprecated and will be removed "
"in version 22.9. You may continue to set the property for your "
"own needs until that time. If you would like to check whether "
"the application is operational, please use app.state.stage. More "
"information is available at ___.",
22.9,
)
self.state.is_stopping = value
@property
def reload_dirs(self):
return self.state.reload_dirs
@property
def ext(self) -> Extend:
if not hasattr(self, "_ext"):
setup_ext(self, fail=True)
if not hasattr(self, "_ext"):
raise RuntimeError(
"Sanic Extensions is not installed. You can add it to your "
"environment using:\n$ pip install sanic[ext]\nor\n$ pip "
"install sanic-ext"
)
return self._ext # type: ignore
def extend(
self,
*,
extensions: Optional[List[Type[Extension]]] = None,
built_in_extensions: bool = True,
config: Optional[Union[Config, Dict[str, Any]]] = None,
**kwargs,
) -> Extend:
if hasattr(self, "_ext"):
raise RuntimeError(
"Cannot extend Sanic after Sanic Extensions has been setup."
)
setup_ext(
self,
extensions=extensions,
built_in_extensions=built_in_extensions,
config=config,
fail=True,
**kwargs,
)
return self.ext
# -------------------------------------------------------------------- #
# Class methods
# -------------------------------------------------------------------- #
@classmethod
def register_app(cls, app: "Sanic") -> None:
"""
Register a Sanic instance
"""
if not isinstance(app, cls):
raise SanicException("Registered app must be an instance of Sanic")
name = app.name
if name in cls._app_registry and not cls.test_mode:
raise SanicException(f'Sanic app name "{name}" already in use.')
cls._app_registry[name] = app
@classmethod
def get_app(
cls, name: Optional[str] = None, *, force_create: bool = False
) -> "Sanic":
"""
Retrieve an instantiated Sanic instance
"""
if name is None:
if len(cls._app_registry) > 1:
raise SanicException(
'Multiple Sanic apps found, use Sanic.get_app("app_name")'
)
elif len(cls._app_registry) == 0:
raise SanicException("No Sanic apps have been registered.")
else:
return list(cls._app_registry.values())[0]
try:
return cls._app_registry[name]
except KeyError:
if force_create:
return cls(name)
raise SanicException(f'Sanic app name "{name}" not found.')
# -------------------------------------------------------------------- #
# Lifecycle
# -------------------------------------------------------------------- #
def finalize(self):
try:
self.router.finalize()
except FinalizationError as e:
if not Sanic.test_mode:
raise e
def signalize(self, allow_fail_builtin=True):
self.signal_router.allow_fail_builtin = allow_fail_builtin
try:
self.signal_router.finalize()
except FinalizationError as e:
if not Sanic.test_mode:
raise e
async def _startup(self):
self._future_registry.clear()
if not hasattr(self, "_ext"):
setup_ext(self)
if hasattr(self, "_ext"):
self.ext._display()
if self.state.is_debug:
self.config.TOUCHUP = False
# Setup routers
self.signalize(self.config.TOUCHUP)
self.finalize()
# TODO: Replace in v22.6 to check against apps in app registry
if (
self.__class__._uvloop_setting is not None
and self.__class__._uvloop_setting != self.config.USE_UVLOOP
):
error_logger.warning(
"It looks like you're running several apps with different "
"uvloop settings. This is not supported and may lead to "
"unintended behaviour."
)
self.__class__._uvloop_setting = self.config.USE_UVLOOP
# Startup time optimizations
if self.state.primary:
# TODO:
# - Raise warning if secondary apps have error handler config
ErrorHandler.finalize(self.error_handler, config=self.config)
if self.config.TOUCHUP:
TouchUp.run(self)
self.state.is_started = True
async def _server_event(
self,
concern: str,
action: str,
loop: Optional[AbstractEventLoop] = None,
) -> None:
event = f"server.{concern}.{action}"
if action not in ("before", "after") or concern not in (
"init",
"shutdown",
):
raise SanicException(f"Invalid server event: {event}")
if self.state.verbosity >= 1:
logger.debug(f"Triggering server events: {event}")
reverse = concern == "shutdown"
if loop is None:
loop = self.loop
await self.dispatch(
event,
fail_not_found=False,
reverse=reverse,
inline=True,
context={
"app": self,
"loop": loop,
},
)
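    # ------------------------------------------------------------------ #
    # Editor's note (not part of Sanic itself): a hedged usage sketch for
    # the task-management API documented above. `app`, `tick`, and the
    # task name "ticker" are hypothetical.
    #
    #   app = Sanic("DemoApp")
    #
    #   async def tick(app):
    #       while True:
    #           await asyncio.sleep(1)
    #
    #   @app.after_server_start
    #   async def start_ticker(app, loop):
    #       app.add_task(tick(app), name="ticker")
    #
    #   @app.before_server_stop
    #   async def stop_ticker(app, loop):
    #       await app.cancel_task("ticker")
    #       app.purge_tasks()
    # ------------------------------------------------------------------ #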
| 35.220681 | 82 | 0.538149 |
0bbfcdfd2aa623405d0eebc074c0781f050ee3d8 | 1,278 | py | Python | gui/labeled_textarea.py | keremkoseoglu/Joseki | dbf2dbbc71bb98bad139a98f178b4793445f3f2c | [
"MIT"
] | 2 | 2019-03-11T07:17:30.000Z | 2019-03-25T12:18:18.000Z | gui/labeled_textarea.py | keremkoseoglu/Joseki | dbf2dbbc71bb98bad139a98f178b4793445f3f2c | [
"MIT"
] | 13 | 2020-05-17T11:54:09.000Z | 2022-03-31T13:06:39.000Z | gui/labeled_textarea.py | keremkoseoglu/Joseki | dbf2dbbc71bb98bad139a98f178b4793445f3f2c | [
"MIT"
] | 1 | 2019-07-14T18:00:44.000Z | 2019-07-14T18:00:44.000Z | """ Labeled text are module """
import tkinter
from config.constants import GUI_CELL_WIDTH
class LabeledTextarea:
""" Labeled text are class """
_LINE_FEED = "\r\n"
def __init__(self,
parent: tkinter.Toplevel,
label_text: str,
text_value,
x_pos: int,
y_pos: int):
self._parent = parent
self._label = tkinter.Label(parent, text=label_text)
self._label.place(x=x_pos, y=y_pos)
self._text_box = tkinter.Text(parent, height=10)
self._text_box.place(x=x_pos + GUI_CELL_WIDTH, y=y_pos)
if isinstance(text_value, str):
self.set_value(text_value)
elif isinstance(text_value, list):
string_value = ""
for val in text_value:
string_value += val
self.set_value(string_value)
def disable(self):
""" Disables control """
self._text_box.configure(state="disabled")
def get_value(self):
""" Returns value in text area """
return self._text_box.get("1.0", tkinter.END)
def set_value(self, value: str):
""" Sets value in text area """
self._text_box.insert(tkinter.INSERT, value)
self._parent.update()
| 29.72093 | 63 | 0.582942 |
5e154eb21b029d120c099e5a50837ec81e958376 | 3,979 | py | Python | Python/extinction.py | agsreejith/CUTE-SNR | 9b263401c635ebaaab06835757a9436ae0dfdba5 | [
"MIT"
] | null | null | null | Python/extinction.py | agsreejith/CUTE-SNR | 9b263401c635ebaaab06835757a9436ae0dfdba5 | [
"MIT"
] | null | null | null | Python/extinction.py | agsreejith/CUTE-SNR | 9b263401c635ebaaab06835757a9436ae0dfdba5 | [
"MIT"
] | null | null | null | import numpy as np
def extinction_amores(glong,glat,distance):
    #Interstellar Extinction in the Galaxy (Amores & Lépine - 2004)
    #This program corresponds to the Axisymmetric Model (Model A)
    #If you have any difficulty, suggestion or comments, please contact:
#[email protected] | [email protected]
#You enter longitude, latitude & distance of a point in the Galaxy & get extinction
#Converted to python by A. G.Sreejith
r0=7.5 #adopted distance of the Galactic center
conv=np.pi/180.
    step = 0.05 #steps of the gas density integration to obtain column density, in kpc
#glong=100.0 #galactic longitude# an arbitrary value given here
#glat=0.0 #galactic latitude
#dist = 20.0 #distance of the point to which we will calculate the extinction in kpc
    #print,'Interstellar Extinction in the Galaxy (Amores & Lépine - 2005, AJ, 130, 679)'
#
#read,glong,glat,PROMPT='Give the galactic longitude & latitude (Degrees,Degrees)....: '
    #read,dist,PROMPT='Distance [kpc](positive value)...'
dist=distance
nstep=int(dist/step)
if nstep == 0:
nstep = 1
#computes trigonometric functions only once
yproj=np.cos(glong*conv)
xproj=np.sin(glong*conv)
bproj=np.sin(glat*conv)
dproj=np.cos(glat*conv)
    av=0.0 #for the integration of the column density
    #declaring & putting values in the variables. The arrays will contain the
    #value of quantities like galactic radius or gas density for each step along the line-of-sight
    #if you work with another language you should probably define these quantities in a loop
dis= np.zeros(nstep)
x = np.zeros(nstep)
y = np.zeros(nstep)
yy = np.zeros(nstep)
r = np.zeros(nstep)
z = np.zeros(nstep)
zCO= np.zeros(nstep)
zH = np.zeros(nstep)
ah1= np.zeros(nstep)
aco= np.zeros(nstep)
zmet=np.zeros(nstep)
agas=np.zeros(nstep)
ipas=np.arange(0,nstep)/1 +1
# generates an array with a sequence of numbers, used as index for
# distance along line-of-sight
nel=len(ipas)
dis=ipas*step - step
x=(dis*xproj)*dproj
y=dis*yproj*dproj
yy=r0-y
r=np.sqrt(x*x+yy*yy)
z=dis*bproj
zCO=0.036*np.exp(0.08*r) #H2 scale-height
zH = zCO*1.8 #H1 scale-height (Guilbert 1978)
    zc = 0.02 #shift takes into account that the sun is not precisely in the galactic plane
ah1=0.7*np.exp(-r/7.0-((1.9/r)**2)) #function that calculates the HI density
aco = 58.*np.exp(-r/1.20-((3.50/r)**2)) + 240.*np.exp(-(r**2/0.095)) # H2 density# last term is for galactic center
ah1[0] = 0.0
aco[0] = 0.0
for i in range(0, nel):
if r[i] <= 1.2: zmet[i] = 9.6
if 1.2 < r[i] <= 9.0 : zmet[i] = (r0/r[i])**0.5
if r[i] > 9.0: zmet[i] = (r0/r[i])**0.1
# this defines the metallicity correction, see section 3 of the paper
gam1=1.0
gam2=2.0
#See the final tuning (section 4.1) correction factor for interval l=120-200
tune=1.
if 120 <= glong <= 200 : tune=2.
agas=gam1*(ah1*zmet*np.exp(-0.5*((z-zc)/zH)**2))+gam2*aco*np.exp(-0.5*((z-zc)/zCO)**2)
av=np.sum(agas)*step*3.086*.57*tune
# "total" instruction gives the sum of the array elements
    # it is equivalent to integrating along the line-of-sight. The step is in units of kpc =
#3.08 *10**21 cm & the conversion factor gamma= .53 10**-21 mag cm2
rs = 3.05 #ratio between total to selective extinction
ebv = av/rs
#print('Ebv')
#print(ebv)
#status = Check_Math() # Get status & reset accumulated math error register.
#IF(status AND NOT floating_point_underflow) NE 0 THEN $
# Message, 'IDL Check_Math() error: ' + StrTrim(status, 2)
return ebv,av
| 36.842593 | 120 | 0.60769 |
344e6a5a48c0fd5dbe673b747ae8946b9353a6f0 | 1,442 | py | Python | tests/test_1_sub.py | timsears/imagezmq | 258453be9d86d213b31d83dcbcfcc68f26198328 | [
"MIT"
] | null | null | null | tests/test_1_sub.py | timsears/imagezmq | 258453be9d86d213b31d83dcbcfcc68f26198328 | [
"MIT"
] | null | null | null | tests/test_1_sub.py | timsears/imagezmq | 258453be9d86d213b31d83dcbcfcc68f26198328 | [
"MIT"
] | null | null | null | """test_1_sub.py -- basic receive images test in PUB/SUB mode.
A simple test program that uses imagezmq to receive images from a program that
is sending images. This test pair uses the PUB/SUB messaging pattern.
1. Run this program in its own terminal window:
python test_1_sub.py
There is no particular order in which sending and receiving scripts should be
run.
2. Run the image sending program in a different terminal window:
python test_1_pub.py
A cv2.imshow() window will appear showing the transmitted image. The sending
program sends images with an incrementing counter so you can see what is sent
and what is received.
If you terminate the receiving script, note that the sending script will
continue to increment and send images.
If you start the receiving script again, it will resume picking up images from
the current position.
To end the programs, press Ctrl-C in the terminal window of the sending program
first. Then press Ctrl-C in the terminal window of the receiving proram. You
may have to press Ctrl-C in the display window as well.
"""
import sys
import cv2
sys.path.insert(0, '../imagezmq') # imagezmq.py is in ../imagezmq
import imagezmq
image_hub = imagezmq.ImageHub(open_port='tcp://127.0.0.1:5555', REQ_REP=False)
while True: # press Ctrl-C to stop image display program
image_name, image = image_hub.recv_image()
cv2.imshow(image_name, image)
cv2.waitKey(1) # wait until a key is pressed
| 36.05 | 79 | 0.776699 |
c0465484641dcd90ac642433664b011370e50b8d | 45,549 | py | Python | src/CADRE/comm.py | JustinSGray/OpenMDAO-CADRE | d8378a8a571179990531d8a409efe727cbdf2bb7 | [
"Apache-2.0"
] | 1 | 2021-07-11T19:15:22.000Z | 2021-07-11T19:15:22.000Z | src/CADRE/comm.py | JustinSGray/OpenMDAO-CADRE | d8378a8a571179990531d8a409efe727cbdf2bb7 | [
"Apache-2.0"
] | null | null | null | src/CADRE/comm.py | JustinSGray/OpenMDAO-CADRE | d8378a8a571179990531d8a409efe727cbdf2bb7 | [
"Apache-2.0"
] | 1 | 2015-11-19T18:18:01.000Z | 2015-11-19T18:18:01.000Z | ''' Communications Discpline for CADRE '''
import numpy as np
import scipy.sparse
import MBI
import os
from openmdao.lib.datatypes.api import Float, Array
from openmdao.main.api import Component
from CADRE.kinematics import fixangles, computepositionspherical, \
computepositionsphericaljacobian, computepositionrotd,\
computepositionrotdjacobian
import rk4
# Allow non-standard variable names for scientific calc
# pylint: disable-msg=C0103
class Comm_DataDownloaded(rk4.RK4):
""" Integrate the incoming data rate to compute the time history of data
    downloaded from the satellite."""
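    # Editor's note (not in the original source): the state equation integrated
    # by the RK4 base class is d(Data)/dt = Dr(t); f_dot below therefore simply
    # returns the external download rate, with df/dy = 0 and df/dx = 1.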
def __init__(self, n_times):
super(Comm_DataDownloaded, self).__init__()
# Inputs
self.add(
'Dr',
Array(
np.zeros(n_times),
iotype='in',
shape=(n_times,),
units="Gibyte/s",
desc="Download rate over time"
)
)
# Initial State
self.add(
'Data0',
Array(
[0.0],
iotype='in',
shape=(1,),
units="Gibyte",
desc="Initial downloaded data state"
)
)
# States
self.add(
'Data',
Array(
np.zeros((1, n_times)),
iotype='out',
shape=(1, n_times),
units="Gibyte",
desc="Downloaded data state over time"
)
)
self.state_var = "Data"
self.init_state_var = "Data0"
self.external_vars = ["Dr"]
self.dfdy = np.array([[0.]])
self.dfdx = np.array([[1.]])
def list_deriv_vars(self):
input_keys = ('Data0', 'Dr',)
output_keys = ('Data',)
return input_keys, output_keys
def f_dot(self, external, state):
return external[0]
def df_dy(self, external, state):
return self.dfdy
def df_dx(self, external, state):
return self.dfdx
class Comm_AntRotation(Component):
''' Fixed antenna angle to time history of the quaternion. '''
# Inputs
antAngle = Float(0., iotype="in", copy=None)
def __init__(self, n):
super(Comm_AntRotation, self).__init__()
# Outputs
self.add(
'q_A',
Array(
np.zeros((4, n)),
iotype='out',
shape=(4, n),
units="unitless",
desc="Quarternion matrix in antenna angle frame over time"
)
)
self.dq_dt = np.zeros(4)
def list_deriv_vars(self):
        input_keys = ('antAngle',)
        output_keys = ('q_A',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives. (i.e., Jacobian) """
rt2 = np.sqrt(2)
self.dq_dt[0] = - np.sin(self.antAngle / 2.) / 2.
self.dq_dt[1] = np.cos(self.antAngle / 2.) / rt2 / 2.
self.dq_dt[2] = - np.cos(self.antAngle / 2.) / rt2 / 2.
self.dq_dt[3] = 0.0
def execute(self):
""" Calculate output. """
rt2 = np.sqrt(2)
self.q_A[0, :] = np.cos(self.antAngle/2.)
self.q_A[1, :] = np.sin(self.antAngle/2.) / rt2
self.q_A[2, :] = - np.sin(self.antAngle/2.) / rt2
self.q_A[3, :] = 0.0
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'antAngle' in arg and 'q_A' in result:
for k in xrange(4):
result['q_A'][k, :] += self.dq_dt[k] * arg['antAngle']
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'q_A' in arg and 'antAngle' in result:
for k in xrange(4):
result['antAngle'] += self.dq_dt[k] * np.sum(arg['q_A'][k, :])
class Comm_AntRotationMtx(Component):
""" Translate antenna angle into the body frame. """
def __init__(self, n):
super(Comm_AntRotationMtx, self).__init__()
self.n = n
# Inputs
self.add(
'q_A',
Array(
np.zeros((4, self.n)),
iotype='in',
shape=(4, self.n),
desc="Quarternion matrix in antenna angle frame over time"
)
)
# Outputs
self.add(
'O_AB',
Array(
np.zeros((3, 3, self.n)),
iotype='out',
shape=(3, 3, self.n),
units="unitless",
desc="Rotation matrix from antenna angle to body-fixed frame over time"
)
)
self.J = np.empty((self.n, 3, 3, 4))
def list_deriv_vars(self):
input_keys = ('q_A',)
output_keys = ('O_AB',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives. (i.e., Jacobian) """
A = np.zeros((4, 3))
B = np.zeros((4, 3))
dA_dq = np.zeros((4, 3, 4))
dB_dq = np.zeros((4, 3, 4))
dA_dq[0, :, 0] = (1, 0, 0)
dA_dq[1, :, 0] = (0, 1, 0)
dA_dq[2, :, 0] = (0, 0, 1)
dA_dq[3, :, 0] = (0, 0, 0)
dA_dq[0, :, 1] = (0, 0, 0)
dA_dq[1, :, 1] = (0, 0, -1)
dA_dq[2, :, 1] = (0, 1, 0)
dA_dq[3, :, 1] = (1, 0, 0)
dA_dq[0, :, 2] = (0, 0, 1)
dA_dq[1, :, 2] = (0, 0, 0)
dA_dq[2, :, 2] = (-1, 0, 0)
dA_dq[3, :, 2] = (0, 1, 0)
dA_dq[0, :, 3] = (0, -1, 0)
dA_dq[1, :, 3] = (1, 0, 0)
dA_dq[2, :, 3] = (0, 0, 0)
dA_dq[3, :, 3] = (0, 0, 1)
dB_dq[0, :, 0] = (1, 0, 0)
dB_dq[1, :, 0] = (0, 1, 0)
dB_dq[2, :, 0] = (0, 0, 1)
dB_dq[3, :, 0] = (0, 0, 0)
dB_dq[0, :, 1] = (0, 0, 0)
dB_dq[1, :, 1] = (0, 0, 1)
dB_dq[2, :, 1] = (0, -1, 0)
dB_dq[3, :, 1] = (1, 0, 0)
dB_dq[0, :, 2] = (0, 0, -1)
dB_dq[1, :, 2] = (0, 0, 0)
dB_dq[2, :, 2] = (1, 0, 0)
dB_dq[3, :, 2] = (0, 1, 0)
dB_dq[0, :, 3] = (0, 1, 0)
dB_dq[1, :, 3] = (-1, 0, 0)
dB_dq[2, :, 3] = (0, 0, 0)
dB_dq[3, :, 3] = (0, 0, 1)
for i in range(0, self.n):
A[0, :] = ( self.q_A[0, i], -self.q_A[3, i], self.q_A[2, i])
A[1, :] = ( self.q_A[3, i], self.q_A[0, i], -self.q_A[1, i])
A[2, :] = (-self.q_A[2, i], self.q_A[1, i], self.q_A[0, i])
A[3, :] = ( self.q_A[1, i], self.q_A[2, i], self.q_A[3, i])
B[0, :] = ( self.q_A[0, i], self.q_A[3, i], -self.q_A[2, i])
B[1, :] = (-self.q_A[3, i], self.q_A[0, i], self.q_A[1, i])
B[2, :] = ( self.q_A[2, i], -self.q_A[1, i], self.q_A[0, i])
B[3, :] = ( self.q_A[1, i], self.q_A[2, i], self.q_A[3, i])
for k in range(0, 4):
self.J[i, :,:, k] = np.dot(dA_dq[:,:, k].T, B) + \
np.dot(A.T, dB_dq[:, :, k])
def execute(self):
""" Calculate output. """
A = np.zeros((4, 3))
B = np.zeros((4, 3))
for i in range(0, self.n):
A[0, :] = ( self.q_A[0, i], -self.q_A[3, i], self.q_A[2, i])
A[1, :] = ( self.q_A[3, i], self.q_A[0, i], -self.q_A[1, i])
A[2, :] = (-self.q_A[2, i], self.q_A[1, i], self.q_A[0, i])
A[3, :] = ( self.q_A[1, i], self.q_A[2, i], self.q_A[3, i])
B[0, :] = ( self.q_A[0, i], self.q_A[3, i], -self.q_A[2, i])
B[1, :] = (-self.q_A[3, i], self.q_A[0, i], self.q_A[1, i])
B[2, :] = ( self.q_A[2, i], -self.q_A[1, i], self.q_A[0, i])
B[3, :] = ( self.q_A[1, i], self.q_A[2, i], self.q_A[3, i])
self.O_AB[:, :, i] = np.dot(A.T, B)
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'q_A' in arg and 'O_AB' in result:
for u in xrange(3):
for v in xrange(3):
for k in xrange(4):
result['O_AB'][u, v, :] += \
self.J[:, u, v, k] * arg['q_A'][k, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'O_AB' in arg and 'q_A' in result:
for u in range(3):
for v in range(3):
for k in range(4):
result['q_A'][k, :] += self.J[:, u, v, k] * \
arg['O_AB'][u, v, :]
class Comm_BitRate(Component):
''' Compute the data rate the satellite receives. '''
# constants
pi = 2 * np.arccos(0.)
c = 299792458
Gr = 10 ** (12.9 / 10.)
Ll = 10 ** (-2.0 / 10.)
f = 437e6
k = 1.3806503e-23
SNR = 10 ** (5.0 / 10.)
T = 500.
alpha = c ** 2 * Gr * Ll / 16.0 / pi ** 2 / f ** 2 / k / SNR / T / 1e6
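    # Editor's note (not in the original source): `alpha` appears to collect the
    # constant part of a Friis-style link budget.  Received power is
    #     P_r = P_comm * gain * Gr * Ll * (c / (4 * pi * f * S))**2,
    # and dividing by the noise/threshold term k * T * SNR (plus a unit
    # conversion factor of 1e6) gives the download rate computed in execute():
    #     Dr = alpha * P_comm * gain * CommLOS / S**2.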
def __init__(self, n):
super(Comm_BitRate, self).__init__()
self.n = n
# Inputs
self.add(
'P_comm',
Array(
np.zeros(self.n),
iotype='in',
shape=(self.n, ),
units="W",
desc="Communication power over time"
)
)
self.add(
'gain',
Array(
np.zeros(self.n),
iotype='in',
shape=(self.n, ),
units="unitless",
desc="Transmitter gain over time"
)
)
self.add(
'GSdist',
Array(
np.zeros(self.n),
iotype='in',
shape=(self.n, ),
units="km",
desc="Distance from ground station to satellite over time"
)
)
self.add(
'CommLOS',
Array(
np.zeros(self.n),
iotype='in',
shape=(self.n, ),
units="unitless",
desc="Satellite to ground station line of sight over time"
)
)
# Outputs
self.add(
'Dr',
Array(
np.zeros(self.n),
iotype='out',
shape=(self.n, ),
units="Gibyte/s",
desc="Download rate over time"
)
)
def list_deriv_vars(self):
input_keys = ('P_comm', 'gain', 'GSdist', 'CommLOS',)
output_keys = ('Dr',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives. (i.e., Jacobian) """
S2 = 0.
self.dD_dP = np.zeros(self.n)
self.dD_dGt = np.zeros(self.n)
self.dD_dS = np.zeros(self.n)
self.dD_dLOS = np.zeros(self.n)
for i in range(0, self.n):
if np.abs(self.GSdist[i]) > 1e-10:
S2 = self.GSdist[i] * 1e3
else:
S2 = 1e-10
self.dD_dP[i] = self.alpha * self.gain[i] * \
self.CommLOS[i] / S2 ** 2
self.dD_dGt[i] = self.alpha * self.P_comm[i] * \
self.CommLOS[i] / S2 ** 2
self.dD_dS[i] = -2.0 * 1e3 * self.alpha * self.P_comm[i] * \
self.gain[i] * self.CommLOS[i] / S2 ** 3
self.dD_dLOS[i] = self.alpha * \
self.P_comm[i] * self.gain[i] / S2 ** 2
def execute(self):
""" Calculate output. """
for i in range(0, self.n):
if np.abs(self.GSdist[i]) > 1e-10:
S2 = self.GSdist[i] * 1e3
else:
S2 = 1e-10
self.Dr[i] = self.alpha * self.P_comm[i] * self.gain[i] * \
self.CommLOS[i] / S2 ** 2
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'Dr' in result:
if 'P_comm' in arg:
result['Dr'] += self.dD_dP * arg['P_comm']
if 'gain' in arg:
result['Dr'] += self.dD_dGt * arg['gain']
if 'GSdist' in arg:
result['Dr'] += self.dD_dS * arg['GSdist']
if 'CommLOS' in arg:
result['Dr'] += self.dD_dLOS * arg['CommLOS']
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'Dr' in arg:
if 'P_comm' in result:
result['P_comm'] += self.dD_dP.T * arg['Dr']
if 'gain' in result:
result['gain'] += self.dD_dGt.T * arg['Dr']
if 'GSdist' in result:
result['GSdist'] += self.dD_dS.T * arg['Dr']
if 'CommLOS' in result:
result['CommLOS'] += self.dD_dLOS.T * arg['Dr']
class Comm_Distance(Component):
    '''Calculates distance from ground station to satellite.'''
def __init__(self, n):
super(Comm_Distance, self).__init__()
self.n = n
# Inputs
self.add(
'r_b2g_A',
Array(
np.zeros((3, self.n)),
iotype='in',
shape=(3, self.n),
units="km",
desc="Position vector from satellite to ground station in antenna angle frame over time"
)
)
# Outputs
self.add(
'GSdist',
Array(
np.zeros(self.n),
iotype='out',
shape=(self.n,),
units="km",
desc="Distance from ground station to satellite over time"
)
)
def list_deriv_vars(self):
input_keys = ('r_b2g_A',)
output_keys = ('GSdist',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.J = np.zeros((self.n, 3))
for i in range(0, self.n):
norm = np.dot(self.r_b2g_A[:, i], self.r_b2g_A[:, i]) ** 0.5
if norm > 1e-10:
self.J[i, :] = self.r_b2g_A[:, i] / norm
else:
self.J[i, :] = 0.
def execute(self):
""" Calculate output. """
for i in range(0, self.n):
self.GSdist[i] = np.dot(
self.r_b2g_A[:, i], self.r_b2g_A[:, i]) ** 0.5
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'r_b2g_A' in arg and 'GSdist' in result:
for k in xrange(3):
result['GSdist'] += self.J[:, k] * arg['r_b2g_A'][k, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'GSdist' in arg and 'r_b2g_A' in result:
for k in xrange(3):
result['r_b2g_A'][k, :] += self.J[:, k] * arg['GSdist']
class Comm_EarthsSpin(Component):
''' Returns the Earth quaternion as a function of time. '''
def __init__(self, n):
super(Comm_EarthsSpin, self).__init__()
self.n = n
# Inputs
self.add('t', Array(np.zeros(self.n),
iotype='in',
shape=(self.n, ),
units="s",
desc="Time"))
# Outputs
self.add('q_E', Array(
np.zeros((4, self.n)),
iotype='out',
shape=(4, self.n),
units="unitless",
desc="Quarternion matrix in Earth-fixed frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('t',)
output_keys = ('q_E',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
ntime = self.n
self.dq_dt = np.zeros((ntime, 4))
fact = np.pi / 3600.0 / 24.0
theta = fact * self.t
self.dq_dt[:, 0] = -np.sin(theta) * fact
self.dq_dt[:, 3] = -np.cos(theta) * fact
def execute(self):
""" Calculate output. """
fact = np.pi / 3600.0 / 24.0
theta = fact * self.t
self.q_E[0, :] = np.cos(theta)
self.q_E[3, :] = -np.sin(theta)
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 't' in arg and 'q_E' in result:
for k in range(4):
result['q_E'][k, :] += self.dq_dt[:, k] * arg['t']
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'q_E' in arg and 't' in result:
for k in range(4):
result['t'] += self.dq_dt[:, k] * arg['q_E'][k, :]
class Comm_EarthsSpinMtx(Component):
''' Quaternion to rotation matrix for the earth spin. '''
def __init__(self, n):
super(Comm_EarthsSpinMtx, self).__init__()
self.n = n
# Inputs
self.add(
'q_E',
Array(
np.zeros((4, self.n)),
iotype='in',
shape=(4, self.n),
units="unitless",
desc="Quarternion matrix in Earth-fixed frame over time"
)
)
# Outputs
self.add(
'O_IE',
Array(
np.zeros((3, 3, self.n)),
iotype='out',
shape=(3, 3, self.n),
units="unitless",
desc="Rotation matrix from Earth-centered inertial frame to Earth-fixed frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('q_E',)
output_keys = ('O_IE',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives. (i.e., Jacobian) """
A = np.zeros((4, 3))
B = np.zeros((4, 3))
dA_dq = np.zeros((4, 3, 4))
dB_dq = np.zeros((4, 3, 4))
self.J = np.zeros((self.n, 3, 3, 4))
dA_dq[0, :, 0] = (1, 0, 0)
dA_dq[1, :, 0] = (0, 1, 0)
dA_dq[2, :, 0] = (0, 0, 1)
dA_dq[3, :, 0] = (0, 0, 0)
dA_dq[0, :, 1] = (0, 0, 0)
dA_dq[1, :, 1] = (0, 0, -1)
dA_dq[2, :, 1] = (0, 1, 0)
dA_dq[3, :, 1] = (1, 0, 0)
dA_dq[0, :, 2] = (0, 0, 1)
dA_dq[1, :, 2] = (0, 0, 0)
dA_dq[2, :, 2] = (-1, 0, 0)
dA_dq[3, :, 2] = (0, 1, 0)
dA_dq[0, :, 3] = (0, -1, 0)
dA_dq[1, :, 3] = (1, 0, 0)
dA_dq[2, :, 3] = (0, 0, 0)
dA_dq[3, :, 3] = (0, 0, 1)
dB_dq[0, :, 0] = (1, 0, 0)
dB_dq[1, :, 0] = (0, 1, 0)
dB_dq[2, :, 0] = (0, 0, 1)
dB_dq[3, :, 0] = (0, 0, 0)
dB_dq[0, :, 1] = (0, 0, 0)
dB_dq[1, :, 1] = (0, 0, 1)
dB_dq[2, :, 1] = (0, -1, 0)
dB_dq[3, :, 1] = (1, 0, 0)
dB_dq[0, :, 2] = (0, 0, -1)
dB_dq[1, :, 2] = (0, 0, 0)
dB_dq[2, :, 2] = (1, 0, 0)
dB_dq[3, :, 2] = (0, 1, 0)
dB_dq[0, :, 3] = (0, 1, 0)
dB_dq[1, :, 3] = (-1, 0, 0)
dB_dq[2, :, 3] = (0, 0, 0)
dB_dq[3, :, 3] = (0, 0, 1)
for i in range(0, self.n):
A[0, :] = ( self.q_E[0, i], -self.q_E[3, i], self.q_E[2, i])
A[1, :] = ( self.q_E[3, i], self.q_E[0, i], -self.q_E[1, i])
A[2, :] = (-self.q_E[2, i], self.q_E[1, i], self.q_E[0, i])
A[3, :] = ( self.q_E[1, i], self.q_E[2, i], self.q_E[3, i])
B[0, :] = ( self.q_E[0, i], self.q_E[3, i], -self.q_E[2, i])
B[1, :] = (-self.q_E[3, i], self.q_E[0, i], self.q_E[1, i])
B[2, :] = ( self.q_E[2, i], -self.q_E[1, i], self.q_E[0, i])
B[3, :] = ( self.q_E[1, i], self.q_E[2, i], self.q_E[3, i])
for k in range(0, 4):
self.J[i, :,:, k] = np.dot(dA_dq[:,:, k].T, B) + \
np.dot(A.T, dB_dq[:, :, k])
def execute(self):
""" Calculate output. """
A = np.zeros((4, 3))
B = np.zeros((4, 3))
for i in range(0, self.n):
A[0, :] = ( self.q_E[0, i], -self.q_E[3, i], self.q_E[2, i])
A[1, :] = ( self.q_E[3, i], self.q_E[0, i], -self.q_E[1, i])
A[2, :] = (-self.q_E[2, i], self.q_E[1, i], self.q_E[0, i])
A[3, :] = ( self.q_E[1, i], self.q_E[2, i], self.q_E[3, i])
B[0, :] = ( self.q_E[0, i], self.q_E[3, i], -self.q_E[2, i])
B[1, :] = (-self.q_E[3, i], self.q_E[0, i], self.q_E[1, i])
B[2, :] = ( self.q_E[2, i], -self.q_E[1, i], self.q_E[0, i])
B[3, :] = ( self.q_E[1, i], self.q_E[2, i], self.q_E[3, i])
self.O_IE[:, :, i] = np.dot(A.T, B)
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'q_E' in arg and 'O_IE' in result:
for u in range(3):
for v in range(3):
for k in range(4):
result['O_IE'][u, v, :] += self.J[:, u, v, k] * \
arg['q_E'][k, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'O_IE' in arg and 'q_E' in result:
for u in range(3):
for v in range(3):
for k in range(4):
result['q_E'][k, :] += self.J[:, u, v, k] * \
arg['O_IE'][u, v, :]
class Comm_GainPattern(Component):
''' Determines transmitter gain based on an external az-el map. '''
def __init__(self, n, rawG=None):
super(Comm_GainPattern, self).__init__()
self.n = n
if rawG is None:
fpath = os.path.dirname(os.path.realpath(__file__))
rawGdata = np.genfromtxt(fpath + '/data/Comm/Gain.txt')
rawG = (10 ** (rawGdata / 10.0)).reshape((361, 361), order='F')
# Inputs
self.add(
'azimuthGS',
Array(
np.zeros(n),
iotype='in',
shape=(n,),
units="rad",
desc="Azimuth angle from satellite to ground station in Earth-fixed frame over time"
)
)
self.add(
'elevationGS',
Array(
np.zeros(n),
iotype='in',
shape=(self.n,),
units="rad",
desc="Elevation angle from satellite to ground station in Earth-fixed frame over time"
)
)
# Outputs
self.add('gain', Array(np.zeros(n),
iotype='out',
shape=(n,),
units="unitless",
desc="Transmitter gain over time"))
pi = np.pi
az = np.linspace(0, 2 * pi, 361)
el = np.linspace(0, 2 * pi, 361)
self.MBI = MBI.MBI(rawG, [az, el], [15, 15], [4, 4])
self.x = np.zeros((self.n, 2), order='F')
def list_deriv_vars(self):
input_keys = ('azimuthGS', 'elevationGS',)
output_keys = ('gain',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives. (i.e., Jacobian) """
self.dg_daz = self.MBI.evaluate(self.x, 1)[:, 0]
self.dg_del = self.MBI.evaluate(self.x, 2)[:, 0]
def execute(self):
""" Calculate output. """
result = fixangles(self.n, self.azimuthGS, self.elevationGS)
self.x[:, 0] = result[0]
self.x[:, 1] = result[1]
self.gain = self.MBI.evaluate(self.x)[:, 0]
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'azimuthGS' in arg and 'gain' in result:
result['gain'] += self.dg_daz * arg['azimuthGS']
if 'elevationGS' in arg and 'gain' in result:
result['gain'] += self.dg_del * arg['elevationGS']
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'azimuthGS' in result and 'gain' in arg:
result['azimuthGS'] += self.dg_daz * arg['gain']
if 'elevationGS' in result and 'gain' in arg:
result['elevationGS'] += self.dg_del * arg['gain']
class Comm_GSposEarth(Component):
''' Returns position of the ground station in Earth frame. '''
# constants
Re = 6378.137
d2r = np.pi / 180.
# Inputs
lon = Float(0.0, iotype="in", units="rad",
desc="Longitude of ground station in Earth-fixed frame")
lat = Float(0.0, iotype="in", units="rad",
desc="Latitude of ground station in Earth-fixed frame")
alt = Float(0.0, iotype="in", units="rad",
desc="Altitude of ground station in Earth-fixed frame")
def __init__(self, n):
super(Comm_GSposEarth, self).__init__()
self.n = n
# Outputs
self.add(
'r_e2g_E',
Array(
np.zeros((3, self.n)),
iotype='out',
shape=(3, self.n),
units="km",
desc="Position vector from earth to ground station in Earth-fixed frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('lon', 'lat','alt',)
output_keys = ('r_e2g_E',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.dr_dlon = np.zeros(3)
self.dr_dlat = np.zeros(3)
self.dr_dalt = np.zeros(3)
cos_lat = np.cos(self.d2r * self.lat)
sin_lat = np.sin(self.d2r * self.lat)
cos_lon = np.cos(self.d2r * self.lon)
sin_lon = np.sin(self.d2r * self.lon)
r_GS = (self.Re + self.alt)
self.dr_dlon[0] = -self.d2r * r_GS * cos_lat * sin_lon
self.dr_dlat[0] = -self.d2r * r_GS * sin_lat * cos_lon
self.dr_dalt[0] = cos_lat * cos_lon
self.dr_dlon[1] = self.d2r * r_GS * cos_lat * cos_lon
self.dr_dlat[1] = -self.d2r * r_GS * sin_lat * sin_lon
self.dr_dalt[1] = cos_lat * sin_lon
self.dr_dlon[2] = 0.
self.dr_dlat[2] = self.d2r * r_GS * cos_lat
self.dr_dalt[2] = sin_lat
def execute(self):
""" Calculate output. """
cos_lat = np.cos(self.d2r * self.lat)
r_GS = (self.Re + self.alt)
self.r_e2g_E[0, :] = r_GS * cos_lat * np.cos(self.d2r*self.lon)
self.r_e2g_E[1, :] = r_GS * cos_lat * np.sin(self.d2r*self.lon)
self.r_e2g_E[2, :] = r_GS * np.sin(self.d2r*self.lat)
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'lon' in arg:
for k in xrange(3):
result['r_e2g_E'][k, :] += self.dr_dlon[k] * arg['lon']
if 'lat' in arg:
for k in xrange(3):
result['r_e2g_E'][k, :] += self.dr_dlat[k] * arg['lat']
if 'alt' in arg:
for k in xrange(3):
result['r_e2g_E'][k, :] += self.dr_dalt[k] * arg['alt']
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'r_e2g_E' in arg:
for k in xrange(3):
if 'lon' in result:
result['lon'] += self.dr_dlon[k] * np.sum(arg['r_e2g_E'][k, :])
if 'lat' in result:
result['lat'] += self.dr_dlat[k] * np.sum(arg['r_e2g_E'][k, :])
if 'alt' in result:
result['alt'] += self.dr_dalt[k] * np.sum(arg['r_e2g_E'][k, :])
class Comm_GSposECI(Component):
''' Convert time history of ground station position from earth frame
to inertial frame.
'''
def __init__(self, n):
super(Comm_GSposECI, self).__init__()
self.n = n
# Inputs
self.add(
'O_IE',
Array(
np.zeros((3, 3, self.n)),
iotype='in',
shape=(3, 3, self.n),
units="unitless",
desc="Rotation matrix from Earth-centered inertial frame to Earth-fixed frame over time"
)
)
self.add(
'r_e2g_E',
Array(
np.zeros((3, self.n)),
iotype='in',
shape=(3, self.n),
units="km",
desc="Position vector from earth to ground station in Earth-fixed frame over time"
)
)
# Outputs
self.add(
'r_e2g_I',
Array(
np.zeros((3, self.n)),
iotype='out',
shape=(3, self.n),
units="km",
desc="Position vector from earth to ground station in Earth-centered inertial frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('O_IE', 'r_e2g_E',)
output_keys = ('r_e2g_I',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.J1 = np.zeros((self.n, 3, 3, 3))
for k in range(0, 3):
for v in range(0, 3):
self.J1[:, k, k, v] = self.r_e2g_E[v, :]
self.J2 = np.transpose(self.O_IE, (2, 0, 1))
def execute(self):
""" Calculate output. """
for i in range(0, self.n):
self.r_e2g_I[:, i] = np.dot(self.O_IE[:, :, i],
self.r_e2g_E[:, i])
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'r_e2g_I' in result:
for k in xrange(3):
for u in xrange(3):
if 'O_IE' in arg:
for v in xrange(3):
result['r_e2g_I'][k, :] += self.J1[:, k, u, v] * \
arg['O_IE'][u, v, :]
if 'r_e2g_E' in arg:
result['r_e2g_I'][k, :] += self.J2[:, k, u] * \
arg['r_e2g_E'][u, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'r_e2g_I' in arg:
for k in xrange(3):
if 'O_IE' in result:
for u in xrange(3):
for v in xrange(3):
result['O_IE'][u, v, :] += self.J1[:, k, u, v] * \
arg['r_e2g_I'][k, :]
if 'r_e2g_E' in result:
for j in xrange(3):
result['r_e2g_E'][j, :] += self.J2[:, k, j] * \
arg['r_e2g_I'][k, :]
class Comm_LOS(Component):
''' Determines if the Satellite has line of sight with the ground
stations. '''
# constants
Re = 6378.137
def __init__(self, n):
super(Comm_LOS, self).__init__()
self.n = n
# Inputs
self.add(
'r_b2g_I',
Array(
np.zeros((3, n)),
iotype='in',
shape=(3, self.n),
units="km",
desc="Position vector from satellite to ground station in Earth-centered inertial frame over time"
)
)
self.add(
'r_e2g_I',
Array(
np.zeros((3, n)),
iotype='in',
shape=(3, self.n),
units="km",
desc="Position vector from earth to ground station in Earth-centered inertial frame over time"
)
)
# Outputs
self.add(
'CommLOS',
Array(
np.zeros(n),
iotype='out',
shape=(self.n, ),
units="unitless",
desc="Satellite to ground station line of sight over time"
)
)
def list_deriv_vars(self):
input_keys = ('r_b2g_I', 'r_e2g_I',)
output_keys = ('CommLOS',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.dLOS_drb = np.zeros((self.n, 3))
self.dLOS_dre = np.zeros((self.n, 3))
Rb = 10.0
for i in range(0, self.n):
proj = np.dot(self.r_b2g_I[:, i], self.r_e2g_I[:, i]) / self.Re
if proj > 0:
self.dLOS_drb[i, :] = 0.
self.dLOS_dre[i, :] = 0.
elif proj < -Rb:
self.dLOS_drb[i, :] = 0.
self.dLOS_dre[i, :] = 0.
else:
x = (proj - 0) / (-Rb - 0)
dx_dproj = -1. / Rb
dLOS_dx = 6 * x - 6 * x ** 2
dproj_drb = self.r_e2g_I[:, i]
dproj_dre = self.r_b2g_I[:, i]
self.dLOS_drb[i, :] = dLOS_dx * dx_dproj * dproj_drb
self.dLOS_dre[i, :] = dLOS_dx * dx_dproj * dproj_dre
def execute(self):
""" Calculate output. """
Rb = 100.0
for i in range(0, self.n):
proj = np.dot(self.r_b2g_I[:, i], self.r_e2g_I[:, i]) / self.Re
if proj > 0:
self.CommLOS[i] = 0.
elif proj < -Rb:
self.CommLOS[i] = 1.
else:
x = (proj - 0) / (-Rb - 0)
self.CommLOS[i] = 3 * x ** 2 - 2 * x ** 3
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'CommLOS' in result:
for k in xrange(3):
if 'r_b2g_I' in arg:
result['CommLOS'] += self.dLOS_drb[:, k] * arg['r_b2g_I'][k, :]
if 'r_e2g_I' in arg:
result['CommLOS'] += self.dLOS_dre[:, k] * arg['r_e2g_I'][k, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'CommLOS' in arg:
for k in xrange(3):
if 'r_b2g_I' in result:
result['r_b2g_I'][k, :] += self.dLOS_drb[:, k] * arg['CommLOS']
if 'r_e2g_I' in result:
result['r_e2g_I'][k, :] += self.dLOS_dre[:, k] * arg['CommLOS']
class Comm_VectorAnt(Component):
'''Transform from antenna to body frame'''
def __init__(self, n):
super(Comm_VectorAnt, self).__init__()
self.n = n
# Inputs
self.add(
'r_b2g_B',
Array(
np.zeros((3, n)),
iotype='in',
shape=(3, n),
units="km",
desc="Position vector from satellite to ground station in body-fixed frame over time"
)
)
self.add(
'O_AB',
Array(
np.zeros((3, 3, n)),
iotype='in',
shape=(3, 3, n),
units="unitless",
desc="Rotation matrix from antenna angle to body-fixed frame over time"
)
)
# Outputs
self.add(
'r_b2g_A',
Array(
np.zeros((3, n)),
iotype='out',
shape=(3, n),
units="km",
desc="Position vector from satellite to ground station in antenna angle frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('r_b2g_B', 'O_AB',)
output_keys = ('r_b2g_A',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.J1, self.J2 = computepositionrotdjacobian(self.n, self.r_b2g_B,
self.O_AB)
def execute(self):
""" Calculate output. """
self.r_b2g_A = computepositionrotd(self.n, self.r_b2g_B, self.O_AB)
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'r_b2g_A' in result:
for k in xrange(3):
if 'O_AB' in arg:
for u in xrange(3):
for v in xrange(3):
result['r_b2g_A'][k, :] += self.J1[:, k, u, v] * \
arg['O_AB'][u, v, :]
if 'r_b2g_B' in arg:
for j in xrange(3):
result['r_b2g_A'][k, :] += self.J2[:, k, j] * \
arg['r_b2g_B'][j, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'r_b2g_A' in arg:
for k in xrange(3):
if 'O_AB' in result:
for u in xrange(3):
for v in xrange(3):
result['O_AB'][u, v, :] += self.J1[:, k, u, v] * \
arg['r_b2g_A'][k, :]
if 'r_b2g_B' in result:
for j in xrange(3):
result['r_b2g_B'][j, :] += self.J2[:, k, j] * \
arg['r_b2g_A'][k, :]
class Comm_VectorBody(Component):
'''Transform from body to inertial frame.'''
def __init__(self, n):
super(Comm_VectorBody, self).__init__()
self.n = n
# Inputs
self.add(
'r_b2g_I',
Array(
np.zeros((3, n)),
iotype='in',
shape=(3, n),
units="km",
desc="Position vector from satellite to ground station in Earth-centered inertial frame over time"
)
)
self.add(
'O_BI',
Array(
np.zeros((3, 3, n)),
iotype='in',
shape=(3, 3, n),
units="unitless",
desc="Rotation matrix from body-fixed frame to Earth-centered inertial frame over time"
)
)
# Outputs
self.add(
'r_b2g_B',
Array(
np.zeros((3, n)),
iotype='out',
shape=(3, n),
units="km",
desc="Position vector from satellite to ground station in body-fixed frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('r_b2g_I', 'O_BI',)
output_keys = ('r_b2g_B',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.J1 = np.zeros((self.n, 3, 3, 3))
for k in range(0, 3):
for v in range(0, 3):
self.J1[:, k, k, v] = self.r_b2g_I[v, :]
self.J2 = np.transpose(self.O_BI, (2, 0, 1))
def execute(self):
""" Calculate output. """
for i in range(0, self.n):
self.r_b2g_B[:, i] = np.dot(self.O_BI[:, :, i], self.r_b2g_I[:, i])
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'r_b2g_B' in result:
for k in range(3):
if 'O_BI' in arg:
for u in range(3):
for v in range(3):
result['r_b2g_B'][k, :] += self.J1[:, k, u, v] * \
arg['O_BI'][u, v, :]
if 'r_b2g_I' in arg:
for j in range(3):
result['r_b2g_B'][k, :] += self.J2[:, k, j] * \
arg['r_b2g_I'][j, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'r_b2g_B' in arg:
for k in range(3):
if 'O_BI' in result:
for u in range(3):
for v in range(3):
result['O_BI'][u, v, :] += self.J1[:, k, u, v] * \
arg['r_b2g_B'][k, :]
if 'r_b2g_I' in result:
for j in range(3):
result['r_b2g_I'][j, :] += self.J2[:, k, j] * \
arg['r_b2g_B'][k, :]
class Comm_VectorECI(Component):
'''Determine vector between satellite and ground station.'''
def __init__(self, n):
super(Comm_VectorECI, self).__init__()
self.n = n
# Inputs
self.add(
'r_e2g_I',
Array(
np.zeros((3, n)),
iotype='in',
shape=(3, n),
units="km",
desc="Position vector from earth to ground station in Earth-centered inertial frame over time"
)
)
self.add(
'r_e2b_I',
Array(
np.zeros((6, n)),
iotype='in',
shape=(6, n),
units="unitless",
desc="Position and velocity vector from earth to satellite in Earth-centered inertial frame over time"
)
)
# Outputs
self.add(
'r_b2g_I',
Array(
np.zeros((3, n)),
iotype='out',
shape=(3, n),
units="km",
desc="Position vector from satellite to ground station in Earth-centered inertial frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('r_e2g_I', 'r_e2b_I',)
output_keys = ('r_b2g_I',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
# Derivatives are simple
return
def execute(self):
""" Calculate output. """
self.r_b2g_I = self.r_e2g_I - self.r_e2b_I[:3, :]
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'r_e2g_I' in arg:
result['r_b2g_I'] += arg['r_e2g_I']
if 'r_e2b_I' in arg:
result['r_b2g_I'] += -arg['r_e2b_I'][:3, :]
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'r_b2g_I' in arg:
if 'r_e2g_I' in result:
result['r_e2g_I'] += arg['r_b2g_I']
if 'r_e2b_I' in result:
result['r_e2b_I'][:3, :] += -arg['r_b2g_I']
class Comm_VectorSpherical(Component):
'''Convert satellite-ground vector into Az-El.'''
def __init__(self, n):
super(Comm_VectorSpherical, self).__init__()
self.n = n
# Inputs
self.add(
'r_b2g_A',
Array(
np.zeros((3, n)),
iotype='in',
shape=(3, self.n),
units="km",
desc="Position vector from satellite to ground station in antenna angle frame over time"
)
)
# Outputs
self.add(
'azimuthGS',
Array(
np.zeros(n),
iotype='out',
shape=(n,),
units="rad",
desc="Azimuth angle from satellite to ground station in Earth-fixed frame over time"
)
)
self.add(
'elevationGS',
Array(
np.zeros(n),
iotype='out',
shape=(n,),
units="rad",
desc="Elevation angle from satellite to ground station in Earth-fixed frame over time"
)
)
def list_deriv_vars(self):
input_keys = ('r_b2g_A',)
        output_keys = ('azimuthGS', 'elevationGS',)
return input_keys, output_keys
def provideJ(self):
""" Calculate and save derivatives (i.e., Jacobian). """
self.Ja1, self.Ji1, self.Jj1, self.Ja2, self.Ji2, self.Jj2 = \
computepositionsphericaljacobian(self.n, 3 * self.n, self.r_b2g_A)
self.J1 = scipy.sparse.csc_matrix((self.Ja1, (self.Ji1, self.Jj1)),
shape=(self.n, 3 * self.n))
self.J2 = scipy.sparse.csc_matrix((self.Ja2, (self.Ji2, self.Jj2)),
shape=(self.n, 3 * self.n))
self.J1T = self.J1.transpose()
self.J2T = self.J2.transpose()
def execute(self):
""" Calculate output. """
azimuthGS, elevationGS = computepositionspherical(self.n, self.r_b2g_A)
self.azimuthGS = azimuthGS
self.elevationGS = elevationGS
def apply_deriv(self, arg, result):
""" Matrix-vector product with the Jacobian. """
if 'r_b2g_A' in arg:
r_b2g_A = arg['r_b2g_A'].reshape((3 * self.n), order='F')
if 'azimuthGS' in result:
result['azimuthGS'] += self.J1.dot(r_b2g_A)
if 'elevationGS' in result:
result['elevationGS'] += self.J2.dot(r_b2g_A)
def apply_derivT(self, arg, result):
""" Matrix-vector product with the transpose of the Jacobian. """
if 'azimuthGS' in arg:
az_GS = arg['azimuthGS']
result['r_b2g_A'] += (self.J1T.dot(az_GS)).reshape((3, self.n),
order='F')
if 'elevationGS' in arg:
el_GS = arg['elevationGS']
result['r_b2g_A'] += (self.J2T.dot(el_GS)).reshape((3, self.n),
order='F')
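# Editor's note (illustrative, not part of the original component): the
# Fortran-order reshape used above stacks the per-timestep 3-vectors
# contiguously, which is the column layout the sparse Jacobians J1/J2 expect:
#
#     import numpy as np
#     r = np.arange(6).reshape((3, 2))      # columns are timesteps
#     r.reshape(6, order='F')               # -> array([0, 2, 4, 1, 3, 5])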
| 30.901628 | 118 | 0.452436 |
fd0015fe8aa832f10d50c8397dce9214a0d34519 | 1,511 | py | Python | corehq/apps/locations/resources/v0_1.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | corehq/apps/locations/resources/v0_1.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/locations/resources/v0_1.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | null | null | null | from tastypie import fields
from corehq.apps.locations.models import Location, root_locations
from corehq.apps.api.resources.v0_1 import CustomResourceMeta, LoginAndDomainAuthentication
from corehq.apps.api.util import get_object_or_not_exist
import json
from corehq.apps.api.resources import HqBaseResource
class LocationResource(HqBaseResource):
type = "location"
uuid = fields.CharField(attribute='location_id', readonly=True, unique=True)
location_type = fields.CharField(attribute='location_type', readonly=True)
is_archived = fields.BooleanField(attribute='is_archived', readonly=True)
name = fields.CharField(attribute='name', readonly=True, unique=True)
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
location_id = kwargs['pk']
return get_object_or_not_exist(Location, location_id, domain)
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
parent_id = bundle.request.GET.get('parent_id', None)
include_inactive = json.loads(bundle.request.GET.get('include_inactive', 'false'))
if parent_id:
parent = get_object_or_not_exist(Location, parent_id, domain)
return parent.sql_location.child_locations(include_archive_ancestors=include_inactive)
return root_locations(domain)
class Meta(CustomResourceMeta):
authentication = LoginAndDomainAuthentication()
object_class = Location
resource_name = 'location'
limit = 0
| 41.972222 | 98 | 0.735275 |
081ae4e264681d5f9bddb003c7b7934eda51e4c8 | 4,933 | py | Python | monoscene/scripts/generate_output.py | Teaksters/MonoScene | 0a5803052b54e57eb98556e53d3bf45be890b269 | [
"Apache-2.0"
] | null | null | null | monoscene/scripts/generate_output.py | Teaksters/MonoScene | 0a5803052b54e57eb98556e53d3bf45be890b269 | [
"Apache-2.0"
] | null | null | null | monoscene/scripts/generate_output.py | Teaksters/MonoScene | 0a5803052b54e57eb98556e53d3bf45be890b269 | [
"Apache-2.0"
] | null | null | null | from pytorch_lightning import Trainer
from monoscene.models.monoscene import MonoScene
from monoscene.data.NYU.nyu_dm import NYUDataModule
from monoscene.data.semantic_kitti.kitti_dm import KittiDataModule
from monoscene.data.kitti_360.kitti_360_dm import Kitti360DataModule
import hydra
from omegaconf import DictConfig
import torch
import numpy as np
import os
from hydra.utils import get_original_cwd
from tqdm import tqdm
import pickle
@hydra.main(config_name="../config/monoscene.yaml")
def main(config: DictConfig):
torch.set_grad_enabled(False)
# Setup dataloader
if config.dataset == "kitti" or config.dataset == "kitti_360":
feature = 64
project_scale = 2
full_scene_size = (256, 256, 32)
if config.dataset == "kitti":
data_module = KittiDataModule(
root=config.kitti_root,
preprocess_root=config.kitti_preprocess_root,
frustum_size=config.frustum_size,
batch_size=int(config.batch_size / config.n_gpus),
num_workers=int(config.num_workers_per_gpu * config.n_gpus),
)
data_module.setup()
data_loader = data_module.val_dataloader()
# data_loader = data_module.test_dataloader() # use this if you want to infer on test set
else:
data_module = Kitti360DataModule(
root=config.kitti_360_root,
sequences=[config.kitti_360_sequence],
n_scans=2000,
batch_size=1,
num_workers=3,
)
data_module.setup()
data_loader = data_module.dataloader()
elif config.dataset == "NYU":
project_scale = 1
feature = 200
full_scene_size = (60, 36, 60)
data_module = NYUDataModule(
root=config.NYU_root,
preprocess_root=config.NYU_preprocess_root,
n_relations=config.n_relations,
frustum_size=config.frustum_size,
batch_size=int(config.batch_size / config.n_gpus),
num_workers=int(config.num_workers_per_gpu * config.n_gpus),
)
data_module.setup()
data_loader = data_module.val_dataloader()
# data_loader = data_module.test_dataloader() # use this if you want to infer on test set
else:
print("dataset not support")
# Load pretrained models
if config.dataset == "NYU":
model_path = os.path.join(
get_original_cwd(), "trained_models", "monoscene_nyu.ckpt"
)
else:
model_path = os.path.join(
get_original_cwd(), "trained_models", "monoscene_kitti.ckpt"
)
model = MonoScene.load_from_checkpoint(
model_path,
feature=feature,
project_scale=project_scale,
fp_loss=config.fp_loss,
full_scene_size=full_scene_size,
)
model.cuda()
model.eval()
# Save prediction and additional data
# to draw the viewing frustum and remove scene outside the room for NYUv2
output_path = os.path.join(config.output_path, config.dataset)
with torch.no_grad():
for batch in tqdm(data_loader):
batch["img"] = batch["img"].cuda()
print(batch)
pred = model(batch)
print(pred)
y_pred = torch.softmax(pred["ssc_logit"], dim=1).detach().cpu().numpy()
y_pred = np.argmax(y_pred, axis=1)
for i in range(config.batch_size):
out_dict = {"y_pred": y_pred[i].astype(np.uint16)}
if "target" in batch:
out_dict["target"] = (
batch["target"][i].detach().cpu().numpy().astype(np.uint16)
)
if config.dataset == "NYU":
write_path = output_path
filepath = os.path.join(write_path, batch["name"][i] + ".pkl")
out_dict["cam_pose"] = batch["cam_pose"][i].detach().cpu().numpy()
out_dict["vox_origin"] = (
batch["vox_origin"][i].detach().cpu().numpy()
)
else:
write_path = os.path.join(output_path, batch["sequence"][i])
filepath = os.path.join(write_path, batch["frame_id"][i] + ".pkl")
out_dict["fov_mask_1"] = (
batch["fov_mask_1"][i].detach().cpu().numpy()
)
out_dict["cam_k"] = batch["cam_k"][i].detach().cpu().numpy()
out_dict["T_velo_2_cam"] = (
batch["T_velo_2_cam"][i].detach().cpu().numpy()
)
os.makedirs(write_path, exist_ok=True)
with open(filepath, "wb") as handle:
pickle.dump(out_dict, handle)
print("wrote to", filepath)
if __name__ == "__main__":
main()
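# Editor's sketch (hypothetical file name, not part of the original script):
# each saved .pkl can be read back for inspection roughly like this:
#
#     import pickle
#     with open("output/NYU/NYU0001_0000.pkl", "rb") as handle:  # example path
#         out = pickle.load(handle)
#     # out["y_pred"] holds the predicted semantic voxel grid, e.g. of shape
#     # (60, 36, 60) for NYU or (256, 256, 32) for the KITTI datasets.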
| 37.946154 | 101 | 0.57612 |
6bfa1749ef56b40281fab67eaa21665ece53fa05 | 4,301 | py | Python | code/10-sequence-lookup/verify_amplicon_type.py | GLOMICON/emp | c1f752d1ae4c009328bbdcecf9666dbd4dac39b6 | [
"BSD-3-Clause"
] | 1 | 2020-01-30T15:06:26.000Z | 2020-01-30T15:06:26.000Z | code/10-sequence-lookup/verify_amplicon_type.py | GLOMICON/emp | c1f752d1ae4c009328bbdcecf9666dbd4dac39b6 | [
"BSD-3-Clause"
] | null | null | null | code/10-sequence-lookup/verify_amplicon_type.py | GLOMICON/emp | c1f752d1ae4c009328bbdcecf9666dbd4dac39b6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import click
import numpy as np
import pandas as pd
def count_starting_kmers(input_fasta_fp, num_seqs, seed):
"""Generate value_counts dataframe of 5' tetramers for random subsample
of a fasta file"""
kmer_length = 4
if seed:
np.random.seed(seed)
starting_kmers = []
with open(input_fasta_fp) as handle:
lines = pd.Series(handle.readlines())
num_lines = len(lines)
if num_lines/2 < num_seqs:
rand_line_nos = np.random.choice(np.arange(1,num_lines,2),
size=num_seqs, replace=True)
else:
rand_line_nos = np.random.choice(np.arange(1,num_lines,2),
size=num_seqs, replace=False)
rand_lines = lines[rand_line_nos]
for sequence in rand_lines:
starting_kmers.append(sequence[:kmer_length])
starting_kmer_value_counts = pd.Series(starting_kmers).value_counts()
return(starting_kmer_value_counts)
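# Editor's sketch (hypothetical file and counts, not part of the original
# script): count_starting_kmers returns a pandas value_counts Series of the
# 5' tetramers, for example:
#
#     counts = count_starting_kmers("seqs.fna", num_seqs=1000, seed=42)
#     # TACG    912
#     # GTAG     55
#     # ...
#     counts.index[0], counts[0]   # most frequent tetramer and its count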
@click.command()
@click.option('--input_fasta_fp', '-f', required=True,
type=click.Path(resolve_path=True, readable=True, exists=True,
file_okay=True),
help="Input fasta file from Deblur (.fa, .fna, .fasta)")
@click.option('--num_seqs', '-n', required=False, type=int, default=10000,
help="Number of sequences to randomly subsample [default: 10000]")
@click.option('--cutoff', '-c', required=False, type=float, default=0.5,
help="Minimum fraction of sequences required to match "
"a diagnostic 5' tetramer [default: 0.5]")
@click.option('--seed', '-s', required=False, type=int,
help="Random number seed [default: None]")
def verify_amplicon_type(input_fasta_fp, num_seqs, cutoff, seed):
"""Determine the most likely amplicon type of a fasta file based on the
first four nucleotides.
The most frequent 5' tetramer in a random subsample of sequences must
match, above a given cutoff fraction of sequences, one of the following
diagnostic tetramers:
Tetramer\tAmplicon\tForward primer
TACG\t16S rRNA\t515f
GTAG\tITS rRNA\tITS1f
GCT[AC]\t18S rRNA\tEuk1391f
"""
starting_kmer_value_counts = count_starting_kmers(input_fasta_fp, num_seqs,
seed)
top_kmer = starting_kmer_value_counts.index[0]
top_kmer_count = starting_kmer_value_counts[0]
second_kmer = starting_kmer_value_counts.index[1]
second_kmer_count = starting_kmer_value_counts[1]
third_kmer = starting_kmer_value_counts.index[2]
third_kmer_count = starting_kmer_value_counts[2]
top_kmer_frac = top_kmer_count/num_seqs
top2_kmer_frac = (top_kmer_count+second_kmer_count)/num_seqs
top3_kmer_frac = (top_kmer_count+second_kmer_count+third_kmer_count)/num_seqs
if (top_kmer == 'TACG') & (top_kmer_frac > cutoff):
print('Amplicon type: 16S/515f (%s%% of sequences start with %s)' %
(round(top_kmer_frac*100, 1), top_kmer))
elif (top_kmer == 'GTAG') & (top_kmer_frac > cutoff):
print('Amplicon type: ITS/ITS1f (%s%% of sequences start with %s)' %
(round(top_kmer_frac*100, 1), top_kmer))
elif (top_kmer in ['GCTA', 'GCTC', 'ACAC']) & (second_kmer in ['GCTA', 'GCTC',
'ACAC']) & (third_kmer in ['GCTA', 'GCTC', 'ACAC']) & (
top3_kmer_frac > cutoff):
print('Amplicon type: 18S/Euk1391f (%s%% of sequences start with %s, %s, or %s)' %
(round(top3_kmer_frac*100, 1), top_kmer, second_kmer, third_kmer))
else:
print('Could not determine amplicon type'),
print('(most frequent starting tetramer was %s with %s%%)' %
(top_kmer, round(top_kmer_frac*100, 1)))
if __name__ == '__main__':
verify_amplicon_type()
| 46.75 | 90 | 0.613578 |
7b3b195501f31e5a761789a638311d6719c70d21 | 53 | py | Python | src/hw.py | romilly/pi-towers-workshop | dabd9c70a102f3c0501547ab14c678bb58281ec9 | [
"MIT"
] | 1 | 2018-05-26T19:52:05.000Z | 2018-05-26T19:52:05.000Z | src/hw.py | romilly/pi-towers-workshop | dabd9c70a102f3c0501547ab14c678bb58281ec9 | [
"MIT"
] | null | null | null | src/hw.py | romilly/pi-towers-workshop | dabd9c70a102f3c0501547ab14c678bb58281ec9 | [
"MIT"
] | null | null | null | from microbit import *
display.scroll('Hello World') | 17.666667 | 29 | 0.773585 |
5e97f63a4ca325ce0f7058d0112d4de091aba760 | 6,083 | py | Python | netket/vqs/mc/mc_state/expect_grad.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | netket/vqs/mc/mc_state/expect_grad.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | netket/vqs/mc/mc_state/expect_grad.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Callable, Tuple
import jax
from jax import numpy as jnp
from netket import jax as nkjax
from netket import config
from netket.stats import Stats, statistics
from netket.utils import mpi
from netket.utils.types import PyTree
from netket.utils.dispatch import dispatch, TrueT, FalseT
from netket.operator import (
AbstractOperator,
DiscreteOperator,
Squared,
)
from netket.vqs.mc import (
get_local_kernel_arguments,
get_local_kernel,
)
from .state import MCState
@dispatch
def expect_and_grad( # noqa: F811
vstate: MCState,
Ô: AbstractOperator,
use_covariance: TrueT,
*,
mutable: Any,
) -> Tuple[Stats, PyTree]:
σ, args = get_local_kernel_arguments(vstate, Ô)
local_estimator_fun = get_local_kernel(vstate, Ô)
Ō, Ō_grad, new_model_state = grad_expect_hermitian(
local_estimator_fun,
vstate._apply_fun,
mutable,
vstate.parameters,
vstate.model_state,
σ,
args,
)
if mutable is not False:
vstate.model_state = new_model_state
return Ō, Ō_grad
# pure state, squared operator
@dispatch.multi(
(MCState, Squared[DiscreteOperator], FalseT),
(MCState, Squared[AbstractOperator], FalseT),
(MCState, AbstractOperator, FalseT),
)
def expect_and_grad( # noqa: F811
vstate,
Ô,
use_covariance,
*,
mutable: Any,
) -> Tuple[Stats, PyTree]:
if not isinstance(Ô, Squared) and not config.FLAGS["NETKET_EXPERIMENTAL"]:
raise RuntimeError(
"""
Computing the gradient of non hermitian operator is an
experimental feature under development and is known not to
return wrong values sometimes.
If you want to debug it, set the environment variable
NETKET_EXPERIMENTAL=1
"""
)
σ, args = get_local_kernel_arguments(vstate, Ô)
local_estimator_fun = get_local_kernel(vstate, Ô)
Ō, Ō_grad, new_model_state = grad_expect_operator_kernel(
local_estimator_fun,
vstate._apply_fun,
vstate.sampler.machine_pow,
mutable,
vstate.parameters,
vstate.model_state,
σ,
args,
)
if mutable is not False:
vstate.model_state = new_model_state
return Ō, Ō_grad
@partial(jax.jit, static_argnums=(0, 1, 2))
def grad_expect_hermitian(
local_value_kernel: Callable,
model_apply_fun: Callable,
mutable: bool,
parameters: PyTree,
model_state: PyTree,
σ: jnp.ndarray,
local_value_args: PyTree,
) -> Tuple[PyTree, PyTree]:
σ_shape = σ.shape
if jnp.ndim(σ) != 2:
σ = σ.reshape((-1, σ_shape[-1]))
n_samples = σ.shape[0] * mpi.n_nodes
O_loc = local_value_kernel(
model_apply_fun,
{"params": parameters, **model_state},
σ,
local_value_args,
)
Ō = statistics(O_loc.reshape(σ_shape[:-1]).T)
O_loc -= Ō.mean
# Then compute the vjp.
# Code is a bit more complex than a standard one because we support
# mutable state (if it's there)
is_mutable = mutable is not False
_, vjp_fun, *new_model_state = nkjax.vjp(
lambda w: model_apply_fun({"params": w, **model_state}, σ, mutable=mutable),
parameters,
conjugate=True,
has_aux=is_mutable,
)
Ō_grad = vjp_fun(jnp.conjugate(O_loc) / n_samples)[0]
Ō_grad = jax.tree_map(
lambda x, target: (x if jnp.iscomplexobj(target) else 2 * x.real).astype(
target.dtype
),
Ō_grad,
parameters,
)
new_model_state = new_model_state[0] if is_mutable else None
return Ō, jax.tree_map(lambda x: mpi.mpi_sum_jax(x)[0], Ō_grad), new_model_state
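# Editor's note (schematic summary of the estimator above, not NetKet
# documentation): for a Hermitian operator the parameter gradient is estimated
# as
#
#     grad_k ~ (1/N) * sum_i conj(O_loc(s_i) - <O>) * d log psi(s_i) / d theta_k
#
# which is what the vjp with cotangent conj(O_loc)/n_samples computes; for
# real-valued parameters only twice the real part is kept (the tree_map above).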
@partial(jax.jit, static_argnums=(0, 1, 2, 3))
def grad_expect_operator_kernel(
local_value_kernel: Callable,
model_apply_fun: Callable,
machine_pow: int,
mutable: bool,
parameters: PyTree,
model_state: PyTree,
σ: jnp.ndarray,
local_value_args: PyTree,
) -> Tuple[PyTree, PyTree, Stats]:
σ_shape = σ.shape
if jnp.ndim(σ) != 2:
σ = σ.reshape((-1, σ_shape[-1]))
is_mutable = mutable is not False
logpsi = lambda w, σ: model_apply_fun(
{"params": w, **model_state}, σ, mutable=mutable
)
log_pdf = (
lambda w, σ: machine_pow * model_apply_fun({"params": w, **model_state}, σ).real
)
def expect_closure_pars(pars):
return nkjax.expect(
log_pdf,
partial(local_value_kernel, logpsi),
pars,
σ,
local_value_args,
n_chains=σ_shape[0],
)
Ō, Ō_pb, Ō_stats = nkjax.vjp(
expect_closure_pars, parameters, has_aux=True, conjugate=True
)
Ō_pars_grad = Ō_pb(jnp.ones_like(Ō))[0]
# This term below is needed otherwise it does not match the value obtained by
# (ha@ha).collect(). I'm unsure of why it is needed.
Ō_pars_grad = jax.tree_map(
lambda x, target: x / 2 if jnp.iscomplexobj(target) else x,
Ō_pars_grad,
parameters,
)
if is_mutable:
raise NotImplementedError(
"gradient of non-hermitian operators over mutable models "
"is not yet implemented."
)
new_model_state = None
return (
Ō_stats,
jax.tree_map(lambda x: mpi.mpi_mean_jax(x)[0], Ō_pars_grad),
new_model_state,
)
| 26.563319 | 88 | 0.646556 |
8130473b3627356e66cdd50d29a7893580ddc193 | 9,650 | py | Python | csbuild/project_generator_slickedit.py | brandonmbare/csbuild | ccdddb46f96364f50947f827efd3b3ef8af4a27e | [
"MIT"
] | null | null | null | csbuild/project_generator_slickedit.py | brandonmbare/csbuild | ccdddb46f96364f50947f827efd3b3ef8af4a27e | [
"MIT"
] | null | null | null | csbuild/project_generator_slickedit.py | brandonmbare/csbuild | ccdddb46f96364f50947f827efd3b3ef8af4a27e | [
"MIT"
] | null | null | null | # Copyright (C) 2013 Jaedyn K. Draper
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import os
import sys
from . import project_generator
from . import projectSettings
from . import log
import csbuild
class ExtensionType:
WORKSPACE = "vpw"
PROJECT = "vpj"
class OutputType:
WORKSPACE = "Workspace"
PROJECT = "Project"
class project_generator_slickedit(project_generator.project_generator):
"""
    Generator used to create SlickEdit project files.
"""
def __init__(self, path, solutionName, extraArgs):
project_generator.project_generator.__init__(self, path, solutionName, extraArgs)
self._extraBuildArgs = self.extraargs.replace(",", " ")
# Base class methods.
@staticmethod
def AdditionalArgs(parser):
# No additional command line arguments at this time.
pass
def WriteProjectFiles(self):
log.LOG_BUILD("Writing SlickEdit workspace {}...".format(self.solutionname))
# Create the workspace root path if it doesn't exist.
if not os.access(self.rootpath, os.F_OK):
os.makedirs(self.rootpath)
projectFiles = set()
self._WriteSubGroup(self.rootpath, projectSettings.rootGroup, projectFiles)
self._WriteWorkspace(projectFiles)
# Private methods.
def _WriteWorkspace(self, projectFiles):
CreateRootNode = ET.Element
AddNode = ET.SubElement
rootNode = CreateRootNode(OutputType.WORKSPACE)
rootNode.set("Version", "10.0")
rootNode.set("VendorName", "SlickEdit")
projectListNode = AddNode(rootNode, "Projects")
# Add the path of each project file to the workspace.
for project in projectFiles:
projectNode = AddNode(projectListNode, "Project")
relativeProjectPath = os.path.relpath(project, self.rootpath)
projectNode.set("File", relativeProjectPath)
xmlString = ET.tostring(rootNode)
outputPath = os.path.join(self.rootpath, "{}.{}".format(self.solutionname, ExtensionType.WORKSPACE))
self._SaveXmlFile(xmlString, outputPath, OutputType.WORKSPACE, ExtensionType.WORKSPACE)
def _WriteSubGroup(self, projectOutputPath, projectGroup, projectFiles):
# Write out each project first.
for projectName, projectSettingsMap in projectGroup.projects.items():
# Construct the output path to the project file.
projectFilePath = os.path.join(projectOutputPath, "{}.{}".format(projectName, ExtensionType.PROJECT))
# Cache the project output path so we can use it later when we build the workspace file.
projectFiles.add(projectFilePath)
# Save the project file to disk.
self._WriteProject(projectFilePath, projectName, projectSettingsMap)
# Next, iterate through each subgroup and handle each one recursively.
for subGroupName, subGroup in projectGroup.subgroups.items():
groupPath = os.path.join(projectOutputPath, subGroupName)
# Create the group path if it doesn't exist.
if not os.access(groupPath, os.F_OK):
os.makedirs(groupPath)
self._WriteSubGroup(groupPath, subGroup, projectFiles)
def _WriteProject(self, projectFilePath, projectName, projectSettingsMap):
CreateRootNode = ET.Element
AddNode = ET.SubElement
        # When the main makefile is executed, chdir is called to set the current working directory to the same directory
# as the makefile itself, so using that directory is acceptable for project working directory.
mainfileDirPath = os.getcwd()
projectDirPath = os.path.dirname(projectFilePath)
rootNode = CreateRootNode(OutputType.PROJECT)
rootNode.set("Version", "10.0")
rootNode.set("VendorName", "SlickEdit")
rootNode.set("WorkingDir", mainfileDirPath)
filesNode = AddNode(rootNode, "Files")
sourcesNode = AddNode(filesNode, "Folder")
headersNode = AddNode(filesNode, "Folder")
sourcesNode.set("Name", "Source Files")
headersNode.set("Name", "Header Files")
sourceFileList = set()
headerFileList = set()
# Because the list of sources and headers can differ between configurations and architectures,
# we need to generate a complete list so the project can reference them all.
for configName, archMap in projectSettingsMap.items():
for archName, settings in archMap.items():
sourceFileList.update(set(settings.allsources))
headerFileList.update(set(settings.allheaders))
# Add each source file to the project.
for sourceFile in sourceFileList:
relativeFilePath = os.path.relpath(sourceFile, projectDirPath)
fileNode = AddNode(sourcesNode, "F")
fileNode.set("N", relativeFilePath)
# Add each header file to the project.
for headerFile in headerFileList:
relativeFilePath = os.path.relpath(headerFile, projectDirPath)
fileNode = AddNode(headersNode, "F")
fileNode.set("N", relativeFilePath)
# Create a dictionary of the build targets and their command line specification.
# It's assumed that (ALL_TARGETS) will not be defined by the makefiles.
# TODO: Add handling for any custom build targets named (ALL_TARGETS).
buildTargets = { "(ALL_TARGETS)": "--all-targets" }
for targetName, _ in projectSettingsMap.items():
buildTargets.update({ targetName: targetName })
# Output nodes for each build target.
for targetName, targetCommand in buildTargets.items():
# Create the config node for this build target.
configNode = AddNode(rootNode, "Config")
configNode.set("Name", targetName)
menuNode = AddNode(configNode, "Menu")
# Create the individual nodes representing the compilation options available under this project.
# SlickEdit refers to these options as "targets", so don't confuse that with csbuild targets.
compileProjectNode = AddNode(menuNode, "Target")
buildAllNode = AddNode(menuNode, "Target")
rebuildAllNode = AddNode(menuNode, "Target")
cleanAllNode = AddNode(menuNode, "Target")
def SetCommonTargetOptions(targetNode):
targetNode.set("RunFromDir", "%rw") # Project working directory.
targetNode.set("CaptureOutputWith", "ProcessBuffer") # Send csbuild output to SlickEdit output window.
targetNode.set("SaveOption", "SaveWorkspaceFiles") # Save all workspace files when initiating this target.
SetCommonTargetOptions(compileProjectNode)
SetCommonTargetOptions(buildAllNode)
SetCommonTargetOptions(rebuildAllNode)
SetCommonTargetOptions(cleanAllNode)
compileProjectNode.set("Name", "Compile Project")
compileProjectNode.set("MenuCaption", "Compile &Project")
buildAllNode.set("Name", "Build All")
buildAllNode.set("MenuCaption", "&Build All")
rebuildAllNode.set("Name", "Rebuild All")
rebuildAllNode.set("MenuCaption", "&Rebuild All")
cleanAllNode.set("Name", "Clean All")
cleanAllNode.set("MenuCaption", "&Clean All")
commandNode = AddNode(compileProjectNode, "Exec")
commandNode.set("CmdLine", "{} {} {} --project={} {}".format(sys.executable, csbuild.mainfile, targetCommand, projectName, self._extraBuildArgs))
commandNode = AddNode(buildAllNode, "Exec")
commandNode.set("CmdLine", "{} {} {} {}".format(sys.executable, csbuild.mainfile, targetCommand, self._extraBuildArgs))
commandNode = AddNode(rebuildAllNode, "Exec")
commandNode.set("CmdLine", "{} {} {} --rebuild {}".format(sys.executable, csbuild.mainfile, targetCommand, self._extraBuildArgs))
commandNode = AddNode(cleanAllNode, "Exec")
commandNode.set("CmdLine", "{} {} {} --clean {}".format(sys.executable, csbuild.mainfile, targetCommand, self._extraBuildArgs))
# Grab a string of the XML document we've created and save it.
xmlString = ET.tostring(rootNode)
self._SaveXmlFile(xmlString, projectFilePath, OutputType.PROJECT, ExtensionType.PROJECT)
def _SaveXmlFile(self, xmlString, xmlFilename, outputType, outputTypeExt):
# Convert to the original XML to a string on Python3.
if sys.version_info >= (3, 0):
xmlString = xmlString.decode("utf-8")
finalXmlString = '<!DOCTYPE {} SYSTEM "http://www.slickedit.com/dtd/vse/10.0/{}.dtd"><!-- Auto-generated by CSBuild for use with SlickEdit. -->{}'.format(outputType, outputTypeExt, xmlString)
# Use minidom to reformat the XML since ElementTree doesn't do it for us.
formattedXmlString = minidom.parseString(finalXmlString).toprettyxml("\t", "\n")
inputLines = formattedXmlString.split("\n")
outputLines = []
# Copy each line of the XML to a list of strings.
for line in inputLines:
# Disregard the ?xml line at the start since SlickEdit doesn't care about that.
if not line.startswith("<?xml") and line.strip():
outputLines.append(line)
# Concatenate each string with a newline.
finalXmlString = "\n".join(outputLines)
# Open the output file and write the new XML string to it.
with open(xmlFilename, "w") as f:
f.write(finalXmlString)
| 38.91129 | 193 | 0.750155 |
ddaeb9a831930aa4f691296e519cf5a391d93940 | 332 | py | Python | hooks/pre_gen_project.py | aft-analytics/cookiecutter-poetry | 7577c44a0cceb8f524064f46d5aca56453d4a34e | [
"MIT"
] | 5 | 2021-04-06T06:32:30.000Z | 2022-02-20T07:40:45.000Z | hooks/pre_gen_project.py | aft-analytics/cookiecutter-poetry | 7577c44a0cceb8f524064f46d5aca56453d4a34e | [
"MIT"
] | 8 | 2020-12-31T11:45:38.000Z | 2021-08-08T15:03:33.000Z | hooks/pre_gen_project.py | aft-analytics/cookiecutter-poetry | 7577c44a0cceb8f524064f46d5aca56453d4a34e | [
"MIT"
] | 1 | 2021-08-11T03:02:32.000Z | 2021-08-11T03:02:32.000Z | #!/usr/bin/env python
import re
import sys
MODULE_REGEX = r"^[_a-zA-Z][_a-zA-Z0-9]+$"
module_name = "{{ cookiecutter.module_name }}"
if not re.match(MODULE_REGEX, module_name):
print(
"ERROR: The project slug (%s) is not a valid Python module name." % module_name
)
# Exit to cancel project
sys.exit(1)
| 19.529412 | 87 | 0.650602 |
53181a663b722e20d08e38bf31fddfbd9a255f00 | 1,727 | py | Python | scripts/fbi/hate_crime/preprocess_aggregations_test.py | rpatil524/data | 9e76c7f22a75ad4e52522444a080ed3f5c6da7dd | [
"Apache-2.0"
] | null | null | null | scripts/fbi/hate_crime/preprocess_aggregations_test.py | rpatil524/data | 9e76c7f22a75ad4e52522444a080ed3f5c6da7dd | [
"Apache-2.0"
] | null | null | null | scripts/fbi/hate_crime/preprocess_aggregations_test.py | rpatil524/data | 9e76c7f22a75ad4e52522444a080ed3f5c6da7dd | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for geo_id_resolver.py"""
import filecmp
import os
import unittest
from preprocess_aggregations import process_main
class HatecrimeAggTest(unittest.TestCase):
def test_process_main(self):
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
process_main(input_csv=os.path.join(_SCRIPT_PATH, 'testdata',
'hate_crime_sample.csv'),
output_path='./tmp')
self.assertTrue(
filecmp.cmp(os.path.join(_SCRIPT_PATH, 'testdata',
'aggregations_expected',
'aggregation.csv'),
os.path.join(_SCRIPT_PATH, 'tmp', 'aggregation.csv'),
shallow=False))
self.assertTrue(
filecmp.cmp(os.path.join(_SCRIPT_PATH, 'testdata',
'aggregations_expected',
'aggregation.mcf'),
os.path.join(_SCRIPT_PATH, 'tmp', 'aggregation.mcf'),
shallow=False))
if __name__ == '__main__':
unittest.main()
| 38.377778 | 77 | 0.605675 |
676a6c2689bf2e22172bc38208042014f6169ca6 | 1,619 | py | Python | sklearn/covariance/tests/test_elliptic_envelope.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | sklearn/covariance/tests/test_elliptic_envelope.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | sklearn/covariance/tests/test_elliptic_envelope.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | """
Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope).
"""
import numpy as np
import pytest
from sklearn.covariance import EllipticEnvelope
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.exceptions import NotFittedError
def test_elliptic_envelope():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
with pytest.raises(NotFittedError):
clf.predict(X)
with pytest.raises(NotFittedError):
clf.decision_function(X)
clf.fit(X)
y_pred = clf.predict(X)
scores = clf.score_samples(X)
decisions = clf.decision_function(X)
assert_array_almost_equal(
scores, -clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decisions < 0))
def test_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf1 = EllipticEnvelope(contamination=0.2).fit(X_train)
clf2 = EllipticEnvelope().fit(X_train)
assert_array_equal(clf1.score_samples([[2., 2.]]),
clf1.decision_function([[2., 2.]]) + clf1.offset_)
assert_array_equal(clf2.score_samples([[2., 2.]]),
clf2.decision_function([[2., 2.]]) + clf2.offset_)
assert_array_equal(clf1.score_samples([[2., 2.]]),
clf2.score_samples([[2., 2.]]))
| 35.195652 | 79 | 0.671402 |
ee4de7c0828acbccc00f6e4a83fa87cbd8783b3b | 2,082 | py | Python | designate/api/admin/controllers/extensions/export.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | designate/api/admin/controllers/extensions/export.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | designate/api/admin/controllers/extensions/export.py | kiall/designate-py3 | 2b135d64bb0ced77327a563e037b270d1e5ca308 | [
"Apache-2.0"
] | null | null | null | # COPYRIGHT 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from oslo_log import log as logging
from designate.api.v2.controllers import rest
from designate import utils
from designate import policy
LOG = logging.getLogger(__name__)
class ExportController(rest.RestController):
@pecan.expose(template=None, content_type='text/dns')
@utils.validate_uuid('zone_id')
def get_one(self, zone_id):
context = pecan.request.environ['context']
policy.check('zone_export', context)
servers = self.central_api.get_domain_servers(context, zone_id)
domain = self.central_api.get_domain(context, zone_id)
criterion = {'domain_id': zone_id}
recordsets = self.central_api.find_recordsets(context, criterion)
records = []
for recordset in recordsets:
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
raw_records = self.central_api.find_records(context, criterion)
for record in raw_records:
records.append({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'data': record['data'],
})
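        # Editor's note (illustrative values only): each entry passed to the
        # bind9-zone.jinja2 template looks like
        #     {'name': 'www.example.org.', 'type': 'A',
        #      'ttl': 3600, 'data': '192.0.2.1'}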
return utils.render_template('bind9-zone.jinja2',
servers=servers,
domain=domain,
records=records)
| 33.580645 | 75 | 0.62488 |
df9e0cc26767e4b23fdf29ffabc20b8a62f8b7c8 | 3,179 | py | Python | sbnet_tensorflow/benchmark/reduce_mask_tests.py | digital-idiot/sbnet | dbea11110f2559b56858a9cc2e216d016294f287 | [
"Apache-2.0"
] | 1 | 2018-03-02T02:24:07.000Z | 2018-03-02T02:24:07.000Z | sbnet_tensorflow/benchmark/reduce_mask_tests.py | lawrencewxj/sbnet | cf8ea06430c8d8c8d7c5af266a6f926fdde12312 | [
"Apache-2.0"
] | null | null | null | sbnet_tensorflow/benchmark/reduce_mask_tests.py | lawrencewxj/sbnet | cf8ea06430c8d8c8d7c5af266a6f926fdde12312 | [
"Apache-2.0"
] | 1 | 2021-04-22T08:25:04.000Z | 2021-04-22T08:25:04.000Z | """
Sparse Blocks Network
Copyright (c) 2017, Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
import numpy as np
import tensorflow as tf
from sparse_conv_lib import convert_mask_to_indices, convert_mask_to_indices_custom
from sparse_conv_lib import calc_block_params
class ReduceMaskTests(tf.test.TestCase):
def _test_reduce_mask(self, mask, bsize, ksize, strides, padding):
with tf.Session():
mask = tf.constant(mask)
indices = convert_mask_to_indices(mask, bsize, ksize, strides, padding, 0.0)
x_shape = [1] + [int(ss) for ss in mask.get_shape()[1:]] + [1]
block_params = calc_block_params(x_shape, bsize, ksize, strides, padding)
indices_custom = convert_mask_to_indices_custom(mask, block_params, 0.0)
activeBlockIndicesResult = indices_custom.active_block_indices.eval()
binCountsResult = indices_custom.bin_counts.eval()
activeBlockIndicesResult = activeBlockIndicesResult[:binCountsResult[0]]
sortIdx = activeBlockIndicesResult.argsort()
activeBlockIndicesResult = activeBlockIndicesResult[sortIdx]
clippedResults = np.copy(activeBlockIndicesResult.view(np.uint16))
clippedResults = clippedResults.reshape([-1, 4])[:, [2, 1, 0]]
indices_val = indices.eval()
np.testing.assert_array_equal(indices_val, clippedResults)
def test_basic(self):
bsize = [1, 3, 3, 1]
ksize = [3, 3, 1, 1]
strides = [1, 1, 1, 1]
padding = 'SAME'
mask = np.array(
[[
[0, 0, 0, 0, 0], # YAPF_NO_FORMAT
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
]],
dtype=np.float32)
self._test_reduce_mask(mask, bsize, ksize, strides, padding)
def test_larger(self):
bsize = [1, 5, 5, 1]
ksize = [2, 2, 1, 1]
strides = [1, 1, 1, 1]
padding = 'VALID'
mask = np.array(
[[
[0, 0, 0, 0, 0, 1, 1, 1], # YAPF_NO_FORMAT
[0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]],
dtype=np.float32)
self._test_reduce_mask(mask, bsize, ksize, strides, padding)
if __name__ == '__main__':
tf.test.main()
| 36.965116 | 88 | 0.578798 |
a9ac90cdfab854fc2c1489c576d404b56f0692e5 | 3,782 | py | Python | pyaff4/standards_test.py | aff4/python-aff4 | 94a3583475c07ad92147f70ff8a19e9e36f12aa9 | [
"Apache-2.0"
] | 34 | 2017-10-21T16:12:58.000Z | 2022-02-18T00:37:08.000Z | pyaff4/standards_test.py | aff4/python-aff4 | 94a3583475c07ad92147f70ff8a19e9e36f12aa9 | [
"Apache-2.0"
] | 23 | 2017-11-06T17:01:04.000Z | 2021-12-26T14:09:38.000Z | pyaff4/standards_test.py | aff4/python-aff4 | 94a3583475c07ad92147f70ff8a19e9e36f12aa9 | [
"Apache-2.0"
] | 17 | 2019-02-11T00:47:02.000Z | 2022-03-14T02:52:04.000Z | from __future__ import print_function
from __future__ import unicode_literals
# Copyright 2016,2017 Schatz Forensic Pty Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from future import standard_library
standard_library.install_aliases()
import logging
import os
import io
import unittest
from pyaff4 import data_store
from pyaff4 import lexicon
from pyaff4 import plugins
from pyaff4 import rdfvalue
from pyaff4 import zip
from pyaff4 import hashes
from pyaff4 import version
LOGGER = logging.getLogger("pyaff4")
referenceImagesPath = os.path.join(os.path.dirname(__file__), "..", "test_images")
stdLinear = os.path.join(referenceImagesPath, "AFF4Std", "Base-Linear.aff4")
def conditional_on_images(f):
if not os.access(stdLinear, os.R_OK):
LOGGER.info("Test images not cloned into repository. Tests disabled."
"To enable type `git submodules init`")
def _decorator():
print (f.__name__ + ' has been disabled')
return _decorator
return f
class StandardsTest(unittest.TestCase):
stdLinearURN = rdfvalue.URN.FromFileName(stdLinear)
@conditional_on_images
def testLocateImage(self):
resolver = data_store.MemoryDataStore()
with zip.ZipFile.NewZipFile(resolver, version.aff4v10, self.stdLinearURN) as zip_file:
for subject in resolver.QueryPredicateObject(zip_file.urn,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"http://aff4.org/Schema#DiskImage"):
self.assertEquals(
subject,
"aff4://cf853d0b-5589-4c7c-8358-2ca1572b87eb")
for subject in resolver.QueryPredicateObject(zip_file.urn,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"http://aff4.org/Schema#Image"):
self.assertEquals(
subject,
"aff4://cf853d0b-5589-4c7c-8358-2ca1572b87eb")
for subject in resolver.QueryPredicateObject(zip_file.urn,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"http://aff4.org/Schema#ContiguousImage"):
self.assertEquals(
subject,
"aff4://cf853d0b-5589-4c7c-8358-2ca1572b87eb")
@conditional_on_images
def testReadMap(self):
resolver = data_store.MemoryDataStore()
with zip.ZipFile.NewZipFile(resolver, version.aff4v10, self.stdLinearURN) as zip_file:
imageStream = resolver.AFF4FactoryOpen(
"aff4://c215ba20-5648-4209-a793-1f918c723610")
imageStream.SeekRead(0x163)
res = imageStream.Read(17)
self.assertEquals(res, b"Invalid partition")
@conditional_on_images
def testReadImageStream(self):
resolver = data_store.MemoryDataStore()
with zip.ZipFile.NewZipFile(resolver, version.aff4v10, self.stdLinearURN) as zip_file:
mapStream = resolver.AFF4FactoryOpen(
"aff4://c215ba20-5648-4209-a793-1f918c723610")
mapStream.SeekRead(0x163)
res = mapStream.Read(17)
self.assertEquals(res, b"Invalid partition")
if __name__ == '__main__':
unittest.main()
| 35.679245 | 94 | 0.665785 |
e052aea42c46c12a90b1c934a3ff6562d311657d | 12,206 | py | Python | components/isceobj/Alos2Proc/runSlcOffset.py | 4restwilliams/isce2 | fcf16355a773a94c53d83db380a72b2d024ff8fc | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-10-06T12:21:02.000Z | 2019-10-06T12:21:02.000Z | components/isceobj/Alos2Proc/runSlcOffset.py | stoormgeo/isce2 | d22bf1048ff0f7a077981ea4fbe8e0e6bc563961 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | components/isceobj/Alos2Proc/runSlcOffset.py | stoormgeo/isce2 | d22bf1048ff0f7a077981ea4fbe8e0e6bc563961 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-29T10:02:28.000Z | 2020-04-29T10:02:28.000Z | #
# Author: Cunren Liang
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import glob
import logging
import datetime
import numpy as np
import isceobj
import mroipac
from mroipac.ampcor.Ampcor import Ampcor
from isceobj.Alos2Proc.Alos2ProcPublic import topo
from isceobj.Alos2Proc.Alos2ProcPublic import geo2rdr
from isceobj.Alos2Proc.Alos2ProcPublic import waterBodyRadar
from isceobj.Alos2Proc.Alos2ProcPublic import reformatGeometricalOffset
from isceobj.Alos2Proc.Alos2ProcPublic import writeOffset
from isceobj.Alos2Proc.Alos2ProcPublic import cullOffsets
from isceobj.Alos2Proc.Alos2ProcPublic import computeOffsetFromOrbit
logger = logging.getLogger('isce.alos2insar.runSlcOffset')
def runSlcOffset(self):
'''estimate SLC offsets
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
masterTrack = self._insar.loadTrack(master=True)
slaveTrack = self._insar.loadTrack(master=False)
demFile = os.path.abspath(self._insar.dem)
wbdFile = os.path.abspath(self._insar.wbd)
for i, frameNumber in enumerate(self._insar.masterFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
swathDir = 's{}'.format(swathNumber)
os.chdir(swathDir)
print('estimating offset frame {}, swath {}'.format(frameNumber, swathNumber))
masterSwath = masterTrack.frames[i].swaths[j]
slaveSwath = slaveTrack.frames[i].swaths[j]
##########################################
#1. set number of matching points
##########################################
#set initinial numbers
if (self._insar.modeCombination == 21) or (self._insar.modeCombination == 22):
numberOfOffsetsRange = 10
numberOfOffsetsAzimuth = 40
else:
numberOfOffsetsRange = 20
numberOfOffsetsAzimuth = 20
#change the initial numbers using water body
if self.useWbdForNumberOffsets and (self._insar.wbd != None):
numberRangeLooks=100
numberAzimuthLooks=100
#compute land ratio using topo module
topo(masterSwath, masterTrack, demFile, 'lat.rdr', 'lon.rdr', 'hgt.rdr', losFile='los.rdr',
incFile=None, mskFile=None,
numberRangeLooks=numberRangeLooks, numberAzimuthLooks=numberAzimuthLooks, multilookTimeOffset=False)
waterBodyRadar('lat.rdr', 'lon.rdr', wbdFile, 'wbd.rdr')
wbdImg = isceobj.createImage()
wbdImg.load('wbd.rdr.xml')
width = wbdImg.width
length = wbdImg.length
wbd = np.fromfile('wbd.rdr', dtype=np.byte).reshape(length, width)
landRatio = np.sum(wbd==0) / (length*width)
if (landRatio <= 0.00125):
print('\n\nWARNING: land too small for estimating slc offsets at frame {}, swath {}'.format(frameNumber, swathNumber))
print('proceed to use geometric offsets for forming interferogram')
print('but please consider not using this swath\n\n')
catalog.addItem('warning message', 'land too small for estimating slc offsets at frame {}, swath {}, use geometric offsets'.format(frameNumber, swathNumber), 'runSlcOffset')
                    #compute geometrical offsets
geo2rdr(slaveSwath, slaveTrack, 'lat.rdr', 'lon.rdr', 'hgt.rdr', 'rg.rdr', 'az.rdr', numberRangeLooks=numberRangeLooks, numberAzimuthLooks=numberAzimuthLooks, multilookTimeOffset=False)
reformatGeometricalOffset('rg.rdr', 'az.rdr', 'cull.off', rangeStep=numberRangeLooks, azimuthStep=numberAzimuthLooks, maximumNumberOfOffsets=2000)
os.remove('lat.rdr')
os.remove('lat.rdr.vrt')
os.remove('lat.rdr.xml')
os.remove('lon.rdr')
os.remove('lon.rdr.vrt')
os.remove('lon.rdr.xml')
os.remove('hgt.rdr')
os.remove('hgt.rdr.vrt')
os.remove('hgt.rdr.xml')
os.remove('los.rdr')
os.remove('los.rdr.vrt')
os.remove('los.rdr.xml')
os.remove('wbd.rdr')
os.remove('wbd.rdr.vrt')
os.remove('wbd.rdr.xml')
os.remove('rg.rdr')
os.remove('rg.rdr.vrt')
os.remove('rg.rdr.xml')
os.remove('az.rdr')
os.remove('az.rdr.vrt')
os.remove('az.rdr.xml')
os.chdir('../')
continue
os.remove('lat.rdr')
os.remove('lat.rdr.vrt')
os.remove('lat.rdr.xml')
os.remove('lon.rdr')
os.remove('lon.rdr.vrt')
os.remove('lon.rdr.xml')
os.remove('hgt.rdr')
os.remove('hgt.rdr.vrt')
os.remove('hgt.rdr.xml')
os.remove('los.rdr')
os.remove('los.rdr.vrt')
os.remove('los.rdr.xml')
os.remove('wbd.rdr')
os.remove('wbd.rdr.vrt')
os.remove('wbd.rdr.xml')
#put the results on a grid with a specified interval
interval = 0.2
axisRatio = int(np.sqrt(landRatio)/interval)*interval + interval
if axisRatio > 1:
axisRatio = 1
numberOfOffsetsRange = int(numberOfOffsetsRange/axisRatio)
numberOfOffsetsAzimuth = int(numberOfOffsetsAzimuth/axisRatio)
else:
catalog.addItem('warning message', 'no water mask used to determine number of matching points. frame {} swath {}'.format(frameNumber, swathNumber), 'runSlcOffset')
#user's settings
if self.numberRangeOffsets != None:
numberOfOffsetsRange = self.numberRangeOffsets[i][j]
if self.numberAzimuthOffsets != None:
numberOfOffsetsAzimuth = self.numberAzimuthOffsets[i][j]
catalog.addItem('number of offsets range frame {} swath {}'.format(frameNumber, swathNumber), numberOfOffsetsRange, 'runSlcOffset')
catalog.addItem('number of offsets azimuth frame {} swath {}'.format(frameNumber, swathNumber), numberOfOffsetsAzimuth, 'runSlcOffset')
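            # Editor's worked example (illustrative): starting from 20 offsets
            # per direction with landRatio = 0.25, sqrt(0.25) = 0.5 so
            # axisRatio = int(0.5 / 0.2) * 0.2 + 0.2 = 0.6, giving
            # numberOfOffsetsRange = numberOfOffsetsAzimuth = int(20 / 0.6) = 33.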
##########################################
#2. match using ampcor
##########################################
ampcor = Ampcor(name='insarapp_slcs_ampcor')
ampcor.configure()
mSLC = isceobj.createSlcImage()
mSLC.load(self._insar.masterSlc+'.xml')
mSLC.setAccessMode('read')
mSLC.createImage()
sSLC = isceobj.createSlcImage()
sSLC.load(self._insar.slaveSlc+'.xml')
sSLC.setAccessMode('read')
sSLC.createImage()
ampcor.setImageDataType1('complex')
ampcor.setImageDataType2('complex')
ampcor.setMasterSlcImage(mSLC)
ampcor.setSlaveSlcImage(sSLC)
#MATCH REGION
#compute an offset at image center to use
rgoff, azoff = computeOffsetFromOrbit(masterSwath, masterTrack, slaveSwath, slaveTrack,
masterSwath.numberOfSamples * 0.5,
masterSwath.numberOfLines * 0.5)
            #it seems that we cannot use 0, haven't looked into the problem
if rgoff == 0:
rgoff = 1
if azoff == 0:
azoff = 1
firstSample = 1
if rgoff < 0:
firstSample = int(35 - rgoff)
firstLine = 1
if azoff < 0:
firstLine = int(35 - azoff)
ampcor.setAcrossGrossOffset(rgoff)
ampcor.setDownGrossOffset(azoff)
ampcor.setFirstSampleAcross(firstSample)
ampcor.setLastSampleAcross(mSLC.width)
ampcor.setNumberLocationAcross(numberOfOffsetsRange)
ampcor.setFirstSampleDown(firstLine)
ampcor.setLastSampleDown(mSLC.length)
ampcor.setNumberLocationDown(numberOfOffsetsAzimuth)
#MATCH PARAMETERS
#full-aperture mode
if (self._insar.modeCombination == 21) or \
(self._insar.modeCombination == 22) or \
(self._insar.modeCombination == 31) or \
(self._insar.modeCombination == 32):
ampcor.setWindowSizeWidth(64)
ampcor.setWindowSizeHeight(512)
#note this is the half width/length of search area, number of resulting correlation samples: 32*2+1
ampcor.setSearchWindowSizeWidth(32)
ampcor.setSearchWindowSizeHeight(32)
#triggering full-aperture mode matching
ampcor.setWinsizeFilt(8)
ampcor.setOversamplingFactorFilt(64)
#regular mode
else:
ampcor.setWindowSizeWidth(64)
ampcor.setWindowSizeHeight(64)
ampcor.setSearchWindowSizeWidth(32)
ampcor.setSearchWindowSizeHeight(32)
#REST OF THE STUFF
ampcor.setAcrossLooks(1)
ampcor.setDownLooks(1)
ampcor.setOversamplingFactor(64)
ampcor.setZoomWindowSize(16)
#1. The following not set
#Matching Scale for Sample/Line Directions (-) = 1. 1.
#should add the following in Ampcor.py?
#if not set, in this case, Ampcor.py'value is also 1. 1.
#ampcor.setScaleFactorX(1.)
#ampcor.setScaleFactorY(1.)
#MATCH THRESHOLDS AND DEBUG DATA
#2. The following not set
#in roi_pac the value is set to 0 1
#in isce the value is set to 0.001 1000.0
#SNR and Covariance Thresholds (-) = {s1} {s2}
#should add the following in Ampcor?
#THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC
#ampcor.setThresholdSNR(0)
#ampcor.setThresholdCov(1)
ampcor.setDebugFlag(False)
ampcor.setDisplayFlag(False)
#in summary, only two things not set which are indicated by 'The following not set' above.
#run ampcor
ampcor.ampcor()
offsets = ampcor.getOffsetField()
ampcorOffsetFile = 'ampcor.off'
writeOffset(offsets, ampcorOffsetFile)
#finalize image, and re-create it
#otherwise the file pointer is still at the end of the image
mSLC.finalizeImage()
sSLC.finalizeImage()
##########################################
#3. cull offsets
##########################################
refinedOffsets = cullOffsets(offsets)
if refinedOffsets == None:
print('******************************************************************')
print('WARNING: There are not enough offsets left, so we are forced to')
print(' use offset without culling. frame {}, swath {}'.format(frameNumber, swathNumber))
print('******************************************************************')
catalog.addItem('warning message', 'not enough offsets left, use offset without culling. frame {} swath {}'.format(frameNumber, swathNumber), 'runSlcOffset')
refinedOffsets = offsets
cullOffsetFile = 'cull.off'
writeOffset(refinedOffsets, cullOffsetFile)
os.chdir('../')
os.chdir('../')
catalog.printToLog(logger, "runSlcOffset")
self._insar.procDoc.addAllFromCatalog(catalog)
| 44.224638 | 205 | 0.556612 |
a249d855edb7bfa214d842b6df7432a5c6d7bae9 | 3,894 | py | Python | tests/test_mapping_action.py | madman-bob/python-argparse-utils | e3a816596d1b374825a4b8d45b56fbce4758a4f4 | [
"MIT"
] | 7 | 2019-07-05T20:17:08.000Z | 2021-09-27T04:56:40.000Z | tests/test_mapping_action.py | madman-bob/python-argparse-utils | e3a816596d1b374825a4b8d45b56fbce4758a4f4 | [
"MIT"
] | 2 | 2019-04-03T09:43:40.000Z | 2020-05-05T17:47:22.000Z | tests/test_mapping_action.py | madman-bob/python-argparse-utils | e3a816596d1b374825a4b8d45b56fbce4758a4f4 | [
"MIT"
] | 1 | 2020-12-11T10:47:49.000Z | 2020-12-11T10:47:49.000Z | from argparse import ArgumentParser
from collections import OrderedDict
from contextlib import redirect_stderr
from enum import Enum
from io import StringIO
from unittest import TestCase
from argparse_utils import mapping_action, enum_action
class TestMappingAction(TestCase):
@property
def options(self):
return OrderedDict([
('x', 1),
('y', 2),
('z', object()),
])
class Colours(Enum):
red = 1
green = 2
blue = 3
def test_basic_mapping_action(self):
options = self.options
parser = ArgumentParser()
parser.add_argument('-a', action=mapping_action(options))
with self.subTest(arg='x'):
args = parser.parse_args('-a x'.split())
self.assertEqual(args.a, 1)
with self.subTest(arg='z'):
args = parser.parse_args('-a z'.split())
self.assertIs(args.a, options['z'])
def test_mapping_action_multiple_keys(self):
parser = ArgumentParser()
parser.add_argument('-a', nargs='*', action=mapping_action(self.options))
args = parser.parse_args('-a x y'.split())
self.assertEqual(args.a, [1, 2])
def test_mapping_action_invalid_key(self):
parser = ArgumentParser()
parser.add_argument('-a', action=mapping_action(self.options))
error_message = StringIO()
with redirect_stderr(error_message), self.assertRaises(SystemExit):
parser.parse_args('-a w'.split())
self.assertRegex(error_message.getvalue(), r"invalid choice: 'w' \(choose from x, y, z\)")
def test_mapping_action_help(self):
parser = ArgumentParser()
parser.add_argument('-a', action=mapping_action(self.options))
self.assertRegex(parser.format_help(), r"-a \{x,y,z\}")
def test_mapping_action_key_normalizer(self):
parser = ArgumentParser()
parser.add_argument('-a', action=mapping_action(self.options, str.upper))
with self.subTest("Help message normalized"):
self.assertRegex(parser.format_help(), r"-a \{X,Y,Z\}")
with self.subTest("Arg normalized"):
args = parser.parse_args('-a X'.split())
self.assertEqual(args.a, 1)
def test_basic_enum_action(self):
parser = ArgumentParser()
parser.add_argument('-a', action=enum_action(self.Colours))
args = parser.parse_args('-a red'.split())
self.assertEqual(args.a, self.Colours.red)
def test_enum_action_multiple_keys(self):
parser = ArgumentParser()
parser.add_argument('-a', nargs='*', action=enum_action(self.Colours))
args = parser.parse_args('-a red green'.split())
self.assertEqual(args.a, [self.Colours.red, self.Colours.green])
def test_enum_action_invalid_key(self):
parser = ArgumentParser()
parser.add_argument('-a', action=enum_action(self.Colours))
error_message = StringIO()
with redirect_stderr(error_message), self.assertRaises(SystemExit):
parser.parse_args('-a purple'.split())
self.assertRegex(error_message.getvalue(), r"invalid choice: 'purple' \(choose from red, green, blue\)")
def test_enum_action_help(self):
parser = ArgumentParser()
parser.add_argument('-a', action=enum_action(self.Colours))
self.assertRegex(parser.format_help(), r"-a \{red,green,blue\}")
def test_enum_action_key_normalizer(self):
parser = ArgumentParser()
parser.add_argument('-a', action=enum_action(self.Colours, str.upper))
with self.subTest("Help message normalized"):
self.assertRegex(parser.format_help(), r"-a \{RED,GREEN,BLUE\}")
with self.subTest("Arg normalized"):
args = parser.parse_args('-a GrEeN'.split())
self.assertEqual(args.a, self.Colours.green)
| 31.918033 | 112 | 0.639959 |
4fb3f06e19a8fa8ba24f75811e58cf51293cc250 | 20,446 | py | Python | src/config/fabric-ansible/ansible-playbooks/filter_plugins/discover_os_computes.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/config/fabric-ansible/ansible-playbooks/filter_plugins/discover_os_computes.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | null | null | null | src/config/fabric-ansible/ansible-playbooks/filter_plugins/discover_os_computes.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | #!/usr/bin/python
#
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
from builtins import object
import logging
import re
import sys
import traceback
sys.path.append('/opt/contrail/fabric_ansible_playbooks/filter_plugins') # noqa
sys.path.append('/opt/contrail/fabric_ansible_playbooks/common') # noqa
from contrail_command import CreateCCNode
from import_server import FilterModule as FilterModuleImportServer
from job_manager.job_utils import JobVncApi
DOCUMENTATION = '''
---
Discover OS Computes.
This file contains the implementation for identifying all leaf nodes in the provided fabric network and creating OS compute nodes.  # noqa: E501
find_leaf_devices filter:
    Collect all devices that are added to a given fabric network.
    Identify the physical_router_role for each device and collect data only for leaf devices.
    If the device is a leaf, its credentials are gathered and returned so the "show lldp neighbors detail" command can be run.
create_os_node_filter filter:
    For the output data of the "show lldp neighbors detail" command, collect the data needed to create an OS node object.
    Then return the list of all objects found in the network in the format:
    nodes:
      - name: node-1
        node_type: ovs-compute
        ports:
          - name: ens224
            mac_address: 00:0c:29:13:37:bb
            switch_name: VM283DD71D00
'''
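# A minimal, hypothetical playbook sketch of how these filters are usually wired
# together (task layout and variable names are assumptions, not part of this module):
#
#   - set_fact:
#       leaf_result: "{{ job_ctx | find_leaf_devices_filter }}"
#   # ... run "show lldp neighbors detail" against leaf_result.leaf_devices ...
#   - set_fact:
#       node_result: "{{ job_ctx | create_os_node_filter(lldp_command_output) }}"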
LEAF = 'leaf'
OVS = "ovs"
SRIOV = "sriov"
OVS_COMPUTE = "ovs-compute"
SRIOV_COMPUTE = "sriov-compute"
REGEX_NODE_TYPE = r"node_type: (\w+)"
FAILURE = 'failure'
SUCCESS = 'success'
STATUS = 'status'
ERRMSG = 'errmsg'
LEAF_DEVICES = 'leaf_devices'
OS_COMPUTE_NODES = 'os_compute_nodes'
class FilterModule(object):
"""Fabric filter plugins."""
@staticmethod
def _init_logging():
"""Initialize logging.
:return: type=<logging.Logger>
"""
logger = logging.getLogger('OsComputesDiscoveryFilter')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.WARN)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', # noqa: E501
datefmt='%Y/%m/%d %H:%M:%S')
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger
def __init__(self):
"""Initialize Fabric Filter Module."""
self._logger = FilterModule._init_logging()
def filters(self):
"""Return filters that will be used."""
return {
'find_leaf_devices_filter': self.find_leaf_devices_filter,
'create_os_node_filter': self.create_os_node_filter
}
@staticmethod
def _validate_job_ctx(job_ctx):
"""Validate input params."""
job_input = job_ctx.get('input')
if not job_input:
raise ValueError('Invalid job_ctx: missing job_input')
if not job_ctx.get('job_template_fqname'):
raise ValueError('Invalid job_ctx: missing job_template_fqname')
if not job_input.get('fabric_uuid'):
raise ValueError('Invalid job_ctx: missing fabric_uuid')
@staticmethod
def _get_password(device_obj):
"""Get and return decrypted password."""
password = device_obj.physical_router_user_credentials.get_password()
return JobVncApi.decrypt_password(encrypted_password=password,
pwd_key=device_obj.uuid)
@staticmethod
def get_fabric_name(fabric):
"""
Get and return fabric_name.
:param fabric: string
:return fabric_name: string
"""
return fabric.get_fq_name_str()
@staticmethod
def get_physical_router_devices(fabric):
"""
Get and return list of physical routers in provided fabric.
:param fabric: string
:return physical_router_refs: list
"""
physical_router_refs = fabric.get_physical_router_back_refs()
if physical_router_refs is None:
physical_router_refs = []
return physical_router_refs
# ***************** find_leaf_devices filter *****************************
def find_leaf_devices(self, fabric_uuid, vnc_api):
"""
        Find and return all leaf devices for the given fabric network.
        For the found devices, collect and return authentication data.
        The credentials will be used to run commands directly on each device.
:param fabric_uuid: string
:param vnc_api: vnc_api established connection
:return:
# example
# [
# {
# "host": "10.10.10.2",
# "password": "admin",
# "username": "admin"
# },
# {
# "host": "10.10.10.4",
# "password": "admin",
# "username": "admin"
# }
# ]
"""
fabric = vnc_api.fabric_read(id=fabric_uuid)
fabric_name = FilterModule.get_fabric_name(fabric)
self._logger.info("Begin process of discovering leaf devices in fabric network %s" % fabric_name) # noqa: E501
physical_router_refs = FilterModule.get_physical_router_devices(fabric)
self._logger.info(
"In fabric %s Found the following list of physical routers %s" % (fabric_name, physical_router_refs)) # noqa: E501
results = []
for p_router in physical_router_refs:
physical_router = vnc_api.physical_router_read(id=p_router['uuid'])
if physical_router.physical_router_role != LEAF:
continue
host_details = {
'username': physical_router.physical_router_user_credentials.username, # noqa: E501
'password': (FilterModule._get_password(physical_router)),
'host': physical_router.physical_router_management_ip
}
results.append(host_details)
            self._logger.info(
                "In fabric %s found the following leaf device %s. "
                "On this device the 'show lldp neighbors detail' command "
                "will be applied"
                % (fabric_name, physical_router.physical_router_management_ip))  # noqa: E501
return results
def find_leaf_devices_filter(self, job_ctx):
"""
Validate input and call method to find leaf devices in provided fabric.
:param job_ctx: Dictionary
# example:
# {
# 'job_transaction_descr': 'Discover OS Computes',
# 'fabric_uuid': '123412341234-123412341234',
# 'contrail_command_host': '10.10.10.10:9091',
# 'cc_username': 'root',
# 'cc_password': "root"
# }
:return: Dictionary
# if success, returns
# {
# 'status': 'success',
# 'leaf_devices': [
# {
# 'username': u'admin',
# 'host': '10.10.10.4',
# 'password': 'admin'
# }
# ]
# }
# if failure, returns
# {
# 'status': 'failure',
# 'error_msg': <string: error message>
# }
"""
try:
FilterModule._validate_job_ctx(job_ctx)
job_input = job_ctx.get('input')
vnc_api = JobVncApi.vnc_init(job_ctx)
fabric_uuid = job_input['fabric_uuid']
leaf_devices = self.find_leaf_devices(fabric_uuid, vnc_api)
except Exception as e:
errmsg = "Unexpected error: %s\n%s" % (
str(e), traceback.format_exc()
)
return {
STATUS: FAILURE,
ERRMSG: errmsg,
}
return {
STATUS: SUCCESS,
LEAF_DEVICES: leaf_devices,
}
# ***************** create_os_node_filter filter *************************
def get_mapping_ip_to_hostname(self, vnc_api, fabric_uuid):
"""
        Create a dictionary mapping IP addresses to device hostnames.
:param vnc_api: vnc_api class established connection
:param fabric_uuid: string
:return: Dictionary
# example:
# {
# '10.10.10.4': 'Router_1',
# '10.10.10.7': 'Router_2'
# }
"""
fabric = vnc_api.fabric_read(id=fabric_uuid)
physical_router_refs = FilterModule.get_physical_router_devices(fabric)
ip_to_hostname = {}
for dev in physical_router_refs:
physical_router = vnc_api.physical_router_read(id=dev['uuid'])
device_ip_address = physical_router.physical_router_management_ip
device_hostname = physical_router.get_physical_router_hostname()
ip_to_hostname[device_ip_address] = device_hostname
self._logger.debug("Found the following IP to Hostname mapping dictionary: %s" % ip_to_hostname) # noqa: E501
return ip_to_hostname
@staticmethod
def get_node_type(system_description):
"""
        Based on the provided system_description, verify and return the node_type of the OS compute. # noqa: E501
        There are 2 possibilities: OVS and SRiOV. The system description is a mandatory value that
        must be set in the LLDP system description on the connected OS compute node.
:param system_description: string
example: "node_type: OVS"
:return: string or None
example: "ovs-compute"
"""
node_type = re.search(REGEX_NODE_TYPE, system_description)
if not node_type:
return None
if node_type.group(1).lower() == OVS:
return OVS_COMPUTE
elif node_type.group(1).lower() == SRIOV:
return SRIOV_COMPUTE
@staticmethod
def create_node_properties(device_neighbor_details,
node_type,
device_display_name):
"""
Create and return node properties.
:param device_neighbor_details: Dictionary
# example:
# {
# 'lldp-remote-system-name': 'node-4',
# 'lldp-local-interface': 'xe-0/0/3',
# 'lldp-remote-port-description': u'ens224',
# 'lldp-remote-port-id': '00:0c:29:8b:ef:26',
# (...)
# }
:param node_type: String
:param device_display_name: String
:return: Dictionary
# example:
# {
# 'nodes_type': 'ovs-compute',
# 'name': u'node-1',
# 'ports':
# [{
# 'mac_address': u'00:0c:29:13:37:bb',
# 'port_name': u'xe-0/0/0',
# 'switch_name': u'VM283DD71D00',
# 'name': u'ens224'
# }]
# }
"""
port = {
'port_name': str(device_neighbor_details['lldp-local-interface']), # noqa: E501
'switch_name': device_display_name,
'name': str(device_neighbor_details['lldp-remote-port-description']), # noqa: E501
'mac_address': str(device_neighbor_details['lldp-remote-port-id']) # noqa: E501
}
node = {
'node_type': node_type,
'name': str(device_neighbor_details['lldp-remote-system-name']),
'ports': [port]
}
return node
@staticmethod
def import_nodes_to_contrail(all_nodes, cc_node_obj):
"""
Import nodes to CC using import_server job trigger.
:param all_nodes: Dictionary
:param cc_node_obj: CreateCCNode object class
:return: None
"""
logging.info("Begin adding nodes {} to Contrail Command".format(str(all_nodes))) # noqa: E501
FilterModuleImportServer().import_nodes(all_nodes, cc_node_obj)
@staticmethod
def get_switch_name(node):
"""
Get and return switch_name.
There is always only one element in a list.
"""
return node['ports'][0]['switch_name']
@staticmethod
def get_ip_address(device_command_output):
"""
Get and return IP address of a device.
The structure of input Dictionary is gathered directly from Juniper device. # noqa: E501
"""
return device_command_output['item']['host']
@staticmethod
def get_dev_neighbors_details(device_command_output):
"""
Get and return LLDP neighbor details.
The structure of input Dictionary is gathered directly from Juniper device. # noqa: E501
"""
return device_command_output['parsed_output']['lldp-neighbors-information']['lldp-neighbor-information'] # noqa: E501
@staticmethod
def get_system_description(device_neighbor_details):
"""
Get and return LLDP neighbor system description.
The structure of input Dictionary is gathered directly from Juniper device. # noqa: E501
"""
return device_neighbor_details['lldp-system-description']['lldp-remote-system-description'] # noqa: E501
@staticmethod
def get_hostname(ip_to_hostname_mapping, device_ip_address):
"""Get and return hostname."""
return ip_to_hostname_mapping[device_ip_address]
def create_os_node(self, vnc_api, devices_command_output,
fabric_uuid, cc_node_obj):
"""
Create and return list of OS Object nodes and its properties.
        Nodes are created based on devices_command_output. # noqa: E501
        A device that is going to be created as a node in the autodiscovery process must
        contain "node_type: <ovs/sriov>" information in its LLDP description.
        If this description is not present, the device will be skipped.
:param cc_node_obj: CreateCCNode object class
:param fabric_uuid: String
:param vnc_api: vnc_api class established connection:
:param devices_command_output: Dictionary
:return: list
# example:
# [
# {
# 'nodes_type': 'ovs-compute',
# 'name': u'node-1',
# 'ports':
# [{
# 'mac_address': u'00:0c:29:13:37:bb',
# 'port_name': u'xe-0/0/0',
# 'switch_name': u'VM283DD71D00',
# 'name': u'ens224'
# }]
# }
# ]
"""
self._logger.info("Begin process of creating OS nodes object in fabric network") # noqa: E501
nodes = []
ip_to_hostname = self.get_mapping_ip_to_hostname(vnc_api, fabric_uuid)
for device_command_output in devices_command_output['results']:
device_ip_address = FilterModule.get_ip_address(device_command_output) # noqa: E501
device_hostname = FilterModule.get_hostname(ip_to_hostname, device_ip_address) # noqa: E501
devices_neighbors_details = FilterModule.get_dev_neighbors_details(device_command_output) # noqa: E501
for device_neighbor_details in devices_neighbors_details:
system_description = FilterModule.get_system_description(device_neighbor_details) # noqa: E501
node_type = FilterModule.get_node_type(system_description)
if node_type is None:
continue
node = FilterModule.\
create_node_properties(device_neighbor_details,
node_type,
device_hostname)
nodes.append(node)
switch_name = FilterModule.get_switch_name(node)
self._logger.info("On device %s found node: %s connected to %s" % (device_hostname, node, switch_name)) # noqa: E501
created_nodes = {
'nodes': nodes
}
self._logger.info("Nodes found and created: %s" % created_nodes)
FilterModule.import_nodes_to_contrail(created_nodes, cc_node_obj)
return created_nodes
def create_os_node_filter(self, job_ctx, devices_command_outputs):
"""
        Param (devices_command_outputs) is the result of the "show lldp neighbors detail" command. # noqa: E501
        This param was gathered automatically in the previous task, when the above command was run on all
        leaf devices in the fabric.
:param devices_command_outputs: Dictionary
# example:
# {
# 'msg': u'All items completed',
# 'changed': False,
# 'results': [
# {
# "parsed_output": {
# "lldp-neighbors-information": {
# "lldp-neighbor-information": [
# {
# (...)
# "lldp-local-interface": "xe-0/0/0",
# (...)
# "lldp-remote-management-address": "10.5.5.5",
# (...)
# "lldp-remote-port-description": "ens256",
# "lldp-remote-port-id": "00:0c:29:13:37:c5"
# }
# ]
# }
# }
# }
# ]
# }
:param job_ctx: Dictionary
# example:
# {
# 'job_transaction_descr': 'Discover OS Computes',
# 'fabric_uuid': '123412341234-123412341234',
# 'contrail_command_host': '10.10.10.10:9091',
# 'cc_username': 'root',
# 'cc_password': "root"
# }
:return: Dictionary
# if success, returns
# {
# 'status': 'success'
# 'os_compute_nodes':
# {
# 'nodes':
# [
# {
# 'name': 'node-1'
# 'node_type': 'ovs-compute',
# 'ports': [{
# 'address': '00:0c:29:13:37:c5',
# 'port_name': 'xe-0/0/0',
# 'switch_name': 'VM283DF6BA00',
# 'name': 'ens256'
# }]
# }
# ]
# }
# }
# if failure, returns
# {
# 'status': 'failure',
# 'error_msg': <string: error message>
# }
"""
try:
FilterModule._validate_job_ctx(job_ctx)
job_input = job_ctx.get('input')
vnc_api = JobVncApi.vnc_init(job_ctx)
fabric_uuid = job_input['fabric_uuid']
cluster_id = job_ctx.get('contrail_cluster_id')
cluster_token = job_ctx.get('auth_token')
cc_host = job_input['contrail_command_host']
cc_username = job_input['cc_username']
cc_password = job_input['cc_password']
cc_node_obj = CreateCCNode(cc_host,
cluster_id,
cluster_token,
cc_username,
cc_password)
os_compute_nodes = self.create_os_node(vnc_api,
devices_command_outputs,
fabric_uuid,
cc_node_obj)
except Exception as e:
errmsg = "Unexpected error: %s\n%s" % (
str(e), traceback.format_exc()
)
return {
STATUS: FAILURE,
ERRMSG: errmsg,
}
return {
STATUS: SUCCESS,
OS_COMPUTE_NODES: os_compute_nodes,
}
| 38.216822 | 133 | 0.539959 |
c08f985291e1d9859c141869ec43563efaad7158 | 4,394 | py | Python | test/types.py | carloscanova/python-odml | dff793bff86187b67be139c0f32c7cd036ba8db4 | [
"BSD-4-Clause"
] | null | null | null | test/types.py | carloscanova/python-odml | dff793bff86187b67be139c0f32c7cd036ba8db4 | [
"BSD-4-Clause"
] | null | null | null | test/types.py | carloscanova/python-odml | dff793bff86187b67be139c0f32c7cd036ba8db4 | [
"BSD-4-Clause"
] | null | null | null | import unittest
import odml.types as typ
import odml
import datetime
class TestTypes(unittest.TestCase):
def setUp(self):
pass
def test_date(self):
date = datetime.date(2011, 12, 1)
date_string = '2011-12-01'
self.assertEqual(date, typ.date_get(date_string))
self.assertEqual(typ.date_set(date), date_string)
def test_time(self):
time = datetime.time(12,34,56)
time_string = '12:34:56'
self.assertEqual(time, typ.time_get(time_string))
self.assertEqual(typ.time_set(time), time_string)
def test_datetime(self):
date = datetime.datetime(2011, 12, 1, 12, 34, 56)
date_string = '2011-12-01 12:34:56'
self.assertEqual(date, typ.datetime_get(date_string))
self.assertEqual(typ.datetime_set(date), date_string)
def test_empty_binary(self):
v = odml.Value("", dtype="string")
v.dtype = "binary"
v.encoding = "base64"
self.assertIsNone(v.value)
v.encoding = "quoted-printable"
self.assertIsNone(v.value)
v.encoding = "hexadecimal"
self.assertIsNone(v.value)
self.assertEqual(v.checksum, 'crc32$00000000')
v.checksum = "md5"
self.assertEqual(v.checksum, 'md5$d41d8cd98f00b204e9800998ecf8427e')
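        # d41d8cd98f00b204e9800998ecf8427e is the well-known MD5 digest of empty input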
def test_8bit_binary(self):
data = ''.join([chr(i) for i in xrange(256)])
v = odml.Value(data, dtype="binary")
v.encoder = "base64"
b64_data = 'AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w=='
self.assertEqual(v.value, b64_data)
v.value = b64_data
self.assertEqual(v.data, data)
self.assertEqual(v.value, b64_data)
v.encoder = "quoted-printable"
qp_data = '=00=01=02=03=04=05=06=07=08=09\n=0B=0C\r=0E=0F=10=11=12=13=14=15=16=17=18=19=1A=1B=1C=1D=1E=1F !"#$%&\'()*+,-=\n./0123456789:;<=3D>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuv=\nwxyz{|}~=7F=80=81=82=83=84=85=86=87=88=89=8A=8B=8C=8D=8E=8F=90=91=92=93=94=\n=95=96=97=98=99=9A=9B=9C=9D=9E=9F=A0=A1=A2=A3=A4=A5=A6=A7=A8=A9=AA=AB=AC=AD=\n=AE=AF=B0=B1=B2=B3=B4=B5=B6=B7=B8=B9=BA=BB=BC=BD=BE=BF=C0=C1=C2=C3=C4=C5=C6=\n=C7=C8=C9=CA=CB=CC=CD=CE=CF=D0=D1=D2=D3=D4=D5=D6=D7=D8=D9=DA=DB=DC=DD=DE=DF=\n=E0=E1=E2=E3=E4=E5=E6=E7=E8=E9=EA=EB=EC=ED=EE=EF=F0=F1=F2=F3=F4=F5=F6=F7=F8=\n=F9=FA=FB=FC=FD=FE=FF'
self.assertEqual(v.value, qp_data)
v.value = qp_data
self.assertEqual(v.data, data)
self.assertEqual(v.value, qp_data)
v.encoder = "hexadecimal"
hex_data = '000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff'
self.assertEqual(v.value, hex_data)
v.value = hex_data
self.assertEqual(v.data, data)
self.assertEqual(v.value, hex_data)
self.assertEqual(v.checksum, 'crc32$29058c73')
v.checksum = "md5"
self.assertEqual(v.checksum, 'md5$e2c865db4162bed963bfaa9ef6ac18f0')
v.encoder = ''
v.data = v.data[:127] # chrs > 128 cannot be converted to ascii
v.dtype = "string"
self.assertIsNone(v.encoder)
self.assertIsNone(v.checksum)
self.assertEqual(v.value, data[:127])
def test_int(self):
v = odml.Value(value="123456789012345678901", dtype="int")
self.assertEqual(v.data, 123456789012345678901)
self.assertEqual(v.value, "123456789012345678901")
v = odml.Value(value="-123456789012345678901", dtype="int")
self.assertEqual(v.data, -123456789012345678901)
v = odml.Value(value="123.45", dtype="int")
self.assertEqual(v.data, 123)
if __name__ == '__main__':
unittest.main()
| 49.931818 | 621 | 0.69868 |
69ec655f57a8b026ecb950d53ff941c30a570a77 | 217 | py | Python | Understand-Reactions/Dataset/rename.py | juanjoneri/RIPS-2018 | 5502c49b511574a468eb2c603e04b6436db215bd | [
"CECILL-B"
] | null | null | null | Understand-Reactions/Dataset/rename.py | juanjoneri/RIPS-2018 | 5502c49b511574a468eb2c603e04b6436db215bd | [
"CECILL-B"
] | null | null | null | Understand-Reactions/Dataset/rename.py | juanjoneri/RIPS-2018 | 5502c49b511574a468eb2c603e04b6436db215bd | [
"CECILL-B"
] | null | null | null | import os
if __name__ == '__main__':
for i, filename in enumerate(os.listdir('.')):
extension = filename.split('.').pop()
if extension != 'py':
os.rename(filename, f'{i}.{extension}')
| 27.125 | 51 | 0.56682 |
dc7051e907ec4d873187a966f2f076874071f0b6 | 212 | py | Python | samples/s3.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
] | 3 | 2019-05-04T12:19:09.000Z | 2019-08-30T07:12:31.000Z | samples/s3.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
] | null | null | null | samples/s3.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
] | null | null | null | # MAXIMIZING XOR
L = int(input())  # cast to int: under Python 3, input() returns a string
R = int(input())
results = []
if L >= 1 and R >= L and R <= 10 ** 3:
for A in range(L, R+1):
for B in range(A, R+1):
results.append(A ^ B)
print(max(results))
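# Brute force over all pairs L <= A <= B <= R costs O((R - L)^2) XOR operations,
# which is easily fast enough for the stated constraint R <= 10**3.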
| 15.142857 | 38 | 0.5 |
7742fc03646d7e87a98b7433cd78233655defce4 | 34 | py | Python | twinkle/__init__.py | emCOMP/twinkle | f1ddfb833b9cb05421317290000232d17f0bad00 | [
"MIT"
] | 2 | 2015-06-24T15:04:10.000Z | 2021-02-12T07:39:58.000Z | twinkle/__init__.py | emCOMP/twinkle | f1ddfb833b9cb05421317290000232d17f0bad00 | [
"MIT"
] | null | null | null | twinkle/__init__.py | emCOMP/twinkle | f1ddfb833b9cb05421317290000232d17f0bad00 | [
"MIT"
] | null | null | null | """
"""
__version__ = '0.0.5'
| 4.25 | 21 | 0.411765 |
5c394f52877267773a67d79f8b12b7f6e97188e3 | 734 | py | Python | setup.py | ahuang11/HoloExt | 01e4f0aaf3b8244c9e029056b54a0e3320957fad | [
"MIT"
] | 11 | 2018-02-14T16:48:21.000Z | 2021-02-24T20:34:43.000Z | setup.py | ahuang11/HoloExt | 01e4f0aaf3b8244c9e029056b54a0e3320957fad | [
"MIT"
] | 18 | 2018-02-20T05:10:03.000Z | 2019-07-26T13:36:28.000Z | setup.py | ahuang11/holoext | 01e4f0aaf3b8244c9e029056b54a0e3320957fad | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='holoext',
version='1.0.4',
description='Holoviews Extension',
url='http://github.com/ahuang11/holoext',
author='Andrew Huang',
author_email='[email protected]',
license='MIT',
packages=['holoext'],
include_package_data=True,
install_requires=[
'matplotlib',
'numpy',
'pandas',
'holoviews',
'bokeh',
'dask'
],
keywords=['data', 'visualization',
'holoviews', 'bokeh',
'mod', 'extension',
'andrew', 'huang'],
zip_safe=False)
| 29.36 | 47 | 0.463215 |
031d4724f3fe20feaf181d0da946fe1563df0390 | 873 | py | Python | INBa/2015/Sarocvashin_M/task_7_23.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | INBa/2015/Sarocvashin_M/task_7_23.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | INBa/2015/Sarocvashin_M/task_7_23.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | # Задача 6. Вариант 23
#1-50. Разработайте систему начисления очков для задачи 6, в соответствии с которой игрок получал бы большее количество баллов за меньшее количество попыток.
# Сароквашин Максим
# 13.05.2016
import random
print("Компьютер загадал название одного из семи дней недели, а Вы должны его угадать.\n")
days = ('Понедельник','Вторник','Срела','Четверг','Пятница','Суббота','Воскресенье')
day = random.randint(0,6)
x = 0
i = 0
score = 0
while(x != 7):
print(day[x])
x += 1
answer = input("\nВведите день: ")
while(answer != days[day]):
print("Неверно, попробуйте ещё раз.")
answer = input("\nВведите день: ")
i += 1
if i == 0:
score = 10
elif 0<i<6:
score = 10 - i*2
else:
score = 0
print("Верно, Вы победили!")
print("Число попыток: "+str(i))
print("Вы заработали "+str(score)+" баллов")
input("\nДля выхода нажмите Enter.")
| 20.302326 | 157 | 0.674685 |
fd669aa1329054339a26f0a996cdde07287287e0 | 4,039 | py | Python | teradata/datadog_checks/teradata/utils.py | OuesFa/integrations-core | 0ffe4ca306580a2e775b515152384034c2dfdc03 | [
"BSD-3-Clause"
] | null | null | null | teradata/datadog_checks/teradata/utils.py | OuesFa/integrations-core | 0ffe4ca306580a2e775b515152384034c2dfdc03 | [
"BSD-3-Clause"
] | null | null | null | teradata/datadog_checks/teradata/utils.py | OuesFa/integrations-core | 0ffe4ca306580a2e775b515152384034c2dfdc03 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import time
from typing import Any, AnyStr, Sequence, Set, Tuple
from datadog_checks.base import AgentCheck
from datadog_checks.teradata.config_models.instance import Table
def filter_tables(self, row):
# type: (Any, Sequence) -> Sequence
tables_to_collect, tables_to_exclude = self._tables_filter
table_name = row[3]
# No tables filter
if not tables_to_collect and not tables_to_exclude:
return row
# Table filtered out
if table_name in tables_to_exclude:
return []
# Table included
if table_name in tables_to_collect:
return row
# Table excluded
return []
def create_tables_filter(self):
# type: (Any) -> Tuple[Set, Set]
tables_to_collect = set()
tables_to_exclude = set()
tables = self.config.tables
if isinstance(tables, tuple):
tables_to_collect = set(tables)
return tables_to_collect, tables_to_exclude
if isinstance(tables, Table):
if tables.include and tables.exclude:
for table in tables.include:
if table not in tables.exclude:
tables_to_collect.add(table)
tables_to_exclude = set(tables.exclude)
return tables_to_collect, tables_to_exclude
if tables.include:
tables_to_collect = set(tables.include)
if tables.exclude:
tables_to_exclude = set(tables.exclude)
return (tables_to_collect, tables_to_exclude)
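# Example (hypothetical table names): with a tables config whose include list is
# ['t1', 't2'] and exclude list is ['t2'], create_tables_filter returns
# ({'t1'}, {'t2'}), so filter_tables keeps rows whose table name is 't1' and
# drops rows for 't2' or for any table not listed in the include set.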
def timestamp_validator(self, row):
# type: (Any, Sequence) -> Sequence
now = time.time()
row_ts = row[0]
if type(row_ts) is not int:
msg = 'Returned timestamp `{}` is invalid.'.format(row_ts)
self.log.warning(msg)
self._query_errors += 1
return []
diff = now - row_ts
# Valid metrics should be no more than 10 min in the future or 1h in the past
if (diff > 3600) or (diff < -600):
msg = 'Resource Usage stats are invalid. {}'
if diff > 3600:
msg = msg.format('Row timestamp is more than 1h in the past. Is `SPMA` Resource Usage Logging enabled?')
elif diff < -600:
msg = msg.format('Row timestamp is more than 10 min in the future. Try checking system time settings.')
self.log.warning(msg)
self._query_errors += 1
return []
return row
def tags_normalizer(self, row, query_name):
# type: (Any, Sequence, AnyStr) -> Sequence
base_tags = [{"name": "td_amp", "col": row[0]}, {"name": "td_account", "col": row[1]}]
tags_map = [
{"stats_name": "DBC.DiskSpaceV", "tags": base_tags + [{"name": "td_database", "col": row[2]}]},
{
"stats_name": "DBC.AllSpaceV",
"tags": base_tags + [{"name": "td_database", "col": row[2]}, {"name": "td_table", "col": row[3]}],
},
{
"stats_name": "DBC.AMPUsageV",
"tags": base_tags + [{"name": "td_user", "col": row[2]}],
},
]
for stats_type in tags_map:
if query_name == stats_type['stats_name']:
for idx, tag in enumerate(stats_type['tags']):
# tag value may be type int
if not len(str(tag['col'])):
row[idx] = "undefined"
return row
@AgentCheck.metadata_entrypoint
def submit_version(check, row):
# type (Any) -> None
"""
Example version: 17.10.03.01
https://docs.teradata.com/r/Teradata-VantageTM-Data-Dictionary/July-2021/Views-Reference/DBCInfoV/Example-Using-DBCInfoV
"""
try:
teradata_version = row[0]
version_parts = {
name: part for name, part in zip(('major', 'minor', 'maintenance', 'patch'), teradata_version.split('.'))
}
check.set_metadata('version', teradata_version, scheme='parts', final_scheme='semver', part_map=version_parts)
except Exception as e:
check.log.warning("Could not collect version info: %s", e)
| 33.941176 | 124 | 0.618222 |
085feb84916555840e5126f729199455de471bf7 | 2,493 | py | Python | plotly/graph_objs/layout/mapbox/layer/_line.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/graph_objs/layout/mapbox/layer/_line.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/graph_objs/layout/mapbox/layer/_line.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | 1 | 2019-02-18T04:12:56.000Z | 2019-02-18T04:12:56.000Z | from plotly.basedatatypes import BaseLayoutHierarchyType
import copy
class Line(BaseLayoutHierarchyType):
# width
# -----
@property
def width(self):
"""
Sets the line width. Has an effect only when `type` is set to
*line*.
The 'width' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'layout.mapbox.layer'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
width
Sets the line width. Has an effect only when `type` is
set to *line*.
"""
def __init__(self, arg=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.layout.mapbox.layer.Line
width
Sets the line width. Has an effect only when `type` is
set to *line*.
Returns
-------
Line
"""
super(Line, self).__init__('line')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.mapbox.layer.Line
constructor must be a dict or
an instance of plotly.graph_objs.layout.mapbox.layer.Line"""
)
# Import validators
# -----------------
from plotly.validators.layout.mapbox.layer import (line as v_line)
# Initialize validators
# ---------------------
self._validators['width'] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('width', None)
self.width = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
| 25.96875 | 74 | 0.509426 |
a0de2b0f586f9c2f890087336e864bfb37f48ff8 | 6,170 | py | Python | fec_raw/management/commands/downloadfecrawdata.py | datadesk/django-fec-raw-data | 9d1f49e5ecc1552c55b635c63c1bf021871e4c0b | [
"MIT"
] | 3 | 2016-06-01T18:16:36.000Z | 2021-07-20T14:51:40.000Z | fec_raw/management/commands/downloadfecrawdata.py | datadesk/django-fec-raw-data | 9d1f49e5ecc1552c55b635c63c1bf021871e4c0b | [
"MIT"
] | 9 | 2015-11-24T06:22:56.000Z | 2021-06-10T17:45:57.000Z | fec_raw/management/commands/downloadfecrawdata.py | datadesk/django-fec-raw-data | 9d1f49e5ecc1552c55b635c63c1bf021871e4c0b | [
"MIT"
] | 1 | 2020-12-01T21:22:53.000Z | 2020-12-01T21:22:53.000Z | import os
import subprocess
import feedparser
from tomorrow import threads
from django.conf import settings
from fec_raw import get_download_directory
from fec_raw.management.commands import FecCommand
class Command(FecCommand):
help = "Download FEC filings to data directory"
# Path to ruby script that uses Fech to save a filing
FECH_PATH = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.dirname(__file__)
)
),
'fech_filing.rb'
)
# If there are no downloaded filings
DEFAULT_FIRST_FILING = 1000000
# Where to download files
DATA_DIR = get_download_directory()
def add_arguments(self, parser):
parser.add_argument(
'--filing',
action='store',
nargs=1,
type=int,
required=False,
help='ID of single filing to retrieve',
dest='filing'
)
parser.add_argument(
'--first',
'-f',
action='store',
nargs=1,
type=int,
required=False,
help='ID of first record to be added',
dest='first_record'
)
parser.add_argument(
'--last',
'-l',
action='store',
nargs=1,
type=int,
required=False,
help='ID of last record to be added',
dest='last_record'
)
parser.add_argument(
'--force',
action='store_true',
dest='force',
default=False,
help='Force a reload of the filings',
)
def handle(self, *args, **options):
"""
Determines the first and last filing numbers to download, either
from the provided options or from get_latest_downloaded_filing_number
and get_latest_filing_number. Loops through those records and, if it
hasn't been previously downloaded, downloads it.
"""
self.header("Downloading raw FEC filings")
# Create the raw data directory if it doesn't exist
raw_data_path = os.path.join(self.DATA_DIR, 'raw')
os.path.exists(raw_data_path) or os.makedirs(raw_data_path)
# Figure out which filings we're going to try to get
# If we want a specific filing, set both the first and last record to
# that filing
if options['filing']:
options['first_record'] = options['filing']
options['last_record'] = options['filing']
if options['first_record']:
first_record = options['first_record'][0]
else:
first_record = self.get_latest_downloaded_filing_number() + 1
if options['last_record']:
last_record = options['last_record'][0]
else:
last_record = self.get_latest_filing_number()
if first_record > last_record:
self.failure('No records to download')
return
self.log(
'Attempting to download filings {} to {}'.format(
first_record,
last_record
)
)
# Loop through all filings we're interested in
for filing_no in range(first_record, last_record + 1):
# Create string version of the filing number
filing_id = str(filing_no)
# It should exist at this path
filing_path = os.path.join(
self.DATA_DIR,
'raw',
filing_id + '.fec'
)
# If it does AND we're not forcing a refresh ...
if os.path.isfile(filing_path) and not options['force']:
# Skip to the next record
self.log('Already downloaded filing {}'.format(filing_id))
continue
# Otherwise ...
else:
# Interface with fech to get the data
self.fech_filing(filing_id)
@threads(getattr(settings, 'FEC_DOWNLOAD_THREADS', 4))
def fech_filing(self, filing_no):
"""
Interfaces with Ruby library Fech to return data for a particular filing
number, saves data to CSVs in DATA_DIR
"""
self.log('Fech-ing filing {}'.format(filing_no))
p = subprocess.Popen(
['ruby', self.FECH_PATH, filing_no, self.DATA_DIR],
stdout=subprocess.PIPE,
stderr=None
)
# start the subprocess
output = p.communicate()
# this is the stdout of the ruby script
message = output[0]
# TODO: Refactor/fix the way this error handling works
if message[0] == 'E':
self.failure(' - Failed to download filing {}'.format(filing_no))
elif message[0] == 'S':
self.success(' - Downloaded filing {}'.format(filing_no))
def get_latest_downloaded_filing_number(self):
"""
Checks data directory to get latest previously downloaded
filing number.
"""
files = os.listdir(os.path.join(self.DATA_DIR, 'raw'))
try:
filing_numbers = [int(filename.split('.')[0]) for filename in files if not (filename.startswith('.') or filename.startswith('fech'))]
return sorted(filing_numbers, reverse=True)[0]
except:
return self.DEFAULT_FIRST_FILING
def get_latest_filing_number(self):
"""
Uses FEC RSS feed to get the ID of the latest filing.
"""
self.log('Getting latest filing number from FEC...')
url = 'http://efilingapps.fec.gov/rss/generate?preDefinedFilingType=ALL'
d = feedparser.parse(url)
# Sorted entries by date, most recent first
entries = sorted(
d.entries,
key=lambda entry: entry['published_parsed'],
reverse=True
)
# Get filing ID from link that looks like:
# http://docquery.fec.gov/dcdev/posted/1020510.fec
link = entries[0]['link']
latest = int(link.split('/')[-1].replace('.fec', ''))
self.log('Latest filing number is {}'.format(latest))
return latest
| 33.532609 | 145 | 0.568233 |
479ce04ea1fa055dedddd2bc352083ee13d4bf50 | 288 | py | Python | configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 11 | 2022-02-04T01:09:45.000Z | 2022-03-08T05:49:16.000Z | configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 2 | 2022-02-25T03:07:23.000Z | 2022-03-08T12:54:05.000Z | configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py | heytanay/mmsegmentation | 7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8 | [
"Apache-2.0"
] | 1 | 2022-01-25T05:13:37.000Z | 2022-01-25T05:13:37.000Z | _base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint='pretrain/mit_b3.pth'),
embed_dims=64,
num_layers=[3, 4, 18, 3]),
decode_head=dict(in_channels=[64, 128, 320, 512]))
| 32 | 75 | 0.663194 |
b7461c50618b3e4aea4230265612c4c2c7442fa9 | 75,513 | py | Python | pypureclient/flasharray/FA_2_6/api/host_groups_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_6/api/host_groups_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_6/api/host_groups_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class HostGroupsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api26_host_groups_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete a host group
Deletes a host group. The `names` query parameter is required.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
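    # Usage sketch (token and group name are placeholders); this wrapper is normally
    # reached through the generated client, but it can also be called directly:
    #   api = HostGroupsApi(api_client)
    #   api.api26_host_groups_delete_with_http_info(
    #       authorization='Bearer <JWT>', names=['hgroup01'])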
def api26_host_groups_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
continuation_token=None, # type: str
filter=None, # type: str
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.HostGroupGetResponse
"""List host groups
Returns a list of host groups.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: HostGroupGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api26_host_groups_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api26_host_groups_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HostGroupGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_hosts_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
group_names=None, # type: List[str]
member_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Remove a host from a host group
Removes a host from a host group. Removing a host from a host group automatically disconnects the host from all volumes associated with the group. Hosts can be removed from host groups at any time. The `group_names` and `member_names` parameters are required and must be set together, and only one host group can be specified at a time.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_hosts_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/hosts', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_hosts_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
continuation_token=None, # type: str
filter=None, # type: str
group_names=None, # type: List[str]
limit=None, # type: int
member_names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.MemberNoIdAllGetResponse
"""List host groups that are associated with hosts
Returns a list of host groups that are associated with hosts.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_hosts_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: MemberNoIdAllGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api26_host_groups_hosts_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api26_host_groups_hosts_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/hosts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MemberNoIdAllGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
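    # Usage sketch (illustrative, not part of the generated client): page through
    # host-group members 50 at a time. `client` is an assumed instance of this API
    # class; the continuation token comes from the `x-next-token` header of the
    # previous response (the exact accessor depends on how the response is returned).
    #
    #     resp = client.api26_host_groups_hosts_get_with_http_info(limit=50)
    #     token = ...  # value of the `x-next-token` response header, if present
    #     if token:
    #         resp = client.api26_host_groups_hosts_get_with_http_info(
    #             limit=50, continuation_token=token)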
def api26_host_groups_hosts_post_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
group_names=None, # type: List[str]
member_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.MemberNoIdAllResponse
"""Add a host to a host group
        Adds a host to a host group. Adding a host to a host group automatically connects the host to all volumes associated with the group. Multiple hosts can belong to a host group, but a host can only belong to one host group. Hosts can be added to host groups at any time. The `group_names` and `member_names` parameters are required and must be set together, and only one host group can be specified at a time.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_hosts_post_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: MemberNoIdAllResponse
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/hosts', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MemberNoIdAllResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
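    # Usage sketch (illustrative): `group_names` and `member_names` must be set
    # together, and only one host group may be targeted per call. `client` is an
    # assumed instance of this API class.
    #
    #     client.api26_host_groups_hosts_post_with_http_info(
    #         group_names=['hgroup01'], member_names=['host01', 'host02'])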
def api26_host_groups_patch_with_http_info(
self,
host_group=None, # type: models.HostGroupPatch
authorization=None, # type: str
x_request_id=None, # type: str
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.HostGroupResponse
"""Manage a host group
Manages a host group. The `names` query parameter is required.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_patch_with_http_info(host_group, async_req=True)
>>> result = thread.get()
:param HostGroupPatch host_group: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: HostGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'host_group' is set
if host_group is None:
raise TypeError("Missing the required parameter `host_group` when calling `api26_host_groups_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'host_group' in params:
body_params = params['host_group']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HostGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
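    # Usage sketch (illustrative): the PATCH body is a models.HostGroupPatch
    # instance. The `name` field shown here (renaming the group) is an assumption;
    # consult the generated models package for the fields it actually exposes.
    #
    #     patch = models.HostGroupPatch(name='hgroup01-renamed')
    #     client.api26_host_groups_patch_with_http_info(
    #         host_group=patch, names=['hgroup01'])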
def api26_host_groups_performance_by_array_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
filter=None, # type: str
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
total_only=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ResourcePerformanceNoIdByArrayGetResponse
"""List host group performance data by array
Displays real-time and historical performance data, real-time latency data, and average I/O size data. The displayed data is for each volume that is connected to a host group on the current array and for each volume that is connected to a host group on any remote arrays that are visible to the current array. The data is displayed as a total across all host groups on each array and by individual host group.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_performance_by_array_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ResourcePerformanceNoIdByArrayGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api26_host_groups_performance_by_array_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api26_host_groups_performance_by_array_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/performance/by-array', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcePerformanceNoIdByArrayGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_performance_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
filter=None, # type: str
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
total_only=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ResourcePerformanceNoIdGetResponse
"""List host group performance data
Displays real-time and historical performance data, real-time latency data, and average I/O sizes across all volumes, displayed both by host group and as a total across all host groups.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_performance_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ResourcePerformanceNoIdGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api26_host_groups_performance_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api26_host_groups_performance_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcePerformanceNoIdGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_post_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.HostGroupResponse
"""Create a host group
Creates a host group. The `names` query parameter is required.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_post_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: HostGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HostGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_protection_groups_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
group_names=None, # type: List[str]
member_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete a host group from a protection group
Deletes a host group member from a protection group. After the member has been removed, it is no longer protected by the group. Any protection group snapshots that were taken before the member was removed are not affected. Removing a member from a protection group does not delete the member from the array, and the member can be added back to the protection group at any time. The `group_names` parameter represents the name of the protection group, and the `member_names` parameter represents the name of the host group. The `group_names` and `member_names` parameters are required and must be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_protection_groups_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/protection-groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_protection_groups_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
continuation_token=None, # type: str
filter=None, # type: str
group_names=None, # type: List[str]
limit=None, # type: int
member_names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.MemberNoIdAllGetResponse
"""List host groups that are members of protection groups
Displays a list of host group members that belong to one or more protection groups.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_protection_groups_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: MemberNoIdAllGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api26_host_groups_protection_groups_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api26_host_groups_protection_groups_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/protection-groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MemberNoIdAllGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api26_host_groups_protection_groups_post_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
group_names=None, # type: List[str]
member_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.MemberNoIdAllResponse
"""Create a host group
Creates a host group member and assigns to a protection group. Members that are already in the protection group are not affected. For asynchronous replication, only members of the same type can belong to a protection group. The `group_names` parameter represents the name of the protection group, and the `member_names` parameter represents the name of the host group. The `group_names` and `member_names` parameters are required and must be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_protection_groups_post_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: MemberNoIdAllResponse
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/protection-groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MemberNoIdAllResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
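    # Usage sketch (illustrative): note the asymmetry documented above --
    # `group_names` names the protection group, `member_names` names the host
    # group being added to it. `client` is an assumed instance of this API class.
    #
    #     client.api26_host_groups_protection_groups_post_with_http_info(
    #         group_names=['pgroup01'], member_names=['hgroup01'])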
def api26_host_groups_space_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
filter=None, # type: str
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ResourceSpaceNoIdGetResponse
"""List host group space information
Returns provisioned (virtual) size and physical storage consumption data for each host group.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api26_host_groups_space_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ResourceSpaceNoIdGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api26_host_groups_space_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api26_host_groups_space_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.6/host-groups/space', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceSpaceNoIdGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
# ---- File: p587.py (repo: arpit0891/Project-euler, license: MIT) ----
import itertools, math
# Start by defining the coordinate system in a convenient way. The position and scale of the diagram don't
# matter because we only care about the ratio of areas, not the absolute areas. So, let the bottom left
# of the diagram be the origin (x = 0, y = 0), and let each circle to have a radius of 1.
#
# The leftmost circle is centered at (1, 1), and its equation is (x - 1)^2 + (y - 1)^2 = 1.
# The diagonal line has slope = s = 1 / n (for any positive n), and the line's equation is y = s * x.
# From basic geometry, the area of the blue L-section is 1 - pi / 4.
#
# Let's find the x-coordinate where the diagonal line intersects the first circle.
# Take the equation of the circle and substitute y = s * x for the line:
#
# (x - 1)^2 + (s*x - 1)^2 = 1.
# (x^2 - 2x + 1) + (s^2 x^2 - 2s*x + 1) = 1.
# (1 + s^2)x^2 + (-2 - 2s)x + 1 = 0.
#
# We can apply the quadratic formula with a = 1 + s^2, b = -2 - 2s, c = 1. There are two solutions for x,
# and we only want the smaller value. Thus, let X = (-b - sqrt(b^2 - 4ac)) / (2a). Or equivalently
# with more numerical stability (using the Citardauq formula), X = (2c) / (-b + sqrt(b^2 - 4ac)).
#
# The orange concave triangle can be divided into two parts by a vertical line:
#
# - The left part is a proper triangle, whose area is easily seen as x * y / 2 = X^2 * s / 2.
#
# - The right part is the region between the circle and the baseline. Let's re-express
# the circle's equation in terms of y, and only keep the lower semicircle:
#
# (x - 1)^2 + (y - 1)^2 = 1.
# (y - 1)^2 = 1 - (x - 1)^2.
# y - 1 = -sqrt(1 - (x - 1)^2).
# y = 1 - sqrt(1 - (x - 1)^2).
# y = 1 - sqrt(1 - (x^2 - 2x + 1)).
# y = 1 - sqrt(2x - x^2).
#
# Now, the indefinite integral of f(x) = 1 - sqrt(2x - x^2) with respect to x
# is F(x) = (x - 1) - [sqrt(2x - x^2) * (x - 1) + asin(x - 1)] / 2.
# Finding this integral is not obvious, but verifying it is a fairly straightforward
# mechanical procedure involving differentiation and simplification.
#
# The area of the right part is the integral of f(x) for x from X to 1, because the start is
# the x-coordinate where line meets the circle, and the end is where the circle meets the baseline.
# Hence the area is equal to F(1) - F(X).
#
# All in all, for any given n, the area of the orange concave triangle is X^2 * s / 2 + F(1) - F(X).
# The rest of the algorithm is a brute-force search with n = 1, 2, 3, ... until the ratio condition is met.
#
# Additional notes:
# - Intuitively, as n increases and the slope gets smaller, the area of the orange concave triangle should strictly
# decrease. This statement is in fact true, but proving it involves a big pile of differentiation and algebra.
# 0. We need to show that X (which is the x-coordinate of the line-circle intersection) increases with n.
# We'd differentiate X with respect to n, and get an expression that is always positive for any positive n.
# 1. Because X increases with n, the area of the right part, with its always-positive integrand, must decrease.
# 2. As for the left part, we'd differentiate X^2 * s / 2 with respect to n, and get a huge messy formula.
# It turns out this formula is negative for all n > 1. Hence the area of this triangle also decreases with n.
# After we prove that increasing n leads to decreasing orange area, we could use
# binary search to find the minimum value of n needed to meet the ratio requirement.
# - The use of floating-point arithmetic, for basic arithmetic operations (+ - * /) and irrational functions (sqrt,
# asin) alike, is inherently difficult or impossible to prove the correctness of. Furthermore, the algorithms
# for irrational functions are hard to understand and beyond the scope of this problem, and the error bounds for
# all operations are difficult to reason about.
# It should be possible to solve this particular problem using only integer arithmetic in a provably correct way.
# The basic idea would be to round the result of each operation both down and up to an integer fraction,
# keep track of pessimistic intervals that are guaranteed to contain the true value, accept a comparison only
# if the intervals don't overlap, and recompute everything at a higher precision if a comparison is inconclusive.
# Note: Because it doesn't seem easy to compute pi and asin(), it might be better to
# approximate integrals directly using the Darboux definition of lower and upper sums.
def compute():
# The indefinite integral of (1 - sqrt(2x - x^2)) dx.
def integral(x):
t = x - 1.0
return t - (math.sqrt(x * (2.0 - x)) * t + math.asin(t)) / 2.0
lsectionarea = 1.0 - math.pi / 4.0
for i in itertools.count(1):
slope = 1.0 / i
a = slope**2 + 1.0
b = -2.0 * (slope + 1.0)
c = 1.0
x = (2.0 * c) / (-b + math.sqrt(b * b - 4 * a * c))
concavetrianglearea = (x**2 * slope / 2) + (integral(1.0) - integral(x))
if concavetrianglearea / lsectionarea < 0.001:
return str(i)
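# Optional sanity check (added sketch, not part of the original solution): compare the
# closed-form antiderivative used in compute() against a plain midpoint Riemann sum of
# f(x) = 1 - sqrt(2x - x^2) over [x_start, 1]. Standard library only.
def _numeric_area_check(x_start, steps=200000):
    def f(x):
        return 1.0 - math.sqrt(2.0 * x - x * x)
    h = (1.0 - x_start) / steps
    return sum(f(x_start + (k + 0.5) * h) for k in range(steps)) * h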
if __name__ == "__main__":
print(compute())
# ---- File: test/functional/rpc_help.py (repo: bitcorub/bitrub, license: MIT) ----
#!/usr/bin/env python3
# Copyright (c) 2018 The BitRub Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC help output."""
from test_framework.test_framework import BitRubTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
import os
class HelpRpcTest(BitRubTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
self.test_categories()
self.dump_help()
def test_categories(self):
node = self.nodes[0]
# wrong argument count
assert_raises_rpc_error(-1, 'help', node.help, 'foo', 'bar')
# invalid argument
assert_raises_rpc_error(-1, 'JSON value is not a string as expected', node.help, 0)
# help of unknown command
assert_equal(node.help('foo'), 'help: unknown command: foo')
# command titles
titles = [line[3:-3] for line in node.help().splitlines() if line.startswith('==')]
components = ['Blockchain', 'Control', 'Generating', 'Mining', 'Network', 'Rawtransactions', 'Util']
if self.is_wallet_compiled():
components.append('Wallet')
if self.is_zmq_compiled():
components.append('Zmq')
assert_equal(titles, components)
def dump_help(self):
dump_dir = os.path.join(self.options.tmpdir, 'rpc_help_dump')
os.mkdir(dump_dir)
calls = [line.split(' ', 1)[0] for line in self.nodes[0].help().splitlines() if line and not line.startswith('==')]
for call in calls:
with open(os.path.join(dump_dir, call), 'w', encoding='utf-8') as f:
# Make sure the node can generate the help at runtime without crashing
f.write(self.nodes[0].help(call))
if __name__ == '__main__':
HelpRpcTest().main()
# ---- File: indico/modules/events/cloning.py (repo: yamiacat/indico, license: MIT) ----
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import OrderedDict
from operator import attrgetter
from indico.core import signals
from indico.util.caching import memoize_request
from indico.util.decorators import cached_classproperty
from indico.util.signals import named_objects_from_signal
class EventCloner:
"""
Base class to define cloning operations to be executed when an
event is cloned.
:param old_event: The event that's being cloned
"""
#: unique name of the clone action
name = None
#: the displayed name of the cloner
friendly_name = None
#: cloners that must be selected for this one to be available.
#: they are also guaranteed to run before this one.
requires = frozenset()
#: cloners that must run before this one (if enabled), but this
#: one runs even if they are not enabled
uses = frozenset()
#: Whether the clone operation is selected by default.
#: Use this to deselect options which are less common and thus
#: should not be enabled by default when cloning an event.
is_default = False
#: Whether this cloner is internal and never shown in the list.
#: An internal cloner is executed when `is_default` is set to
#: ``True`` or another cloner depends on it (always use `requires`
#: for this; `uses` will not enable an internal cloner). If
#: you override `is_visible` for an internal cloner (which only
#: makes sense when turning `is_internal` into a property), make
#: sure to check the super return value of `is_visible` to prevent
#: an internal cloner from showing up in the cloner selection.
is_internal = False
#: Whether this cloner is always available when pulled in as a
#: 'requires' dependency. This allows requiring a cloner without
#: having to keep it available even if there are no clonable
#: objects. For example, you may have something that uses the
#: 'tracks' cloner since it can reference tracks (and thus needs
#: them cloned) but also contains various other things that may
#: be clone-worthy even without tracks being set-up. While one
#: may think about using 'uses' instead of 'requires' first this
#: would result in people having to explicitly enable the other
#: cloner even if it makes no sense to not run it.
always_available_dep = False
#: Whether this cloner only allows cloning into new events and
#: is not available when importing into an existing event.
new_event_only = False
@classmethod
def get_cloners(cls, old_event):
"""Return the list of cloners (sorted for display)."""
return sorted((cloner_cls(old_event) for cloner_cls in get_event_cloners().values()),
key=attrgetter('friendly_name'))
@classmethod
def run_cloners(cls, old_event, new_event, cloners, n_occurrence=0, event_exists=False):
all_cloners = OrderedDict((name, cloner_cls(old_event, n_occurrence))
for name, cloner_cls in get_event_cloners().items())
if any(cloner.is_internal for name, cloner in all_cloners.items() if name in cloners):
raise Exception('An internal cloner was selected')
if event_exists:
if any(cloner.new_event_only for name, cloner in all_cloners.items() if name in cloners):
raise Exception('A new event only cloner was selected')
if any(cloner.has_conflicts(new_event) for name, cloner in all_cloners.items() if name in cloners):
raise Exception('Cloner target is not empty')
# enable internal cloners that are enabled by default or required by another cloner
cloners |= {c.name
for c in all_cloners.values()
if c.is_internal and (c.is_default or c.required_by_deep & cloners)}
# enable unavailable cloners that may be pulled in as a dependency nonetheless
extra = {c.name
for c in all_cloners.values()
if not c.is_available and c.always_available_dep and c.required_by_deep & cloners}
cloners |= extra
active_cloners = OrderedDict((name, cloner) for name, cloner in all_cloners.items() if name in cloners)
if not all((c.is_internal or c.is_visible) and c.is_available
for c in active_cloners.values()
if c.name not in extra):
raise Exception('An invisible/unavailable cloner was selected')
for name, cloner in active_cloners.items():
if not (cloners >= cloner.requires_deep):
raise Exception('Cloner {} requires {}'.format(name, ', '.join(cloner.requires_deep - cloners)))
shared_data = {}
cloner_names = set(active_cloners)
for name, cloner in active_cloners.items():
shared_data[name] = cloner.run(new_event, cloner_names, cloner._prepare_shared_data(shared_data),
event_exists=event_exists)
@cached_classproperty
@classmethod
def requires_deep(cls):
"""All cloner names required by this cloner.
This includes cloners required by a requirement.
"""
cloners = get_event_cloners()
todo = set(cls.requires)
required = set()
while todo:
cloner = todo.pop()
required.add(cloner)
todo |= cloners[cloner].requires
return required
@cached_classproperty
@classmethod
def required_by_deep(cls):
"""All cloner names depending on this cloner.
This includes cloners which depend on a cloner depending on
this cloner.
"""
# This is not very efficient, but it runs exactly once on a not-very-large set
return {cloner.name for cloner in get_event_cloners().values() if cls.name in cloner.requires_deep}
def __init__(self, old_event, n_occurrence=0):
self.old_event = old_event
self.n_occurrence = n_occurrence
def run(self, new_event, cloners, shared_data, event_exists=False):
"""Performs the cloning operation.
:param new_event: The `Event` that's created by the cloning
operation.
:param cloners: A set containing the names of all enabled
cloners.
:param shared_data: A dict containing the data returned by
other cloners. Only data from cloners
specified in `requires` or `uses` will
be available in the dict. If a *used*
cloner was not selected, its name will
not be present in the data dict. The
value may be ``None`` depending on the
cloner. This would indicate that the
cloner was executed but did not return
any data.
:param event_exists: If cloning into an existing event
:return: data that may be used by other cloners depending on
or using this cloner
"""
raise NotImplementedError
@property
def is_visible(self):
"""Whether the clone operation should be shown at all.
Use this to hide an option because of a feature not being
enabled or because of the event type not supporting it.
"""
return not self.is_internal
@property
def is_available(self):
"""Whether the clone operation can be selected.
Use this to disable options if selecting them wouldn't make
sense, e.g. because there is nothing to clone.
"""
return True
def has_conflicts(self, target_event):
"""Check for conflicts between source event and target event.
Use this when cloning into an existing event to disable options
where ``target_event`` data would conflict with cloned data.
"""
return True
def _prepare_shared_data(self, shared_data):
linked = self.uses | self.requires
return {k: v for k, v in shared_data.items() if k in linked}
def _resolve_dependencies(cloners):
cloner_deps = {name: (cls.requires, cls.uses) for name, cls in cloners.items()}
resolved_deps = set()
while cloner_deps:
# Get cloners with both hard and soft dependencies being met
ready = {cls for cls, deps in cloner_deps.items() if all(d <= resolved_deps for d in deps)}
if not ready:
# Otherwise check for cloners with all hard dependencies being met
ready = {cls for cls, deps in cloner_deps.items() if deps[0] <= resolved_deps}
if not ready:
# Either a circular dependency or a dependency that's not loaded
raise Exception('Could not resolve dependencies between cloners (remaining: {})'
.format(', '.join(cloner_deps)))
resolved_deps |= ready
for name in ready:
yield name, cloners[name]
del cloner_deps[name]
@memoize_request
def get_event_cloners():
"""Get the dict containing all available event cloners.
The returned dict is ordered based on the dependencies of each
cloner and when executing the cloners MUST be executed in that
order.
"""
cloners = named_objects_from_signal(signals.event_management.get_cloners.send(), plugin_attr='plugin')
return OrderedDict(_resolve_dependencies(cloners))
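# Hedged usage sketch (editor's addition; the cloner names are illustrative only):
#
#     selected = {'event_roles', 'contributions'}
#     EventCloner.run_cloners(old_event, new_event, selected)
#
# get_event_cloners() already returns the cloners in dependency order, so callers
# must execute them in exactly that order, as the docstring above requires.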
| 44.228311 | 112 | 0.650423 |
72f2276dfc8a3f64efed6dbd6117c3591cfe1e4f | 17,362 | py | Python | batchq/core/batch.py | troelsfr/BatchQ | 390ea7179ed457673c6a1d764bc78cf816f603f4 | [
"MIT"
] | 1 | 2017-08-17T12:35:28.000Z | 2017-08-17T12:35:28.000Z | batchq/core/batch.py | troelsfr/BatchQ | 390ea7179ed457673c6a1d764bc78cf816f603f4 | [
"MIT"
] | 1 | 2019-03-06T06:12:07.000Z | 2019-03-06T06:12:07.000Z | batchq/core/batch.py | troelsfr/BatchQ | 390ea7179ed457673c6a1d764bc78cf816f603f4 | [
"MIT"
] | null | null | null | from batchq.core.stack import current_machine
# from profilehooks import profile
import re
import unicodedata
import copy
import time
import hashlib
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def slugify(value):
global _slugify_strip_re, _slugify_hyphenate_re
if not isinstance(value, unicode):
value = unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
class Shell(object):
class STATE:
NOJOB = 0
QUEUED = 1
READY = 2
SUBMITTED = 3
PENDING = 4
RUNNING = 5
FAILED = 6
FINISHED = 7
texts = {NOJOB: 'no job',
QUEUED: 'queued',
READY: 'ready',
SUBMITTED: 'submitted',
PENDING: 'pending',
RUNNING: 'running',
FAILED: 'failed',
FINISHED: 'finished'}
def __init__(self, terminal = None, command = None, working_directory=None, dependencies=None, identifier = None, exitcode_zero = True, **kwargs):
self.verbose = False
if terminal is None:
terminal = current_machine()
elif isinstance(terminal, str):
self.terminal_name = terminal
terminal =getattr(current_machine(), terminal)
self.working_directory = working_directory
self.terminal = terminal
self.command = command
self.has_compression = False
self.is_compressed = False
if hasattr(self,"additional_arguments"):
self.additional_arguments.update( kwargs )
else:
self.additional_arguments = kwargs
self._state = self.STATE.NOJOB
self.dependencies = [] if dependencies is None else dependencies
self._identifier = self.generate_identifier() if identifier is None else identifier
self._identifier_filename = ".batchq.%s"%self._identifier
self._ret = ""
self._exitcode = -1
self._was_executed = False
self.exitcode_zero = exitcode_zero
# self.state()
def compress(self, queue):
        raise BaseException("Object has no compression.")
def identifier(self):
return self._identifier
def completed(self, count):
precendor_count = 0
return len(self.dependencies) + precendor_count
def generate_identifier(self):
## TODO: Extract information from previous dependencies
## TODO: maybe make it with MD5 or SHA
m = hashlib.md5()
if self.command is None:
return "unkown"
m.update(self.command)
return m.hexdigest() #slugify()
def status(self):
return self.STATE.texts[self.state()]
# @profile
def state(self):
if self._state == self.STATE.QUEUED:
self._state = self.STATE.READY
for a in self.dependencies:
if a.state() != self.STATE.FINISHED:
self._state = self.STATE.QUEUED
if not self.command is None and self._was_executed:
self._state = self.STATE.FINISHED
if self.exitcode_zero and not self._exitcode is None and self._exitcode != 0:
self._state = self.STATE.FAILED
return self._state
def pid(self):
return 0
def reset(self):
self._state = self.STATE.NOJOB
self.update_cache_state()
def update_cache_state(self):
pass
# @profile
def run_dependencies(self):
if self._state == self.STATE.NOJOB: self._state = self.STATE.QUEUED
# Waiting for dependencies to finish
if self.state() == self.STATE.QUEUED:
self._state = self.STATE.READY
for a in self.dependencies:
a.run()
if a.state() != self.STATE.FINISHED:
self._state = self.STATE.QUEUED
if self._state == self.STATE.QUEUED:
print(self, "EXIT QUEUED", self.dependencies)
return False
return True
def run(self, force=False):
if not self.run_dependencies(): return False
# Executing job
if not self.command is None:
if self._state == self.STATE.READY or force:
if not self._pushw():
raise BaseException("Could not enter working directory: '%s' from '%s' ('%s'). The executed class is '%s'." %(self.working_directory, self.terminal.pwd(), self.terminal.lazy_pwd(),self.__class__.__name__) )
try:
if self.verbose:
print("$ ", self.command)
self._ret = self.terminal.send_command(self.command)
self._exitcode = self.terminal.last_exitcode()
self._was_executed = True
except:
self._popw()
raise
self._popw()
self.update_cache_state()
else:
return False
return True
def _pushw(self):
self._can_pop = False
if not self.working_directory is None and \
self.working_directory.strip() != "." and \
not self.terminal.lazy_pwd().endswith(self.working_directory):
self._can_pop = True
return self.terminal.pushd(self.working_directory)
return True
def _popw(self):
if self._can_pop and \
not self.working_directory is None and \
self.working_directory.strip() != ".":
self.terminal.popd()
return True
return False
def queued(self):
return self.state() == self.STATE.QUEUED
def ready(self):
return self.state() == self.STATE.READY
def submitted(self):
return self.state() == self.STATE.SUBMITTED
def pending(self):
return self.state() == self.STATE.PENDING
def failed(self):
return self.state() == self.STATE.FAILED
def running(self):
return self.state() == self.STATE.RUNNING
def finished(self):
return self.state() == self.STATE.FINISHED
def standard_error(self):
raise BaseException("Standard error and output are not defined for the shell object. It might be in the future, however, until then use Shell.terminal.buffer")
def standard_output(self):
raise BaseException("Standard error and output are not defined for the shell object. It might be in the future, however, until then use Shell.terminal.buffer")
def log(self):
raise BaseException("'log' is not defined for the shell object. It is a place holder for the log in LSF and other submission systems.")
## TODO: Delete, since it is not used
class Job(object):
def __init__(self,chain, pull_state_from = None):
self.chain = chain
if pull_state_from is None:
self.pull_state = []
else:
self.pull_state = pull_state_from
def state(self):
return [a.STATE.texts[a.state()] for a in self.pull_state]
def queued(self):
return [a.queued() for a in self.pull_state]
def ready(self):
return [a.ready() for a in self.pull_state]
def submitted(self):
return [a.submitted() for a in self.pull_state]
def pending(self):
return [a.pending() for a in self.pull_state]
def failed(self):
return [a.failed() for a in self.pull_state]
def running(self):
return [a.running() for a in self.pull_state]
def finished(self):
return [a.finished() for a in self.pull_state]
def run(self):
self.chain.run()
return self.state()
class Collection(object):
def __init__(self, set = None,results_set = None,complement = None,results_complementary=None):
self._set = []
if not set is None:
self._set = set
self._results =[]
if not results_set is None:
self._results = results_set
else:
self._results = [None]*len(self._set)
if len(self._results) != len(self._set):
raise BaseException("Set list and result list must be equally long")
self._complementary = []
if not complement is None:
self._complementary = complement
self._results_complementary = []
if not results_complementary is None:
self._results_complementary = results_complementary
else:
self._results_complementary = [None]*len(self._complementary)
if len(self._results_complementary) < len(self._complementary):
self._results_complementary += [None]*( len(self._complementary) - len(self._results_complementary) )
if len(self._results_complementary) != len(self._complementary):
raise BaseException("Complementary set list and result list must be equally long")
self._min = -1
self._max = 1
self._until_finish = True
self._split_results = False
def all(self):
return self + ~self
@property
def objects(self):
return self._set
@property
def complementary_objects(self):
return self._complementary
@property
def results(self):
return self._results
@property
def complementary_results(self):
return self._results_complementary
def __append(self, object, ret = None):
if object in self._set:
return
if object in self._complementary:
#TODO: delete this object from complementary
pass
self._set.append(object)
self._results.append(ret)
def __append_complementary(self, object, ret = None):
if object in self._set or object in self._complementary:
return
self._complementary.append(object)
self._results_complementary.append(ret)
def __len__(self):
return len(self._set)
def __iadd__(self, other):
if isinstance(other, Collection):
# Adding objects
n = len(other.objects)
for i in range(0, n):
self.__append(other.objects[i], other.results[i])
# and complementary objects
n = len(other.complementary_objects)
for i in range(0, n):
self.__append_complementary(other.complementary_objects[i], other.complementary_results[i])
elif isinstance(other, Shell):
self.__append(other)
else:
raise BaseException("Cannot add type '%s' to %s." % (str(type(other)), self.__class__.__name__ ))
return self
def __add__(self, other):
ret = Collection()
ret.__iadd__(self)
ret.__iadd__(other)
return ret
def __delitem__(self, n):
del self._set[n]
def __getitem__(self, n):
x = self._set[n]
if not isinstance(x, list): x = [x]
return Collection(x)
def invert(self):
t = self._set
r = self._results
self._set = self._complementary
self._results = self._results_complementary
self._complementary = t
self._results_complementary = r
def __nonzero__(self):
return len(self._set) != 0
def __str__(self):
if len(self._results) != len(self._set):
raise BaseException("Somebody has been tampering with the set/results.")
return ", ".join([str(r) for r in self._results])
def __invert__(self):
x = copy.copy(self)
x.invert()
return x
def __neg__(self):
return ~ self
def _collect_parameters(self, min,max,finish, split = False):
self._min = min
self._max = max
self._until_finish = finish
self._split_results = split
def select(self, *identifiers):
newset,newret = [],[]
comset,comret = [],[]
for obj in self._set:
q = obj.identifier()
if q in identifiers:
newset.append(obj)
newret.append(q)
else:
comset.append(obj)
comret.append(q)
return Collection(newset, newret, comset, comret)
def as_list(self):
if self._results is None:
return []
return self._results
def as_dict(self):
# TODO: implement
if self._results is None:
return []
# TODO: Implement
return self._results
def compressed_invoke(self,name):
try:
attr = object.__getattribute__(self, name)
return attr
except AttributeError:
if name[0] == "_":
return object.__getattribute__(self,name)
def invoke(*args, **kwargs):
if len(self._set) ==0:
raise BaseException("Cannot operate on empty set.")
queue = copy.copy(self._set)
element = queue[0]
queue = queue[1:]
results1 = []
results2 = []
while not element is None:
former = element
if not element.run_dependencies():
ret = False
else:
# Compressing element if possible
if element.has_compression:
queue, element = element.compress(queue)
method = getattr(element,name)
ret = method(*args, **kwargs)
if not element.is_compressed:
rets_elements = [(ret, element)]
else:
rets_elements = element.pack(ret)
for ret, element in rets_elements:
if ret:
results1.append(element)
else:
results2.append(element)
# print "Clearing cache"
# former._pushw()
former.update_cache_state()
# former._popw()
if len(queue)>0:
element = queue[0]
queue = queue[1:]
else:
element = None
return invoke
###
# deprecated stuff
def wait(self, min = -1, max_retries = -1, finish = False, split= False):
ret = copy.copy(self)
ret._collect_parameters(min,max_retries,finish, split)
return ret
def split(self):
return self.wait(self._min,self._max,self._until_finish, True)
def any(self):
return self.wait(1)
def __getattribute__(self,name):
try:
attr = object.__getattribute__(self, name)
return attr
except AttributeError:
# Ensure right behaviour with built-in and hidden variables functions
if name[0] == "_":
return object.__getattribute__(self,name)
def foreach(*args, **kwargs):
ret1 = []
ret2 = []
i = 0
j = 0
progress_fnc = None
if "progress" in kwargs:
progress_fnc = kwargs['progress']
del kwargs['progress']
min = self._min
max = self._max
if not min is None and min < 0: min += 1 + len(self._set)
allowbreak = not self._until_finish
ret2 = copy.copy(self._set)
ret1 = []
notstop = len(ret2) >0
results1 = []
results2 = []
infinity_wait = 10000
while notstop:
results2 = []
cycle = 0
cycle_size = len(ret2)
wait = infinity_wait
for a in ret2 :
cycle += 1
method = getattr(a, name)
b = method(*args, **kwargs)
to = method.cache_timeout if hasattr(method, "cache_timeout") else infinity_wait
if to < wait: wait =to
if not b:
results2.append(b)
else:
i += 1
ret1.append(a)
results1.append(b)
if not min is None and min<=i:
if progress_fnc:
progress_fnc(i,min,cycle,cycle_size, j,b,a)
notstop = False
if allowbreak: break
if progress_fnc:
progress_fnc(i,min,cycle,cycle_size, j,b,a)
j += 1
if not max == -1 and j >= max:
notstop = False
if notstop and wait != infinity_wait:
time.sleep(wait)
ret2 = [a for a in ret2 if not a in ret1]
col = Collection(ret1, results1, ret2, results2)
if self._split_results: return col, ~col
return col
return foreach
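# Hedged sketch (editor's addition): Collection fans an attribute call out over every
# job in the set via the __getattribute__ hook above; the jobs are illustrative.
#
#     col = Collection([job1, job2, job3])
#     done = col.wait(min=1).finished()   # Collection of jobs whose finished() was truthy
#     print(~done)                        # complementary set (still pending/failed)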
| 31.114695 | 226 | 0.548266 |
4693461b497093b03e927ab082508283d3a4d8ea | 1,046 | py | Python | aoj/11/aoj1174.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | aoj/11/aoj1174.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | aoj/11/aoj1174.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | import copy
import itertools
import sys
if sys.version[0] == '2':
range, input = xrange, raw_input
drc = [(-1, 0), (0, 1), (1, 0), (0, -1)]
C_NUM = 6
def dfs(r, c, now_c, next_c):
tmp_board[r][c] = next_c
for dr, dc in drc:
nr, nc = r + dr, c + dc
if 0 <= nr < H and 0 <= nc < W and tmp_board[nr][nc] == now_c:
dfs(nr, nc, now_c, next_c)
while True:
H, W, C = map(int, input().split())
if not (H | W | C):
break
board = [[int(x) - 1 for x in input().split()] for _ in range(H)]
ans = 0
for ope in itertools.product(range(C_NUM), repeat=4):
if ope[0] == board[0][0] or ope[-1] == C - 1 or any(ope[i] == ope[i + 1] for i in range(3)):
continue
tmp_board = copy.deepcopy(board)
for color in ope:
dfs(0, 0, tmp_board[0][0], color)
dfs(0, 0, tmp_board[0][0], C - 1)
dfs(0, 0, tmp_board[0][0], -1)
cand = sum(row.count(-1) for row in tmp_board)
if cand > ans:
ans = cand
print(ans)
| 27.526316 | 100 | 0.507648 |
d534dd0de5524cb2baf4fb4285a690919d700e1b | 2,338 | py | Python | venv/lib/python2.7/site-packages/lettuce/terminal.py | GrupoMazoGuay/final | 782b7580848ac01dd878e54574b0a5d36132291b | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/lettuce/terminal.py | GrupoMazoGuay/final | 782b7580848ac01dd878e54574b0a5d36132291b | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/lettuce/terminal.py | GrupoMazoGuay/final | 782b7580848ac01dd878e54574b0a5d36132291b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import struct
def get_size():
if platform.system() == "Windows":
size = get_terminal_size_win()
else:
size = get_terminal_size_unix()
if not all(size):
size = (1, 1)
return size
def get_terminal_size_win():
#Windows specific imports
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr, left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
else: # can't determine actual size - return default values
sizex, sizey = 80, 25
return sizex, sizey
def get_terminal_size_unix():
# Unix/Posix specific imports
import fcntl
import termios
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.getenv('LINES', 25), os.getenv('COLUMNS', 80))
return int(cr[1]), int(cr[0])
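# Editor's example (assumption: the module can double as a quick command-line check).
if __name__ == "__main__":
    cols, lines = get_size()
    print("terminal size: %d columns x %d lines" % (cols, lines))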
| 29.225 | 72 | 0.644996 |
da04f94656cb3e3fc4db0a165939c0daaa8160c0 | 131 | py | Python | src/verinfo.py | user12986714/SpamSoup | 3dace5ed99083995d5d6a25b1abfe9eec7e65b6c | [
"Unlicense"
] | null | null | null | src/verinfo.py | user12986714/SpamSoup | 3dace5ed99083995d5d6a25b1abfe9eec7e65b6c | [
"Unlicense"
] | 29 | 2020-06-06T22:35:05.000Z | 2020-07-18T15:42:19.000Z | src/verinfo.py | user12986714/SpamSoup | 3dace5ed99083995d5d6a25b1abfe9eec7e65b6c | [
"Unlicense"
] | 1 | 2020-06-29T06:38:11.000Z | 2020-06-29T06:38:11.000Z | # coding=utf-8
ver_info = {"major": 1,
"alias": "Config",
"minor": 0}
cfg_ver_min = 2
cfg_ver_active = 2
| 14.555556 | 30 | 0.519084 |
518ddf18532addfefd8af6cb3dec804901299cc6 | 3,456 | py | Python | setup.py | justinjohn0306/uberduck-ml-dev | 439ec326eb7680c4fdd5ee97a09def8e355e0f7c | [
"Apache-2.0"
] | 167 | 2021-10-18T22:04:17.000Z | 2022-03-21T19:44:21.000Z | setup.py | justinjohn0306/uberduck-ml-dev | 439ec326eb7680c4fdd5ee97a09def8e355e0f7c | [
"Apache-2.0"
] | 18 | 2021-10-19T02:33:57.000Z | 2022-03-28T17:25:52.000Z | setup.py | justinjohn0306/uberduck-ml-dev | 439ec326eb7680c4fdd5ee97a09def8e355e0f7c | [
"Apache-2.0"
] | 24 | 2021-10-22T02:16:53.000Z | 2022-03-30T18:22:43.000Z | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools, re, sys
assert parse_version(setuptools.__version__) >= parse_version("36.2")
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=["="])
config.read("settings.ini")
cfg = config["DEFAULT"]
cfg_keys = "version description keywords author author_email".split()
expected = (
cfg_keys
+ "lib_name user branch license status min_python audience language".split()
)
for o in expected:
assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o: cfg[o] for o in cfg_keys}
if len(sys.argv) > 1 and sys.argv[1] == "version":
print(setup_cfg["version"])
exit()
licenses = {
"apache2": (
"Apache Software License 2.0",
"OSI Approved :: Apache Software License",
),
"mit": ("MIT License", "OSI Approved :: MIT License"),
"gpl2": (
"GNU General Public License v2",
"OSI Approved :: GNU General Public License v2 (GPLv2)",
),
"gpl3": (
"GNU General Public License v3",
"OSI Approved :: GNU General Public License v3 (GPLv3)",
),
"bsd3": ("BSD License", "OSI Approved :: BSD License"),
}
statuses = [
"1 - Planning",
"2 - Pre-Alpha",
"3 - Alpha",
"4 - Beta",
"5 - Production/Stable",
"6 - Mature",
"7 - Inactive",
]
py_versions = (
"2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8".split()
)
lic = licenses.get(cfg["license"].lower(), (cfg["license"], None))
min_python = cfg["min_python"]
requirements = ["pip", "packaging"]
if cfg.get("requirements"):
requirements += cfg.get("requirements", "").split()
if cfg.get("pip_requirements"):
requirements += cfg.get("pip_requirements", "").split()
dev_requirements = (cfg.get("dev_requirements") or "").split()
long_description = open("README.md", encoding='utf-8').read()
# ![png](docs/images/output_13_0.png)
for ext in ["png", "svg"]:
long_description = re.sub(
r"!\[" + ext + "\]\((.*)\)",
"!["
+ ext
+ "]("
+ "https://raw.githubusercontent.com/{}/{}".format(cfg["user"], cfg["lib_name"])
+ "/"
+ cfg["branch"]
+ "/\\1)",
long_description,
)
long_description = re.sub(
r"src=\"(.*)\." + ext + '"',
'src="https://raw.githubusercontent.com/{}/{}'.format(
cfg["user"], cfg["lib_name"]
)
+ "/"
+ cfg["branch"]
+ "/\\1."
+ ext
+ '"',
long_description,
)
setuptools.setup(
name=cfg["lib_name"],
license=lic[0],
classifiers=[
"Development Status :: " + statuses[int(cfg["status"])],
"Intended Audience :: " + cfg["audience"].title(),
"Natural Language :: " + cfg["language"].title(),
]
+ [
"Programming Language :: Python :: " + o
for o in py_versions[py_versions.index(min_python) :]
]
+ (["License :: " + lic[1]] if lic[1] else []),
url=cfg["git_url"],
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=requirements,
extras_require={"dev": dev_requirements},
python_requires=">=" + cfg["min_python"],
long_description=long_description,
long_description_content_type="text/markdown",
zip_safe=False,
entry_points={"console_scripts": cfg.get("console_scripts", "").split()},
**setup_cfg
)
| 29.793103 | 88 | 0.588831 |
d0ce16a6b66cb17b328c175c17c0f66f9c97ed50 | 2,253 | py | Python | test/test_from_files.py | bmwiedemann/httpolice | 4da2bde3d14a24b0623ee45ae10afd192d6fa771 | [
"MIT"
] | 1 | 2020-09-19T13:50:36.000Z | 2020-09-19T13:50:36.000Z | test/test_from_files.py | 99/httpolice | 8175020af13708801fbefc8d4f3787b11093c962 | [
"MIT"
] | null | null | null | test/test_from_files.py | 99/httpolice | 8175020af13708801fbefc8d4f3787b11093c962 | [
"MIT"
] | null | null | null | # -*- coding: utf-8; -*-
"""File-based test suite.
Treat every file in ``combined_data/`` and ``har_data/`` as a test case.
Run it through HTTPolice and check the list of resulting notices.
In combined stream files, the expected notices are specified in the preamble.
In HAR files, the expected notices are specified in the ``_expected`` key.
"""
import io
import json
import os
import re
import pytest
import six
from httpolice.exchange import check_exchange
from httpolice.inputs.har import har_input
from httpolice.inputs.streams import combined_input, parse_combined
from httpolice.reports import html_report, text_report
from httpolice.util.text import decode_path
base_path = os.path.dirname(decode_path(__file__))
relative_paths = [os.path.join(section, fn)
for section in [u'combined_data', u'har_data']
for fn in os.listdir(os.path.join(base_path, section))]
@pytest.fixture(params=relative_paths)
def input_from_file(request):
path = os.path.join(base_path, request.param)
if path.endswith('.har'):
with io.open(path, 'rt', encoding='utf-8-sig') as f:
expected = sorted(json.load(f)['_expected'])
exchanges = list(har_input([path]))
else:
(_, _, _, preamble) = parse_combined(path)
lines = [ln for ln in preamble.splitlines() if not ln.startswith(u'#')]
expected = sorted(int(n) for n in lines[0].split())
exchanges = list(combined_input([path]))
return (exchanges, expected)
def test_from_file(input_from_file): # pylint: disable=redefined-outer-name
(exchanges, expected) = input_from_file
for exch in exchanges:
check_exchange(exch)
buf = six.BytesIO()
text_report(exchanges, buf)
actual = sorted(int(ln[2:6])
for ln in buf.getvalue().decode('utf-8').splitlines()
if not ln.startswith(u'----'))
assert expected == actual
buf = six.BytesIO()
html_report(exchanges, buf)
# Check that the report does not contain strings that look like default
# Python object reprs, meaning that we failed to render something.
# This pops up from time to time.
assert not re.search(b'<[^>]+ at 0x[0-9a-fA-F]+>', buf.getvalue())
| 34.136364 | 79 | 0.68087 |
d69147bd1dd5395a4d684869551dffb1c0a0b610 | 1,104 | py | Python | SamplePythonScripts/write_and_read_example.py | DigiLog-N/Arrow | e068255b4991bd313ee10ad71258516c8d4ccb9c | [
"Apache-2.0"
] | null | null | null | SamplePythonScripts/write_and_read_example.py | DigiLog-N/Arrow | e068255b4991bd313ee10ad71258516c8d4ccb9c | [
"Apache-2.0"
] | null | null | null | SamplePythonScripts/write_and_read_example.py | DigiLog-N/Arrow | e068255b4991bd313ee10ad71258516c8d4ccb9c | [
"Apache-2.0"
] | null | null | null |
#
# Simple example to write out an Arrow file to disk and read it back in
#
# This is based on:
# https://arrow.apache.org/docs/python/ipc.html
# https://towardsdatascience.com/apache-arrow-read-dataframe-with-zero-memory-69634092b1a
#
import pyarrow as pa
dataA = [
pa.array([1, 2, 3, 4]),
pa.array(['foo', 'bar', 'baz', None]),
pa.array([True, None, False, True])
]
dataB = [
pa.array([11, 22, 33, 44]),
pa.array(['foo', 'bar', 'baz', None]),
pa.array([True, None, False, True])
]
batchA = pa.record_batch(dataA, names=['f0', 'f1', 'f2'])
batchB = pa.record_batch(dataB, names=['f0', 'f1', 'f2'])
print('Schema:\n')
print(batchA.schema)
# Write out the record to file
with pa.OSFile('data.arrow', 'wb') as sink:
with pa.RecordBatchFileWriter(sink, batchA.schema) as writer:
writer.write_batch(batchA)
writer.write_batch(batchB)
# Read the data as memory mapped file
source = pa.memory_map('data.arrow', 'r')
table = pa.ipc.RecordBatchFileReader(source).read_all().column("f0")
print('\nTable read from memory-mapped file, column f0:\n')
print(table)
| 26.926829 | 89 | 0.664855 |
d43aba3e78fe5187056d1a64e7f39ac6a5204bc4 | 5,575 | py | Python | meetup2xibo/updater/meetup2xibo.py | jshprentz/meetup2xibo | 236bef01305878943f27d246dac1b21cc78a521b | [
"MIT"
] | 3 | 2019-11-29T04:32:49.000Z | 2020-06-03T01:34:45.000Z | meetup2xibo/updater/meetup2xibo.py | jshprentz/meetup2xibo | 236bef01305878943f27d246dac1b21cc78a521b | [
"MIT"
] | 194 | 2020-06-01T01:42:41.000Z | 2021-08-02T10:25:58.000Z | meetup2xibo/updater/meetup2xibo.py | jshprentz/meetup2xibo | 236bef01305878943f27d246dac1b21cc78a521b | [
"MIT"
] | 1 | 2019-07-31T14:59:05.000Z | 2019-07-31T14:59:05.000Z | """Retrieve events from Meetup, extract data to display on signs, and update
Xibo."""
from collections import namedtuple
XiboSessionScope = namedtuple(
"XiboSessionScope",
"meetup_events cancelled_meetup_events xibo_session")
XiboEventCrudScope = namedtuple(
"XiboEventCrudScope",
"event_dataset_id event_column_ids")
class Meetup2Xibo:
"""Downloads Meetup events into a Xibo database."""
def __init__(
self, meetup_events_retriever, conflict_analyzer,
event_list_converter, site_cert_assurer, oauth2_session_starter,
event_suppressor, enter_xibo_session_scope):
"""Initialize with a Meetup events retriever, an event list converter,
a site certificate assurer, an OAuth2 session starter, an event
suppressor, and a Xibo sesson scope entrance function."""
self.meetup_events_retriever = meetup_events_retriever
self.conflict_analyzer = conflict_analyzer
self.event_list_converter = event_list_converter
self.site_cert_assurer = site_cert_assurer
self.oauth2_session_starter = oauth2_session_starter
self.event_suppressor = event_suppressor
self.enter_xibo_session_scope = enter_xibo_session_scope
def run(self):
"""Run the Meetup to Xibo conversion."""
meetup_events = self.retreive_meetup_events()
cancelled_meetup_events = self.retreive_cancelled_meetup_events()
self.convert(meetup_events, cancelled_meetup_events)
self.conflict_analyzer.analyze_conflicts(meetup_events)
self.event_suppressor.log_all_ids()
def convert(self, meetup_events, cancelled_meetup_events):
"""Convert Meetup events to Xibo events."""
xibo_session = self.start_xibo_session()
self.update_xibo_events(
meetup_events, cancelled_meetup_events, xibo_session)
def retreive_meetup_events(self):
"""Retrieve a list of Meetup events."""
retriever = self.meetup_events_retriever
json_events = retriever.retrieve_events_json()
converter = self.event_list_converter
return converter.convert_meetup_events(json_events)
def retreive_cancelled_meetup_events(self):
"""Retrieve a list of cancelled Meetup events."""
retriever = self.meetup_events_retriever
json_events = retriever.retrieve_cancelled_events_json()
converter = self.event_list_converter
return converter.convert_cancelled_meetup_events(json_events)
def start_xibo_session(self):
"""Return a new web session with the Xibo API server."""
self.site_cert_assurer.assure_site_cert()
return self.oauth2_session_starter.start_session()
def update_xibo_events(
self, meetup_events, cancelled_meetup_events, xibo_session):
"""Update events stored in Xibo to match the Meetup events."""
xibo_session_scope = XiboSessionScope(
meetup_events, cancelled_meetup_events, xibo_session)
processor = self.enter_xibo_session_scope(xibo_session_scope)
processor.run()
class XiboSessionProcessor:
"""Retreives event dataset metadata from Xibo."""
def __init__(
self, event_dataset_code, dataset_id_finder, column_name_manager,
xibo_api, enter_xibo_event_crud_scope):
"""Initialize with an event dataset code, a Xibo dataset ID finder, a
Xibo event column name manager, a Xibo API manager, and a function to
enter a Xibo event CRUD scope."""
self.event_dataset_code = event_dataset_code
self.dataset_id_finder = dataset_id_finder
self.column_name_manager = column_name_manager
self.xibo_api = xibo_api
self.enter_xibo_event_crud_scope = enter_xibo_event_crud_scope
def run(self):
"""Retrieve event dataset metadata from Xibo."""
dataset_id = self.lookup_dataset_id()
column_ids = self.map_dataset_column_names(dataset_id)
self.update_xibo_events(dataset_id, column_ids)
def lookup_dataset_id(self):
"""Lookup the dataset ID for a dataset code."""
return self.dataset_id_finder.find_dataset_id(self.event_dataset_code)
def map_dataset_column_names(self, dataset_id):
"""Map the dataset column names to IDs for a given dataset."""
json_columns = self.xibo_api.get_dataset_column_by_id(dataset_id)
return self.column_name_manager.json_to_column_ids(json_columns)
def update_xibo_events(self, event_dataset_id, event_column_ids):
"""Update events stored in Xibo to match the Meetup events."""
xibo_event_crud_scope = XiboEventCrudScope(
event_dataset_id, event_column_ids)
processor = self.enter_xibo_event_crud_scope(xibo_event_crud_scope)
processor.run()
class XiboEventCrudProcessor:
"""Updates events stored in Xibo to match the Meetup events."""
def __init__(self, xibo_event_crud, provide_event_updater):
"""Initialize a Xibo event CRUD manager, and a function that provides
an event updater. """
self.xibo_event_crud = xibo_event_crud
self.provide_event_updater = provide_event_updater
def run(self):
"""Update events stored in Xibo to match the Meetup events."""
xibo_events = self.xibo_event_crud.get_xibo_events()
event_updater = self.provide_event_updater(
self.xibo_event_crud, xibo_events)
event_updater.update_xibo()
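# Hedged wiring sketch (editor's addition; the collaborators are injected elsewhere,
# so this only indicates the call sequence, not a runnable configuration):
#
#     Meetup2Xibo(...).run()
#         -> retrieves current and cancelled Meetup events
#         -> XiboSessionProcessor.run()     # resolves dataset id and column ids
#         -> XiboEventCrudProcessor.run()   # updates Xibo events to match Meetup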
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 autoindent
| 41.604478 | 78 | 0.719641 |
103b209d9fa91c1d152128fc3b3b70d110081cb0 | 6,098 | py | Python | setup.py | prestix-studio/sewer | 67867f778eb92c9c14cd028116f5695b0223baa2 | [
"MIT"
] | null | null | null | setup.py | prestix-studio/sewer | 67867f778eb92c9c14cd028116f5695b0223baa2 | [
"MIT"
] | null | null | null | setup.py | prestix-studio/sewer | 67867f778eb92c9c14cd028116f5695b0223baa2 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
# To use a consistent encoding
import codecs
here = os.path.abspath(os.path.dirname(__file__))
about = {}
try:
import pypandoc
long_description = pypandoc.convert("README.md", "rst")
except ImportError:
long_description = codecs.open("README.md", encoding="utf8").read()
with open(os.path.join(here, "sewer", "__version__.py"), "r") as f:
exec(f.read(), about)
dns_provider_deps_map = {
"cloudflare": [""],
"aliyun": ["aliyun-python-sdk-core-v3", "aliyun-python-sdk-alidns"],
"hurricane": ["hurricanedns"],
"aurora": ["tldextract", "apache-libcloud"],
"acmedns": ["dnspython"],
"rackspace": ["tldextract"],
"dnspod": [""],
"duckdns": [""],
"cloudns": ["cloudns-api"],
}
all_deps_of_all_dns_provider = []
for _, vlist in dns_provider_deps_map.items():
all_deps_of_all_dns_provider += vlist
all_deps_of_all_dns_provider = list(set(all_deps_of_all_dns_provider))
setup(
name=about["__title__"],
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=about["__version__"],
description=about["__description__"],
long_description=long_description,
# The project's main homepage.
url=about["__url__"],
# Author details
author=about["__author__"],
author_email=about["__author_email__"],
# Choose your license
license=about["__license__"],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Security",
"Topic :: System :: Installation/Setup",
"Topic :: System :: Networking",
"Topic :: System :: Systems Administration",
"Topic :: Utilities",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
# What does your project relate to?
keywords="letsencrypt",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=['sewer'],
packages=find_packages(exclude=["docs", "*tests*"]),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["requests", "pyopenssl", "cryptography"],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip3 install -e .[dev,test]
extras_require={
"dev": ["coverage", "pypandoc", "twine", "wheel"],
"test": ["mock", "pylint==2.3.1", "black==18.9b0"],
"cloudflare": dns_provider_deps_map["cloudflare"],
"aliyun": dns_provider_deps_map["aliyun"],
"hurricane": dns_provider_deps_map["hurricane"],
"aurora": dns_provider_deps_map["aurora"],
"acmedns": dns_provider_deps_map["acmedns"],
"rackspace": dns_provider_deps_map["rackspace"],
"dnspod": dns_provider_deps_map["dnspod"],
"duckdns": dns_provider_deps_map["duckdns"],
"cloudns": dns_provider_deps_map["cloudns"],
"alldns": all_deps_of_all_dns_provider,
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
entry_points={"console_scripts": ["sewer=sewer.cli:main", "sewer-cli=sewer.cli:main"]},
)
# python packaging documentation:
# 1. https://python-packaging.readthedocs.io/en/latest/index.html
# 2. https://python-packaging-user-guide.readthedocs.io/tutorials/distributing-packages
# a) pip3 install wheel twine
# b) pip3 install -e .
# c) python setup.py sdist
# d) python setup.py bdist_wheel
# e) DONT use python setup.py register and python setup.py upload. They use http
# f) twine upload dist/* -r testpypi
# g) pip3 install -i https://testpypi.python.org/pypi <package name>
# h) twine upload dist/* # prod pypi
# i) pip3 install <package name>
| 41.202703 | 94 | 0.660216 |
ab6da8daaadb2d55d9974dd9301ac1bfea9d45e0 | 6,126 | py | Python | tsai/metrics.py | imilas/tsai | 0dc4833ddd9ef5404c20c8379698d1f3666a2d8f | [
"Apache-2.0"
] | 1,545 | 2020-11-10T22:23:00.000Z | 2022-03-31T19:50:24.000Z | tsai/metrics.py | imilas/tsai | 0dc4833ddd9ef5404c20c8379698d1f3666a2d8f | [
"Apache-2.0"
] | 345 | 2020-11-10T20:23:48.000Z | 2022-03-31T16:36:35.000Z | tsai/metrics.py | imilas/tsai | 0dc4833ddd9ef5404c20c8379698d1f3666a2d8f | [
"Apache-2.0"
] | 220 | 2020-11-19T21:13:55.000Z | 2022-03-31T23:08:37.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/051_metrics.ipynb (unless otherwise specified).
__all__ = ['MatthewsCorrCoefBinary', 'get_task_metrics', 'accuracy_multi', 'metrics_multi_common', 'precision_multi',
'recall_multi', 'specificity_multi', 'balanced_accuracy_multi', 'Fbeta_multi', 'F1_multi', 'mae', 'mape',
'recall_at_specificity', 'mean_per_class_accuracy']
# Cell
from .imports import *
from fastai.metrics import *
# Cell
mk_class('ActivationType', **{o:o.lower() for o in ['No', 'Sigmoid', 'Softmax', 'BinarySoftmax']},
doc="All possible activation classes for `AccumMetric")
# Cell
def MatthewsCorrCoefBinary(sample_weight=None):
"Matthews correlation coefficient for single-label classification problems"
return AccumMetric(skm.matthews_corrcoef, dim_argmax=-1, activation=ActivationType.BinarySoftmax, thresh=.5, sample_weight=sample_weight)
# Cell
def get_task_metrics(dls, binary_metrics=None, multi_class_metrics=None, regression_metrics=None, verbose=True):
if dls.c == 2:
pv('binary-classification task', verbose)
return binary_metrics
elif dls.c > 2:
pv('multi-class task', verbose)
return multi_class_metrics
else:
pv('regression task', verbose)
return regression_metrics
# Cell
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True, by_sample=False):
"Computes accuracy when `inp` and `targ` are the same size."
if sigmoid: inp = inp.sigmoid()
correct = (inp>thresh)==targ.bool()
if by_sample:
return (correct.float().mean(-1) == 1).float().mean()
else:
inp,targ = flatten_check(inp,targ)
return correct.float().mean()
def metrics_multi_common(inp, targ, thresh=0.5, sigmoid=True, by_sample=False):
"Computes TP, TN, FP, FN when `inp` and `targ` are the same size."
if not by_sample: inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
pred = inp>thresh
correct = pred==targ.bool()
TP = torch.logical_and(correct, (targ==1).bool()).sum()
TN = torch.logical_and(correct, (targ==0).bool()).sum()
incorrect = pred!=targ.bool()
FN = torch.logical_and(incorrect, (targ==1).bool()).sum()
FP = torch.logical_and(incorrect, (targ==0).bool()).sum()
N = targ.size()[0]
return N, TP, TN, FP, FN
def precision_multi(inp, targ, thresh=0.5, sigmoid=True):
"Computes precision when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
pred = inp>thresh
correct = pred==targ.bool()
TP = torch.logical_and(correct, (targ==1).bool()).sum()
FP = torch.logical_and(~correct, (targ==0).bool()).sum()
precision = TP/(TP+FP)
return precision
def recall_multi(inp, targ, thresh=0.5, sigmoid=True):
"Computes recall when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
pred = inp>thresh
correct = pred==targ.bool()
TP = torch.logical_and(correct, (targ==1).bool()).sum()
FN = torch.logical_and(~correct, (targ==1).bool()).sum()
recall = TP/(TP+FN)
return recall
def specificity_multi(inp, targ, thresh=0.5, sigmoid=True):
"Computes specificity (true negative rate) when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
pred = inp>thresh
correct = pred==targ.bool()
TN = torch.logical_and(correct, (targ==0).bool()).sum()
FP = torch.logical_and(~correct, (targ==0).bool()).sum()
specificity = TN/(TN+FP)
return specificity
def balanced_accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
"Computes balanced accuracy when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
pred = inp>thresh
correct = pred==targ.bool()
TP = torch.logical_and(correct, (targ==1).bool()).sum()
TN = torch.logical_and(correct, (targ==0).bool()).sum()
FN = torch.logical_and(~correct, (targ==1).bool()).sum()
FP = torch.logical_and(~correct, (targ==0).bool()).sum()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
balanced_accuracy = (TPR+TNR)/2
return balanced_accuracy
def Fbeta_multi(inp, targ, beta=1.0, thresh=0.5, sigmoid=True):
"Computes Fbeta when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
pred = inp>thresh
correct = pred==targ.bool()
TP = torch.logical_and(correct, (targ==1).bool()).sum()
TN = torch.logical_and(correct, (targ==0).bool()).sum()
FN = torch.logical_and(~correct, (targ==1).bool()).sum()
FP = torch.logical_and(~correct, (targ==0).bool()).sum()
precision = TP/(TP+FP)
recall = TP/(TP+FN)
beta2 = beta*beta
if precision+recall > 0:
Fbeta = (1+beta2)*precision*recall/(beta2*precision+recall)
else:
Fbeta = 0
return Fbeta
def F1_multi(*args, **kwargs):
return Fbeta_multi(*args, **kwargs) # beta defaults to 1.0
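# Hedged usage sketch (editor's addition, not part of the nbdev export). Shapes and the
# threshold are illustrative; `torch` comes from the star imports above.
#
#     inp  = torch.rand(8, 4)                    # raw outputs, 8 samples x 4 labels
#     targ = (torch.rand(8, 4) > 0.5).float()
#     balanced_accuracy_multi(inp, targ, thresh=0.5)
#     F1_multi(inp, targ, thresh=0.5)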
# Cell
def mae(inp,targ):
"Mean absolute error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return torch.abs(inp - targ).mean()
# Cell
def mape(inp,targ):
"Mean absolute percentage error between `inp` and `targ`."
inp,targ = flatten_check(inp, targ)
return (torch.abs(inp - targ) / torch.clamp_min(targ, 1e-8)).mean()
# Cell
def _recall_at_specificity(inp, targ, specificity=.95, axis=-1):
inp0 = inp[targ == 0]
inp1 = inp[targ == 1]
thr = torch.sort(inp0).values[-int(len(inp0) * (1 - specificity))]
return (inp1 > thr).float().mean()
recall_at_specificity = AccumMetric(_recall_at_specificity, specificity=.95, activation=ActivationType.BinarySoftmax, flatten=False)
# Cell
def _mean_per_class_accuracy(y_true, y_pred, *, labels=None, sample_weight=None, normalize=None):
cm = skm.confusion_matrix(y_true, y_pred, labels=labels, sample_weight=sample_weight, normalize=normalize)
return (cm.diagonal() / cm.sum(1)).mean()
mean_per_class_accuracy = skm_to_fastai(_mean_per_class_accuracy)
| 35.616279 | 141 | 0.667156 |
f7a78a91bbfe6df0d501526af6eeab43022f9356 | 5,742 | py | Python | scripts/integration.py | bhumikapahariapuresoftware/mypy_boto3_builder | 970705c3dee13355c9c5d758841da94d07464b10 | [
"MIT"
] | null | null | null | scripts/integration.py | bhumikapahariapuresoftware/mypy_boto3_builder | 970705c3dee13355c9c5d758841da94d07464b10 | [
"MIT"
] | null | null | null | scripts/integration.py | bhumikapahariapuresoftware/mypy_boto3_builder | 970705c3dee13355c9c5d758841da94d07464b10 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Integration tests.
"""
import argparse
import difflib
import json
import logging
import subprocess
import sys
from pathlib import Path
from mypy_boto3_builder.utils.nice_path import NicePath
ROOT_PATH = Path(__file__).parent.parent.resolve()
EXAMPLES_PATH = ROOT_PATH / "examples"
PYRIGHT_SNAPSHOTS_PATH = EXAMPLES_PATH / "pyright"
MYPY_SNAPSHOTS_PATH = EXAMPLES_PATH / "mypy"
SCRIPTS_PATH = ROOT_PATH / "scripts"
LOGGER_NAME = "int"
class SnapshotMismatchError(Exception):
"""
Exception for e2e failures.
"""
def setup_logging(level: int = 0) -> logging.Logger:
"""
    Get a configured Logger instance.
    Arguments:
        level -- Log level for the logger and its stream handler.
    Returns:
        Configured Logger.
"""
logger = logging.getLogger(LOGGER_NAME)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s %(message)s", datefmt="%H:%M:%S")
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
def parse_args() -> argparse.Namespace:
"""
CLI parser.
"""
parser = argparse.ArgumentParser(__file__)
parser.add_argument("-f", "--fast", action="store_true")
parser.add_argument("-u", "--update", action="store_true")
parser.add_argument("services", nargs="*")
return parser.parse_args()
def check_call(cmd: list[str]) -> None:
"""
Check command exit code and output on error.
"""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger = logging.getLogger(LOGGER_NAME)
for line in e.output.decode().splitlines():
logger.error(line)
raise
def install_master() -> None:
"""
Build and install `boto3-stubs`.
"""
check_call([(SCRIPTS_PATH / "build.sh").as_posix(), "--product", "boto3"])
check_call([(SCRIPTS_PATH / "install.sh").as_posix(), "master"])
def install_service(service_name: str) -> None:
"""
Build and install `mypy-boto3-*` subpackage.
"""
check_call(
[(SCRIPTS_PATH / "build.sh").as_posix(), "-s", service_name, "--product", "boto3-services"]
)
check_call([(SCRIPTS_PATH / "install.sh").as_posix(), service_name])
def compare(data: str, snapshot_path: Path, update: bool) -> None:
"""
Compare tool output with a snapshot.
"""
data = data.strip()
logger = logging.getLogger(LOGGER_NAME)
if not snapshot_path.exists():
snapshot_path.write_text(data)
logger.info(f"Created {NicePath(snapshot_path)}")
return
old_data = snapshot_path.read_text().strip()
if old_data == data:
logger.info(f"Matched {NicePath(snapshot_path)}")
return
if update:
snapshot_path.write_text(data)
logger.info(f"Updated {NicePath(snapshot_path)}")
return
diff = difflib.unified_diff(
old_data.strip().splitlines(), data.strip().splitlines(), lineterm=""
)
for line in diff:
logger.warning(line)
raise SnapshotMismatchError(f"Snapshot {snapshot_path} is different")
def run_pyright(path: Path, update: bool) -> None:
"""
Run `pyright` and compare output.
"""
try:
output = subprocess.check_output(
["pyright", path.as_posix(), "--outputjson"],
stderr=subprocess.DEVNULL,
encoding="utf8",
)
except subprocess.CalledProcessError as e:
output = e.output
data = json.loads(output).get("generalDiagnostics", [])
for diag in data:
del diag["file"]
new_data = json.dumps(data, indent=4)
snapshot_path = PYRIGHT_SNAPSHOTS_PATH / f"{path.name}.json"
compare(new_data, snapshot_path, update)
def run_mypy(path: Path, update: bool) -> None:
"""
Run `mypy` and compare output.
"""
try:
output = subprocess.check_output(
[sys.executable, "-m", "mypy", path.as_posix()],
stderr=subprocess.STDOUT,
encoding="utf8",
)
except subprocess.CalledProcessError as e:
output = e.output
new_data_lines = []
for line in output.splitlines():
if line.endswith("defined here"):
continue
new_data_lines.append(line)
new_data = "\n".join(new_data_lines)
snapshot_path = MYPY_SNAPSHOTS_PATH / f"{path.name}.out"
compare(new_data, snapshot_path, update)
def run_call(path: Path) -> None:
"""
Run submodule for sanity.
"""
subprocess.check_call([sys.executable, path.as_posix()])
def main() -> None:
"""
Main CLI entrypoint.
"""
args = parse_args()
setup_logging(logging.INFO)
logger = logging.getLogger(LOGGER_NAME)
if not args.fast:
logger.info("Installing master...")
install_master()
for file in EXAMPLES_PATH.iterdir():
if "_example.py" not in file.name:
continue
service_name = file.name.replace("_example.py", "")
if args.services and service_name not in args.services:
continue
if not args.fast:
logger.info(f"Installing {service_name}...")
install_service(service_name)
try:
logger.info(f"Running {NicePath(file)} ...")
run_call(file)
logger.info(f"Running mypy for {NicePath(file)} ...")
run_mypy(file, args.update)
logger.info(f"Running pyright for {NicePath(file)} ...")
run_pyright(file, args.update)
except SnapshotMismatchError as e:
logger.error(e)
exit(1)
if __name__ == "__main__":
main()
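# Editor's usage sketch (service names are illustrative):
#     python scripts/integration.py --fast s3 ec2   # skip rebuild, check two services
#     python scripts/integration.py --update        # refresh pyright/mypy snapshots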
| 27.873786 | 99 | 0.63201 |
c034d85bf60367ff96157d7d7f65c0fed02ee154 | 1,225 | py | Python | resources/bike.py | pelobairro/pelobairro-py-api | e48775655d3ca9ffa2cbaa98ed42c6b70c9a0d4e | [
"MIT"
] | null | null | null | resources/bike.py | pelobairro/pelobairro-py-api | e48775655d3ca9ffa2cbaa98ed42c6b70c9a0d4e | [
"MIT"
] | null | null | null | resources/bike.py | pelobairro/pelobairro-py-api | e48775655d3ca9ffa2cbaa98ed42c6b70c9a0d4e | [
"MIT"
] | null | null | null | from flask import jsonify, make_response
from flask_restful import Resource, request
import pandas as pd
import os
from geopy.distance import geodesic
class Bike(Resource):
def get(self):
lat = request.args.get('lat', default = 38.746118, type = float)
lng = request.args.get('lng', default = -9.109845, type = float)
max = request.args.get('max', default = 3, type = int)
results = []
results = self.getResults(lat, lng, max, results)
response = make_response(jsonify(results), 200)
response.headers["Content-Type"] = "application/json"
return response
def responseData(self, name, lat, lng, distance, type):
return {
'name': name,
'lat': lat,
'lng': lng,
'distance': distance,
'type': type
}
def getResults(self, lat, lng, max, results):
place =(lat, lng)
path = f'{os.getcwd()}/data'
for filename in os.listdir(path):
df = pd.read_csv(f'{path}/{filename}', sep=',')
for index, row in df.iterrows():
x = geodesic(place, (row['_lat_'],row['_lng_'])).km
if x <= max:
results.append(self.responseData(row['_name_'], row['_lat_'], row['_lng_'], x, row['_type_']))
return results
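# Hedged usage sketch (editor's addition): once registered on a Flask-RESTful Api,
# e.g. api.add_resource(Bike, '/bike'), the endpoint can be queried with
#     GET /bike?lat=38.746118&lng=-9.109845&max=3
# returning the CSV rows from ./data that lie within `max` km of the coordinates.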
| 29.878049 | 104 | 0.618776 |
ecd0f45cc12d02da0ad03e76593b70ee86e967a3 | 3,752 | py | Python | scripts/retrieve offsets.py | muscatmat/vmi-event-naive-detector | 082a8c383f0258de208e059d7d248d877c42bb63 | [
"MIT"
] | null | null | null | scripts/retrieve offsets.py | muscatmat/vmi-event-naive-detector | 082a8c383f0258de208e059d7d248d877c42bb63 | [
"MIT"
] | null | null | null | scripts/retrieve offsets.py | muscatmat/vmi-event-naive-detector | 082a8c383f0258de208e059d7d248d877c42bb63 | [
"MIT"
] | null | null | null | #!/usr/bin/python
print "--- Starting Malware Detection! ---"
# Import System Required Paths
import sys
sys.path.append('/usr/local/src/volatility-master')
# Import Volatility
import volatility.conf as conf
import volatility.registry as registry
registry.PluginImporter()
config = conf.ConfObject()
import volatility.commands as commands
import volatility.addrspace as addrspace
registry.register_global_options(config, commands.Command)
registry.register_global_options(config, addrspace.BaseAddressSpace)
config.parse_options()
config.PROFILE="LinuxDebian31604x64"
config.LOCATION = "vmi://debian-hvm"
# The dt() helper below needs Volatility's object model and a loaded profile.
# Assumption: the standard Volatility 2 API (utils.load_as) can resolve an address
# space for the configured LOCATION/PROFILE.
import volatility.obj as obj
import volatility.utils as utils
addr_space = utils.load_as(config)
profile = addr_space.profile
def dt(objct, address = None, space = None, recursive = False, depth = 0):
if address is not None:
        objct = obj.Object(objct, address, space or addr_space)
try:
if isinstance(objct, str):
size = profile.get_obj_size(objct)
membs = [ (profile.get_obj_offset(objct, m), m, profile.vtypes[objct][1][m][1]) for m in profile.vtypes[objct][1] ]
print "{0}".format("..." * depth), repr(objct), "({0} bytes)".format(size)
for o, m, t in sorted(membs):
print "{0}{1:6}: {2:30} {3}".format("..." * depth, hex(o), m, t)
if recursive:
if t[0] in profile.vtypes:
dt(t[0], recursive = recursive, depth = depth + 1)
elif isinstance(objct, obj.BaseObject):
membs = [ (o, m) for m, (o, _c) in objct.members.items() ]
if not recursive:
print repr(objct)
offsets = []
for o, m in sorted(membs):
val = getattr(objct, m)
if isinstance(val, list):
val = [ str(v) for v in val ]
# Handle a potentially callable offset
if callable(o):
o = o(objct) - objct.obj_offset
offsets.append((o, m, val))
# Deal with potentially out of order offsets
offsets.sort(key = lambda x: x[0])
for o, m, val in offsets:
try:
print "{0}{1:6}: {2:30} {3}".format("..." * depth, hex(o), m, val)
print "{0}{1:6}: {2:30} {3}".format("..." * depth, hex(o), m, val)
print "{0}{1:6}: {2:30} {3}".format("..." * depth, hex(o), m, val)
print "{0}{1:6}: {2:30} {3}".format("..." * depth, hex(o), m, val)
except UnicodeDecodeError:
print "{0}{1:6}: {2:30} -".format("..." * depth, hex(o), m)
if recursive:
if val.obj_type in profile.vtypes:
dt(val, recursive = recursive, depth = depth + 1)
elif isinstance(objct, obj.NoneObject):
print "ERROR: could not instantiate object"
print
print "Reason: ", objct.reason
else:
print "ERROR: first argument not an object or known type"
print
print "Usage:"
print
            help(dt)
except TypeError:
print "Error: could not instantiate object"
print
print "Reason: ", "displaying types with dynamic attributes is currently not supported"
dt("task_struct");
print "--- Malware Detection Exited! ---"
| 44.666667 | 139 | 0.486407 |
2fa333f7e525c4be85a3d619cc89ff3da4e65c56 | 869 | py | Python | lisa/tools/kill.py | tyhicks/lisa | 50d07cbd13e4e777eaa211b01387721fe2d2094f | [
"MIT"
] | 65 | 2020-12-15T13:42:29.000Z | 2022-03-03T13:14:16.000Z | lisa/tools/kill.py | acidburn0zzz/lisa | 3934d0546592d3ff71bc3e2c4aab5d4bc646a3b9 | [
"MIT"
] | 236 | 2020-11-24T18:28:26.000Z | 2022-03-30T19:19:25.000Z | lisa/tools/kill.py | acidburn0zzz/lisa | 3934d0546592d3ff71bc3e2c4aab5d4bc646a3b9 | [
"MIT"
] | 52 | 2020-12-08T17:40:46.000Z | 2022-03-31T18:24:14.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from lisa.executable import Tool
from .pgrep import Pgrep
class Kill(Tool):
@property
def command(self) -> str:
return "kill"
@property
def can_install(self) -> bool:
return False
def by_name(self, process_name: str, signum: int = 9) -> None:
running_processes = self.node.tools[Pgrep].get_processes(process_name)
for process in running_processes:
self.with_signum(process.id, signum)
def with_signum(self, pid: str, signum: int = 9) -> None:
self.run(
f"-{signum} {pid}",
shell=True,
sudo=True,
force_run=True,
expected_exit_code=0,
expected_exit_code_failure_message="fail to run "
f"{self.command} -{signum} {pid}",
)
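# Illustrative usage sketch (not part of the original file): inside a LISA test
# case that has a `node` fixture, the tool would typically be driven like this;
# the process name and pid below are hypothetical examples.
#
#   node.tools[Kill].by_name("stress-ng")            # SIGKILL every matching process
#   node.tools[Kill].with_signum("1234", signum=15)  # send SIGTERM to a single pid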
| 26.333333 | 78 | 0.601841 |
eb69099a0e26b5b1bbec2b0e55cd1a6679f4a3e6 | 3,898 | py | Python | src/obsolete/state_info.py | cmu-delphi/utils | 9228841e9389a8eb25f6dcd65d905bbe87ed35d5 | [
"MIT"
] | 3 | 2019-07-31T02:16:53.000Z | 2020-05-02T03:28:22.000Z | src/obsolete/state_info.py | cmu-delphi/utils | 9228841e9389a8eb25f6dcd65d905bbe87ed35d5 | [
"MIT"
] | 1 | 2021-03-30T20:51:45.000Z | 2021-03-30T20:51:45.000Z | src/obsolete/state_info.py | cmu-delphi/utils | 9228841e9389a8eb25f6dcd65d905bbe87ed35d5 | [
"MIT"
] | 3 | 2020-04-03T22:38:48.000Z | 2021-03-25T21:21:12.000Z | """
==================
=== Deprecated ===
==================
As of 2018-06-25, this file is no longer used. New code should use
geo/locations.py and geo/populations.py instead.
===============
=== Purpose ===
===============
Contains static data for US regions and states.
=================
=== Changelog ===
=================
2017-12-21
- removed imputation (see impute_missing_values.py)
2016-11-15
* use secrets
* epidata API update
2016-04-06
+ initial version
"""
class StateInfo:
def __init__(self):
# names of all regions and states
nat = ['nat']
hhs = ['hhs%d' % r for r in range(1, 11)]
cen = ['cen%d' % r for r in range(1, 10)]
sta = [
'AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC',
'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN',
'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ',
'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'RI',
'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VT', 'WA',
'WI', 'WV', 'WY'
]
# population of each state
population = {
'AK': 731449, 'AL': 4822023, 'AR': 2949131, 'AZ': 6553255,
'CA': 38041430, 'CO': 5187582, 'CT': 3590347, 'DC': 632323,
'DE': 917092, 'FL': 19317568, 'GA': 9919945, 'HI': 1392313,
'IA': 3074186, 'ID': 1595728, 'IL': 12875255, 'IN': 6537334,
'KS': 2885905, 'KY': 4380415, 'LA': 4601893, 'MA': 6646144,
'MD': 5884563, 'ME': 1329192, 'MI': 9883360, 'MN': 5379139,
'MO': 6021988, 'MS': 2984926, 'MT': 1005141, 'NC': 9752073,
'ND': 699628, 'NE': 1855525, 'NH': 1320718, 'NJ': 8864590,
'NM': 2085538, 'NV': 2758931, 'NY': 19570261, 'OH': 11544225,
'OK': 3814820, 'OR': 3899353, 'PA': 12763536, 'RI': 1050292,
'SC': 4723723, 'SD': 833354, 'TN': 6456243, 'TX': 26059203,
'UT': 2855287, 'VA': 8185867, 'VT': 626011, 'WA': 6897012,
'WI': 5726398, 'WV': 1855413, 'WY': 576412,
}
# list of states in each region
within = {
'nat': sta,
'hhs1': ['CT', 'MA', 'ME', 'NH', 'RI', 'VT'],
'hhs2': ['NJ', 'NY'],
'hhs3': ['DC', 'DE', 'MD', 'PA', 'VA', 'WV'],
'hhs4': ['AL', 'FL', 'GA', 'KY', 'MS', 'NC', 'SC', 'TN'],
'hhs5': ['IL', 'IN', 'MI', 'MN', 'OH', 'WI'],
'hhs6': ['AR', 'LA', 'NM', 'OK', 'TX'],
'hhs7': ['IA', 'KS', 'MO', 'NE'],
'hhs8': ['CO', 'MT', 'ND', 'SD', 'UT', 'WY'],
'hhs9': ['AZ', 'CA', 'HI', 'NV'],
'hhs10': ['AK', 'ID', 'OR', 'WA'],
'cen1': ['CT', 'MA', 'ME', 'NH', 'RI', 'VT'],
'cen2': ['NJ', 'NY', 'PA'],
'cen3': ['IL', 'IN', 'MI', 'OH', 'WI'],
'cen4': ['IA', 'KS', 'MN', 'MO', 'ND', 'NE', 'SD'],
'cen5': ['DC', 'DE', 'FL', 'GA', 'MD', 'NC', 'SC', 'VA', 'WV'],
'cen6': ['AL', 'KY', 'MS', 'TN'],
'cen7': ['AR', 'LA', 'OK', 'TX'],
'cen8': ['AZ', 'CO', 'ID', 'MT', 'NM', 'NV', 'UT', 'WY'],
'cen9': ['AK', 'CA', 'HI', 'OR', 'WA'],
}
for s in sta:
within[s] = [s]
# weight of each state in each region
weight = {}
for reg in nat + hhs + cen + sta:
weight[reg] = {}
states = within[reg]
total = sum([population[s] for s in states])
population[reg] = total
for s in sta:
if s in states:
weight[reg][s] = population[s] / total
else:
weight[reg][s] = 0
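    # Worked example (illustrative): region 'hhs2' contains NJ and NY, so
    # weight['hhs2']['NY'] = 19570261 / (8864590 + 19570261) ~= 0.688, and every
    # state outside the region gets weight 0.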
# the regions for each state
state_regions = {}
for s in sta:
h, c = None, None
for r in hhs:
if s in within[r]:
h = r
break
for r in cen:
if s in within[r]:
c = r
break
state_regions[s] = {'hhs': h, 'cen': c}
# exports
self.nat = nat
self.hhs = hhs
self.cen = cen
self.sta = sta
self.population = population
self.within = within
self.weight = weight
self.state_regions = state_regions
| 31.95082 | 69 | 0.448948 |
e3d2266e292ad35a17916a8b0194af285b38b059 | 694 | py | Python | setup.py | prostmich/pykupi | 6570d9ba6bf29457a06907ff44eeb1ffc3615e36 | [
"MIT"
] | null | null | null | setup.py | prostmich/pykupi | 6570d9ba6bf29457a06907ff44eeb1ffc3615e36 | [
"MIT"
] | null | null | null | setup.py | prostmich/pykupi | 6570d9ba6bf29457a06907ff44eeb1ffc3615e36 | [
"MIT"
] | null | null | null | import setuptools
from setuptools import find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pykupi",
version="0.0.1",
author="Mikhail Smolnikov",
author_email="[email protected]",
description="The easiest way to getting prices from kupi.cz",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/prostmich/pykupi",
project_urls={"Bug Tracker": "https://github.com/prostmich/pykupi/issues"},
license="MIT",
packages=find_packages(),
install_requires=["aiohttp>=3.7.2,<4.0.0", "bs4==0.0.1", "pydantic>=1.9.0"],
)
| 33.047619 | 80 | 0.695965 |
93cf5be20b9e8d46f0a19e974317a397a57bd8c4 | 19,192 | py | Python | tests/gis_tests/test_geoforms.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 4 | 2016-11-29T13:10:37.000Z | 2016-12-19T11:41:54.000Z | tests/gis_tests/test_geoforms.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 8 | 2017-04-19T16:20:47.000Z | 2022-03-28T14:40:11.000Z | tests/gis_tests/test_geoforms.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 3 | 2020-07-13T04:49:16.000Z | 2021-12-22T21:15:14.000Z | import re
from django.contrib.gis import forms
from django.contrib.gis.forms import BaseGeometryWidget, OpenLayersWidget
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase, override_settings
from django.utils.html import escape
class GeometryFieldTest(SimpleTestCase):
def test_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
with self.subTest(bad_default=bad_default):
with self.assertRaises(ValidationError):
fld.clean(bad_default)
def test_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry is transformed to 32140 (the widget map_srid is 3857).
cleaned_geom = fld.clean('SRID=3857;POINT (-10615777.40976205 3473169.895707852)')
self.assertEqual(cleaned_geom.srid, 32140)
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
with self.assertRaisesMessage(ValidationError, "No geometry value provided."):
fld.clean(None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertIsNone(fld.clean(None))
def test_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
with self.subTest(wkt=wkt):
# to_python() uses the SRID of OpenLayersWidget if the
# converted value doesn't have an SRID.
self.assertEqual(GEOSGeometry(wkt, srid=fld.widget.map_srid), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)', srid=pnt_fld.widget.map_srid), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(
GEOSGeometry('LINESTRING(0 0, 1 1)', srid=pnt_fld.widget.map_srid),
pnt_fld.to_python('LINESTRING(0 0, 1 1)')
)
# but rejected by `clean`
with self.assertRaises(ValidationError):
pnt_fld.clean('LINESTRING(0 0, 1 1)')
def test_to_python(self):
"""
to_python() either returns a correct GEOSGeometry object or
a ValidationError.
"""
good_inputs = [
'POINT(5 23)',
'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))',
'LINESTRING(0 0, 1 1)',
]
bad_inputs = [
'POINT(5)',
'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))',
'BLAH(0 0, 1 1)',
'{"type": "FeatureCollection", "features": ['
'{"geometry": {"type": "Point", "coordinates": [508375, 148905]}, "type": "Feature"}]}',
]
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for geo_input in good_inputs:
with self.subTest(geo_input=geo_input):
self.assertEqual(GEOSGeometry(geo_input, srid=fld.widget.map_srid), fld.to_python(geo_input))
# but raises a ValidationError for any other string
for geo_input in bad_inputs:
with self.subTest(geo_input=geo_input):
with self.assertRaises(ValidationError):
fld.to_python(geo_input)
def test_to_python_different_map_srid(self):
f = forms.GeometryField(widget=OpenLayersWidget)
json = '{ "type": "Point", "coordinates": [ 5.0, 23.0 ] }'
self.assertEqual(GEOSGeometry('POINT(5 23)', srid=f.widget.map_srid), f.to_python(json))
def test_field_with_text_widget(self):
class PointForm(forms.Form):
pt = forms.PointField(srid=4326, widget=forms.TextInput)
form = PointForm()
cleaned_pt = form.fields['pt'].clean('POINT(5 23)')
self.assertEqual(cleaned_pt, GEOSGeometry('POINT(5 23)', srid=4326))
self.assertEqual(4326, cleaned_pt.srid)
with self.assertRaisesMessage(ValidationError, 'Invalid geometry value.'):
form.fields['pt'].clean('POINT(5)')
point = GEOSGeometry('SRID=4326;POINT(5 23)')
form = PointForm(data={'pt': 'POINT(5 23)'}, initial={'pt': point})
self.assertFalse(form.has_changed())
def test_field_string_value(self):
"""
Initialization of a geometry field with a valid/empty/invalid string.
Only the invalid string should trigger an error log entry.
"""
class PointForm(forms.Form):
pt1 = forms.PointField(srid=4326)
pt2 = forms.PointField(srid=4326)
pt3 = forms.PointField(srid=4326)
form = PointForm({
'pt1': 'SRID=4326;POINT(7.3 44)', # valid
'pt2': '', # empty
'pt3': 'PNT(0)', # invalid
})
with self.assertLogs('django.contrib.gis', 'ERROR') as logger_calls:
output = str(form)
# The first point can't use assertInHTML() due to non-deterministic
# ordering of the rendered dictionary.
pt1_serialized = re.search(r'<textarea [^>]*>({[^<]+})<', output)[1]
pt1_json = pt1_serialized.replace('"', '"')
pt1_expected = GEOSGeometry(form.data['pt1']).transform(3857, clone=True)
self.assertJSONEqual(pt1_json, pt1_expected.json)
self.assertInHTML(
'<textarea id="id_pt2" class="vSerializedField required" cols="150"'
' rows="10" name="pt2"></textarea>',
output
)
self.assertInHTML(
'<textarea id="id_pt3" class="vSerializedField required" cols="150"'
' rows="10" name="pt3"></textarea>',
output
)
# Only the invalid PNT(0) triggers an error log entry.
# Deserialization is called in form clean and in widget rendering.
self.assertEqual(len(logger_calls.records), 2)
self.assertEqual(
logger_calls.records[0].getMessage(),
"Error creating geometry from value 'PNT(0)' (String input "
"unrecognized as WKT EWKT, and HEXEWKB.)"
)
class SpecializedFieldTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
'multipoint': GEOSGeometry("SRID=4326;MULTIPOINT("
"(13.18634033203125 14.504356384277344),"
"(13.207969665527 14.490966796875),"
"(13.177070617675 14.454917907714))"),
'linestring': GEOSGeometry("SRID=4326;LINESTRING("
"-8.26171875 -0.52734375,"
"-7.734375 4.21875,"
"6.85546875 3.779296875,"
"5.44921875 -3.515625)"),
'multilinestring': GEOSGeometry("SRID=4326;MULTILINESTRING("
"(-16.435546875 -2.98828125,"
"-17.2265625 2.98828125,"
"-0.703125 3.515625,"
"-1.494140625 -3.33984375),"
"(-8.0859375 -5.9765625,"
"8.525390625 -8.7890625,"
"12.392578125 -0.87890625,"
"10.01953125 7.646484375))"),
'polygon': GEOSGeometry("SRID=4326;POLYGON("
"(-1.669921875 6.240234375,"
"-3.8671875 -0.615234375,"
"5.9765625 -3.955078125,"
"18.193359375 3.955078125,"
"9.84375 9.4921875,"
"-1.669921875 6.240234375))"),
'multipolygon': GEOSGeometry("SRID=4326;MULTIPOLYGON("
"((-17.578125 13.095703125,"
"-17.2265625 10.8984375,"
"-13.974609375 10.1953125,"
"-13.359375 12.744140625,"
"-15.732421875 13.7109375,"
"-17.578125 13.095703125)),"
"((-8.525390625 5.537109375,"
"-8.876953125 2.548828125,"
"-5.888671875 1.93359375,"
"-5.09765625 4.21875,"
"-6.064453125 6.240234375,"
"-8.525390625 5.537109375)))"),
'geometrycollection': GEOSGeometry("SRID=4326;GEOMETRYCOLLECTION("
"POINT(5.625 -0.263671875),"
"POINT(6.767578125 -3.603515625),"
"POINT(8.525390625 0.087890625),"
"POINT(8.0859375 -2.13134765625),"
"LINESTRING("
"6.273193359375 -1.175537109375,"
"5.77880859375 -1.812744140625,"
"7.27294921875 -2.230224609375,"
"7.657470703125 -1.25244140625))"),
}
def assertMapWidget(self, form_instance):
"""
Make sure the MapWidget js is passed in the form media and a MapWidget
is actually created
"""
self.assertTrue(form_instance.is_valid())
rendered = form_instance.as_p()
self.assertIn('new MapWidget(options);', rendered)
self.assertIn('map_srid: 3857,', rendered)
self.assertIn('gis/js/OLMapWidget.js', str(form_instance.media))
def assertTextarea(self, geom, rendered):
"""Makes sure the wkt and a textarea are in the content"""
self.assertIn('<textarea ', rendered)
self.assertIn('required', rendered)
ogr = geom.ogr
ogr.transform(3857)
self.assertIn(escape(ogr.json), rendered)
# map_srid in openlayers.html template must not be localized.
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_pointfield(self):
class PointForm(forms.Form):
p = forms.PointField()
geom = self.geometries['point']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
invalid = PointForm(data={'p': 'some invalid geom'})
self.assertFalse(invalid.is_valid())
self.assertIn('Invalid geometry value', str(invalid.errors))
for invalid in [geo for key, geo in self.geometries.items() if key != 'point']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_multipointfield(self):
class PointForm(forms.Form):
p = forms.MultiPointField()
geom = self.geometries['multipoint']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipoint']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_linestringfield(self):
class LineStringForm(forms.Form):
f = forms.LineStringField()
geom = self.geometries['linestring']
form = LineStringForm(data={'f': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'linestring']:
self.assertFalse(LineStringForm(data={'p': invalid.wkt}).is_valid())
def test_multilinestringfield(self):
class LineStringForm(forms.Form):
f = forms.MultiLineStringField()
geom = self.geometries['multilinestring']
form = LineStringForm(data={'f': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multilinestring']:
self.assertFalse(LineStringForm(data={'p': invalid.wkt}).is_valid())
def test_polygonfield(self):
class PolygonForm(forms.Form):
p = forms.PolygonField()
geom = self.geometries['polygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'polygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_multipolygonfield(self):
class PolygonForm(forms.Form):
p = forms.MultiPolygonField()
geom = self.geometries['multipolygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipolygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_geometrycollectionfield(self):
class GeometryForm(forms.Form):
g = forms.GeometryCollectionField()
geom = self.geometries['geometrycollection']
form = GeometryForm(data={'g': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(GeometryForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'geometrycollection']:
self.assertFalse(GeometryForm(data={'g': invalid.wkt}).is_valid())
class OSMWidgetTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
}
def test_osm_widget(self):
class PointForm(forms.Form):
p = forms.PointField(widget=forms.OSMWidget)
geom = self.geometries['point']
form = PointForm(data={'p': geom})
rendered = form.as_p()
self.assertIn("ol.source.OSM()", rendered)
self.assertIn("id: 'id_p',", rendered)
def test_default_lat_lon(self):
self.assertEqual(forms.OSMWidget.default_lon, 5)
self.assertEqual(forms.OSMWidget.default_lat, 47)
self.assertEqual(forms.OSMWidget.default_zoom, 12)
class PointForm(forms.Form):
p = forms.PointField(
widget=forms.OSMWidget(attrs={
'default_lon': 20,
'default_lat': 30,
'default_zoom': 17,
}),
)
form = PointForm()
rendered = form.as_p()
self.assertIn("options['default_lon'] = 20;", rendered)
self.assertIn("options['default_lat'] = 30;", rendered)
self.assertIn("options['default_zoom'] = 17;", rendered)
class GeometryWidgetTests(SimpleTestCase):
def test_get_context_attrs(self):
# The Widget.get_context() attrs argument overrides self.attrs.
widget = BaseGeometryWidget(attrs={'geom_type': 'POINT'})
context = widget.get_context('point', None, attrs={'geom_type': 'POINT2'})
self.assertEqual(context['geom_type'], 'POINT2')
# Widget.get_context() returns expected name for geom_type.
widget = BaseGeometryWidget(attrs={'geom_type': 'POLYGON'})
context = widget.get_context('polygon', None, None)
self.assertEqual(context['geom_type'], 'Polygon')
# Widget.get_context() returns 'Geometry' instead of 'Unknown'.
widget = BaseGeometryWidget(attrs={'geom_type': 'GEOMETRY'})
context = widget.get_context('geometry', None, None)
self.assertEqual(context['geom_type'], 'Geometry')
def test_subwidgets(self):
widget = forms.BaseGeometryWidget()
self.assertEqual(
list(widget.subwidgets('name', 'value')),
[{
'is_hidden': False,
'attrs': {
'map_srid': 4326,
'map_width': 600,
'geom_type': 'GEOMETRY',
'map_height': 400,
'display_raw': False,
},
'name': 'name',
'template_name': '',
'value': 'value',
'required': False,
}]
)
def test_custom_serialization_widget(self):
class CustomGeometryWidget(forms.BaseGeometryWidget):
template_name = 'gis/openlayers.html'
deserialize_called = 0
def serialize(self, value):
return value.json if value else ''
def deserialize(self, value):
self.deserialize_called += 1
return GEOSGeometry(value)
class PointForm(forms.Form):
p = forms.PointField(widget=CustomGeometryWidget)
point = GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)")
form = PointForm(data={'p': point})
self.assertIn(escape(point.json), form.as_p())
CustomGeometryWidget.called = 0
widget = form.fields['p'].widget
# Force deserialize use due to a string value
self.assertIn(escape(point.json), widget.render('p', point.json))
self.assertEqual(widget.deserialize_called, 1)
form = PointForm(data={'p': point.json})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['p'].srid, 4326)
| 43.817352 | 113 | 0.563724 |
503e75712cbdf370691a3c2b9d490c9507d6642d | 8,033 | py | Python | main.py | actuatech/fuel-tourism | 60e6953cdcccf164e5cd03916a1c3b3c2b071a85 | [
"MIT"
] | null | null | null | main.py | actuatech/fuel-tourism | 60e6953cdcccf164e5cd03916a1c3b3c2b071a85 | [
"MIT"
] | null | null | null | main.py | actuatech/fuel-tourism | 60e6953cdcccf164e5cd03916a1c3b3c2b071a85 | [
"MIT"
 ] | null | null | null | # TODO: Prepare the file so that ingestion does not crash when the file columns are different
# TODO: configuration file
import pandas as pd
from pathlib import Path
from datetime import datetime
import logging
import os
from Logger import setup_logger
from Ingestion import register_ingestor_function
from DataCleaning import (
filter_by_year_greater_or_equal_than,
keep_row_if_na_in_column,
drop_vehicles_with_no_fuel_associated,
filter_by_year_smaller_than,
convert_to_integer_df_columns,
print_info,
save_to_csv_vehicles_with_erroneous_data
)
from Classification import (
category_fuel_segment_euro_classification_wrapper_function,
MAPPING_CATEGORY_LAST_EURO_STANDARD
)
from Activity import (
activity_time_and_km_between_itv_revisions,
check_for_activity_outliers,
calculate_activity_outliers_thresholds,
activity_stats_calculator_by_grouping
)
from Graphing import (
euro_distribution_pie_charts,
stock_per_category_pie_chart,
stock_per_manufacturing_year_and_category_bar_charts,
activity_horizontal_bar_chart
)
# Working directory
dirname = os.path.dirname(__file__)
# Define the current working directory
cwd = Path.cwd()
# Create and initialize loggers
setup_logger('logger', (cwd / 'output/debug.log'))
setup_logger('info_logger', (cwd / 'output/info.log'), stream=False)
logger = logging.getLogger('logger')
info_logger = logging.getLogger('info_logger')
logger.info("Started")
# ----------
# PARAMETERS
# ----------
# ITV original data filename (Parameter)
filename_registre_vehicles = '01FEB2021_Historic_vehicles_amb_ITVs.xlsx'
path_registre_vehicles = cwd / '_data' / filename_registre_vehicles
# 01FEB2021_Historic_vehicles_amb_ITVs
# Bounds of the time window within which vehicle data is kept
MIN_YEAR = 1990
MAX_DATE = datetime(2021, 1, 1)
MIN_DAYS_BETWEEN_REVISIONS = 150
MIN_STOCK_FOR_MEAN_ACTIVITY_CALCULATION = 50  # Minimum number of vehicles in a grouping for its mean activity to be considered valid
# To keep current stock but calculate activity before covid date
COVID_MILEAGE_ACTIVE = True
COVID_START_DATE = datetime(2019, 3, 1)
# Output folder for results:
output_folder = '/Users/nilcelisfont/dev/fuel-turism/output/'
# Output filename of cleaned and categorized data:
filename_output_categorized_vehicle_data = output_folder + f'Registre_vehicles_{datetime.now().date()}.csv'
# Output filename for stock and activity dataframe
filename_output_stock_activity = output_folder + f'stock_activity_2019_{datetime.now().date()}.csv'
# ----
# CODE
# ----
# LOADING DATA
itv_raw = register_ingestor_function(path_registre_vehicles)
info_logger.info(f'Total number of vehicles loaded: {itv_raw.shape[0]}')
# DATA CLEANING
# Keep only last 30 years of data
vehicles_last_30_years = filter_by_year_greater_or_equal_than(itv_raw, 'ANY_FABRICACIO', MIN_YEAR)
vehicles_last_30_years = filter_by_year_smaller_than(vehicles_last_30_years, 'DATA_ALTA', MAX_DATE)
# Keep only vehicles that are not decommissioned
vehicles_last_30_years_active_today = keep_row_if_na_in_column(vehicles_last_30_years, 'DATA_BAIXA')
# Drop vehicles with missing Fuel data
good_vehicles_df = drop_vehicles_with_no_fuel_associated(vehicles_last_30_years_active_today)
info_logger.info(f'Total number of vehicles taken into account: {good_vehicles_df.shape[0]}')
# CATEGORIZING VEHICLES
categorized_vehicles_df = category_fuel_segment_euro_classification_wrapper_function(good_vehicles_df)
# Create columns Mileage, number of days and corresponding Activity for each vehicle
categorized_vehicles_df['Num_of_days'], categorized_vehicles_df['Mileage'], categorized_vehicles_df['Activity'], \
categorized_vehicles_df['Lifetime Activity'] = zip(*categorized_vehicles_df.apply(
lambda row: activity_time_and_km_between_itv_revisions(row, MAX_DATE), axis=1))
# Set Activity outliers to NaN
activity_outliers_per_category_mapping, lifetime_activity_outliers_per_category_mapping =\
calculate_activity_outliers_thresholds(categorized_vehicles_df)
# Save erroneous data to csv files
save_to_csv_vehicles_with_erroneous_data(categorized_vehicles_df, output_folder,
activity_outliers_per_category_mapping,
lifetime_activity_outliers_per_category_mapping)
categorized_vehicles_df['Activity'], categorized_vehicles_df['Lifetime Activity'] = zip(*categorized_vehicles_df.apply(
lambda row: check_for_activity_outliers(row, activity_outliers_per_category_mapping,
lifetime_activity_outliers_per_category_mapping), axis=1))
# Save cleaned, categorized data and vehicle activity to csv
print_info(categorized_vehicles_df) # print info
logger.info('Saving cleaned, categorized data and vehicle activity to csv')
categorized_vehicles_df.to_csv(filename_output_categorized_vehicle_data)
# Create Stock Column
categorized_vehicles_df['Stock'] = 1
# STOCK CONFIGURATION
stock_df = categorized_vehicles_df.groupby(
['Category', 'Fuel', 'Segment', 'Euro Standard'], dropna=False, as_index=False).agg(Stock=('Stock', 'sum'))
# Filter categorized dataframe to drop vehicles that have a revision after COVID_START_DATE
if COVID_MILEAGE_ACTIVE:
categorized_vehicles_df_before_covid = filter_by_year_smaller_than(categorized_vehicles_df,
'DATA_DARRERA_ITV', COVID_START_DATE)
else:
categorized_vehicles_df_before_covid = categorized_vehicles_df
# Statistics calculation
mileage_df = categorized_vehicles_df_before_covid.groupby(
['Category', 'Fuel', 'Segment', 'Euro Standard'], dropna=False, as_index=False).agg(
Mileage=('Mileage', 'sum'),
Min_Activity=('Activity', 'min'),
Max_Activity=('Activity', 'max'),
Std_Activity=('Activity', 'std'),
Mean_Activity=('Activity', 'mean'),
Mean_Lifetime_Activity=('Lifetime Activity', 'mean'),
Notna_Count=('Activity', 'count')
)
# Join stock configuration with associated mileage
stock_and_mileage_df = pd.merge(stock_df, mileage_df, on=['Category', 'Fuel', 'Segment', 'Euro Standard'], how='left')
stock_and_mileage_df['Notna_Count'].fillna(0, inplace=True)
# Calculate statistics for categorizations that do not have enough data, by grouping
stats_df = stock_and_mileage_df.apply(
lambda row: activity_stats_calculator_by_grouping(
row, categorized_vehicles_df, MAPPING_CATEGORY_LAST_EURO_STANDARD, MIN_STOCK_FOR_MEAN_ACTIVITY_CALCULATION)
, result_type='expand', axis='columns').rename(columns={0: 'Mean_Activity',
1: 'Min_Activity',
2: 'Max_Activity',
3: 'Std_Activity',
4: 'Mean_Lifetime_Activity',
}
)
# Join stock with updated activity statistics
stock_and_mileage_df = pd.concat(
[stock_and_mileage_df.drop(['Mean_Activity', 'Min_Activity', 'Max_Activity', 'Std_Activity',
'Mean_Lifetime_Activity'], axis=1), stats_df],
axis='columns')
# Convert activity statistics columns to integer and check for nan values
convert_to_integer_df_columns(stock_and_mileage_df)
logger.info('Saving stock and activity to csv')
stock_and_mileage_df.drop(['Notna_Count'], axis=1).to_csv(filename_output_stock_activity)
logger.info(f'Number of categories: {stock_and_mileage_df.shape[0]}')
# Save wanted results
logger.info('Loading charts')
stock_per_category_pie_chart(categorized_vehicles_df, output_folder)
euro_distribution_pie_charts(categorized_vehicles_df, output_folder)
stock_per_manufacturing_year_and_category_bar_charts(categorized_vehicles_df, output_folder)
activity_horizontal_bar_chart(stock_and_mileage_df, output_folder)
logger.info('end')
| 43.188172 | 120 | 0.749907 |
6748fb17ea8db09cee2dea140a0c22d3f9bbb057 | 1,209 | py | Python | .history/my_classes/FirstClassFunctions/MapFilterZipList_20210706151716.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/MapFilterZipList_20210706151716.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/MapFilterZipList_20210706151716.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """Map Filter Zip List Comprehensions
Higher order functions
A function that takes a function as a parameter and/or returns a function as it's return value
Example: sorted
map _
|
-- modern alternative -> list comprehensions and generator expressions
|
filter -
The map function
map(func, *iterables)
*iterables -> a variable number of iterable objects
func -> some function that takes as many arguments as there are iterable objects passed to iterables
map(func, *iterables) will then return an iterator that calculates the function applied to each element of the iterables
The iterator stops as soon as one of the iterables has been exhausted, so unequal-length iterables can be used
Examples
"""
l = [2, 3, 4]
def sq(x):
return x**2
list(map(sq, l)) # [4, 9, 16]
l1 = [1, 2, 3]
l2 = [10, 20, 30]
def add(x, y):
return x + y
list(map(add, l1, l2)) # [11, 22, 33]
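# Illustrative additions (not in the original note), reusing l, l1, l2 and add
# defined above: map stops at the shortest iterable, zip pairs elements up the
# same way, a list comprehension can replace map, and filter (described at the
# end of this note) keeps only the elements for which the function is truthy.
list(map(add, [1, 2, 3], [10, 20]))  # [11, 22] - stops at the shorter iterable
list(zip(l1, l2))  # [(1, 10), (2, 20), (3, 30)]
[x ** 2 for x in l]  # [4, 9, 16] - list comprehension equivalent of map(sq, l)
list(filter(lambda x: x % 2 == 0, l))  # [2, 4] - keeps only the even numbers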
"""The filter function
filter(func, iterable)
iterable -> a single iterable
func -> some function that takes a single argument
filter(func, iterable)
"""
| 20.844828 | 120 | 0.622829 |
22de12001a354138cf9571f0703b0331c777d832 | 108 | py | Python | custom/succeed/tests/__init__.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | custom/succeed/tests/__init__.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | custom/succeed/tests/__init__.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | from custom.succeed.reports.all_patients import date_format
__test__ = {
'date_format': date_format,
}
| 18 | 59 | 0.768519 |
6b8aa6e00038b05edbb52dbfe8f810bc45bb2b5e | 2,856 | py | Python | qa/rpc-tests/blockchain.py | supertref/navcoin-core | cc6fd1c9661fcbd3b82c8f2506c0f863da43b392 | [
"MIT"
] | 1 | 2020-08-28T02:32:47.000Z | 2020-08-28T02:32:47.000Z | qa/rpc-tests/blockchain.py | supertref/navcoin-core | cc6fd1c9661fcbd3b82c8f2506c0f863da43b392 | [
"MIT"
] | 3 | 2019-07-18T02:10:02.000Z | 2019-08-28T22:09:31.000Z | qa/rpc-tests/blockchain.py | supertref/navcoin-core | cc6fd1c9661fcbd3b82c8f2506c0f863da43b392 | [
"MIT"
] | 2 | 2020-09-06T20:02:00.000Z | 2020-11-19T18:47:42.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC calls related to blockchain state. Tests correspond to code in
# rpc/blockchain.cpp.
#
from decimal import Decimal
from test_framework.test_framework import NavCoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import (
assert_equal,
assert_raises,
assert_is_hex_string,
assert_is_hash_string,
start_nodes,
connect_nodes_bi,
)
class BlockchainTest(NavCoinTestFramework):
"""
Test blockchain-related RPC calls:
- gettxoutsetinfo
- verifychain
"""
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 2
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
self._test_gettxoutsetinfo()
self._test_getblockheader()
self.nodes[0].verifychain(4, 0)
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
        assert_equal(res['bytes_serialized'], 13924)
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized']), 64)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises(
JSONRPCException, lambda: node.getblockheader('nonsense'))
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
if __name__ == '__main__':
BlockchainTest().main()
| 31.733333 | 73 | 0.678221 |
87689f1619752d3b028a5061f45c9dd412b16fd7 | 5,576 | py | Python | gammapy/irf/io.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 155 | 2015-02-25T12:38:02.000Z | 2022-03-13T17:54:30.000Z | gammapy/irf/io.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 3,131 | 2015-01-06T15:36:23.000Z | 2022-03-31T17:30:57.000Z | gammapy/irf/io.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 158 | 2015-03-16T20:36:44.000Z | 2022-03-30T16:05:37.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
from astropy.io import fits
from gammapy.utils.scripts import make_path
from gammapy.utils.fits import HDULocation
from gammapy.data.hdu_index_table import HDUIndexTable
__all__ = ["load_cta_irfs", "load_irf_dict_from_file"]
log = logging.getLogger(__name__)
IRF_DL3_AXES_SPECIFICATION = {
"THETA": {"name": "offset", "interp": "lin"},
"ENERG": {"name": "energy_true", "interp": "log"},
"ETRUE": {"name": "energy_true", "interp": "log"},
"RAD": {"name": "rad", "interp": "lin"},
"DETX": {"name": "fov_lon", "interp": "lin"},
"DETY": {"name": "fov_lat", "interp": "lin"},
"MIGRA": {"name": "migra", "interp": "lin"},
}
# The key is the class tag.
# TODO: extend the info here with the minimal header info
IRF_DL3_HDU_SPECIFICATION = {
"bkg_3d": {
"extname": "BACKGROUND",
"column_name": "BKG",
"hduclas2": "BKG",
},
"bkg_2d": {
"extname": "BACKGROUND",
"column_name": "BKG",
"hduclas2": "BKG",
},
"edisp_2d": {
"extname": "ENERGY DISPERSION",
"column_name": "MATRIX",
"hduclas2": "EDISP",
},
"psf_table": {
"extname": "PSF_2D_TABLE",
"column_name": "RPSF",
"hduclas2": "PSF",
},
"psf_3gauss": {
"extname": "PSF_2D_GAUSS",
"hduclas2": "PSF",
"column_name":
{
"sigma_1": "SIGMA_1",
"sigma_2": "SIGMA_2",
"sigma_3": "SIGMA_3",
"scale": "SCALE",
"ampl_2": "AMPL_2",
"ampl_3": "AMPL_3",
}
},
"psf_king": {
"extname": "PSF_2D_KING",
"hduclas2": "PSF",
"column_name":
{
"sigma": "SIGMA",
"gamma": "GAMMA",
}
},
"aeff_2d": {
"extname": "EFFECTIVE AREA",
"column_name": "EFFAREA",
"hduclas2": "EFF_AREA",
},
"rad_max_2d": {
"extname": "RAD_MAX",
"column_name": "RAD_MAX",
"hduclas2": "RAD_MAX",
}
}
IRF_MAP_HDU_SPECIFICATION = {
"edisp_kernel_map": "edisp",
"edisp_map": "edisp",
"psf_map": "psf"
}
def load_cta_irfs(filename):
    """Load the CTA instrument response functions and return them in a dictionary.
The IRF format should be compliant with the one discussed
at http://gamma-astro-data-formats.readthedocs.io/en/latest/irfs/.
The various IRFs are accessible with the following keys:
- 'aeff' is a `~gammapy.irf.EffectiveAreaTable2D`
- 'edisp' is a `~gammapy.irf.EnergyDispersion2D`
- 'psf' is a `~gammapy.irf.EnergyDependentMultiGaussPSF`
- 'bkg' is a `~gammapy.irf.Background3D`
Parameters
----------
filename : str
        the input filename.
Returns
-------
cta_irf : dict
the IRF dictionary
Examples
--------
Access the CTA 1DC IRFs stored in the gammapy datasets
>>> from gammapy.irf import load_cta_irfs
>>> cta_irf = load_cta_irfs("$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits")
>>> print(cta_irf['aeff'])
EffectiveAreaTable2D
--------------------
<BLANKLINE>
axes : ['energy_true', 'offset']
shape : (42, 6)
ndim : 2
unit : m2
dtype : >f4
<BLANKLINE>
"""
from .background import Background3D
from .effective_area import EffectiveAreaTable2D
from .edisp import EnergyDispersion2D
from .psf import EnergyDependentMultiGaussPSF
aeff = EffectiveAreaTable2D.read(filename, hdu="EFFECTIVE AREA")
bkg = Background3D.read(filename, hdu="BACKGROUND")
edisp = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")
psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
return dict(aeff=aeff, bkg=bkg, edisp=edisp, psf=psf)
def load_irf_dict_from_file(filename):
"""Open a fits file and generate a dictionary containing the Gammapy objects
    corresponding to the IRF components stored in it.
Parameters
----------
filename : str, Path
        path to the file containing the IRF components; if EVENTS and GTI HDUs
        are included in the file, they are ignored
Returns
-------
irf_dict : dict of `~gammapy.irf.IRF`
dictionary with instances of the Gammapy objects corresponding
to the IRF components
"""
filename = make_path(filename)
hdulist = fits.open(make_path(filename))
irf_dict = {}
for hdu in hdulist:
hdu_class = hdu.header.get("HDUCLAS1", "").lower()
if hdu_class == "response":
hdu_class = hdu.header.get("HDUCLAS4", "").lower()
loc = HDULocation(
hdu_class=hdu_class,
hdu_name=hdu.name,
file_dir=filename.parent,
file_name=filename.name
)
for name in HDUIndexTable.VALID_HDU_TYPE:
if name in hdu_class:
if name in irf_dict.keys():
log.warning(f"more than one HDU of {name} type found")
log.warning(f"loaded the {irf_dict[name].meta['EXTNAME']} HDU in the dictionary")
continue
data = loc.load()
# TODO: maybe introduce IRF.type attribute...
irf_dict[name] = data
        else:  # not an IRF component
continue
return irf_dict
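# Minimal usage sketch (illustrative, not part of the original module). The path
# below reuses the example file from the load_cta_irfs docstring and is otherwise
# hypothetical; the keys returned depend on which HDUs the file actually holds
# (typically "aeff", "edisp", "psf" and "bkg"):
#
#     irfs = load_irf_dict_from_file("$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits")
#     print(irfs["aeff"])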
| 29.502646 | 107 | 0.571377 |
4d2a1297fea7aa5fe94680cbebb141fe31d40450 | 8,367 | py | Python | sdk/python/pulumi_azure_nextgen/web/v20181101/web_app_deployment.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/web/v20181101/web_app_deployment.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/web/v20181101/web_app_deployment.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['WebAppDeployment']
class WebAppDeployment(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
author: Optional[pulumi.Input[str]] = None,
author_email: Optional[pulumi.Input[str]] = None,
deployer: Optional[pulumi.Input[str]] = None,
details: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
User credentials used for publishing activity.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: True if deployment is currently active, false if completed and null if not started.
:param pulumi.Input[str] author: Who authored the deployment.
:param pulumi.Input[str] author_email: Author email.
:param pulumi.Input[str] deployer: Who performed the deployment.
:param pulumi.Input[str] details: Details on deployment.
:param pulumi.Input[str] end_time: End time.
:param pulumi.Input[str] id: ID of an existing deployment.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] message: Details about deployment status.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] start_time: Start time.
:param pulumi.Input[int] status: Deployment status.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['active'] = active
__props__['author'] = author
__props__['author_email'] = author_email
__props__['deployer'] = deployer
__props__['details'] = details
__props__['end_time'] = end_time
if id is None:
raise TypeError("Missing required property 'id'")
__props__['id'] = id
__props__['kind'] = kind
__props__['message'] = message
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['start_time'] = start_time
__props__['status'] = status
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppDeployment"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppDeployment"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppDeployment"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppDeployment"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppDeployment"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppDeployment")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppDeployment, __self__).__init__(
'azure-nextgen:web/v20181101:WebAppDeployment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppDeployment':
"""
Get an existing WebAppDeployment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppDeployment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def active(self) -> pulumi.Output[Optional[bool]]:
"""
True if deployment is currently active, false if completed and null if not started.
"""
return pulumi.get(self, "active")
@property
@pulumi.getter
def author(self) -> pulumi.Output[Optional[str]]:
"""
Who authored the deployment.
"""
return pulumi.get(self, "author")
@property
@pulumi.getter(name="authorEmail")
def author_email(self) -> pulumi.Output[Optional[str]]:
"""
Author email.
"""
return pulumi.get(self, "author_email")
@property
@pulumi.getter
def deployer(self) -> pulumi.Output[Optional[str]]:
"""
Who performed the deployment.
"""
return pulumi.get(self, "deployer")
@property
@pulumi.getter
def details(self) -> pulumi.Output[Optional[str]]:
"""
Details on deployment.
"""
return pulumi.get(self, "details")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> pulumi.Output[Optional[str]]:
"""
End time.
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def message(self) -> pulumi.Output[Optional[str]]:
"""
Details about deployment status.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[Optional[str]]:
"""
Start time.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[int]]:
"""
Deployment status.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 38.380734 | 458 | 0.61635 |
526e57ced88309436252522be14b04625da43b78 | 28,762 | py | Python | integration-testing/client/CasperLabsClient/casperlabs_client/cli.py | zie1ony/CasperLabs | 7921f306ebcc98104cbe628391168afedbb5e2e8 | [
"Apache-2.0"
] | null | null | null | integration-testing/client/CasperLabsClient/casperlabs_client/cli.py | zie1ony/CasperLabs | 7921f306ebcc98104cbe628391168afedbb5e2e8 | [
"Apache-2.0"
] | null | null | null | integration-testing/client/CasperLabsClient/casperlabs_client/cli.py | zie1ony/CasperLabs | 7921f306ebcc98104cbe628391168afedbb5e2e8 | [
"Apache-2.0"
] | null | null | null | """
Command line interface for CasperLabsClient.
"""
import argparse
import textwrap
import base64
import sys
import os
import functools
import logging
from pathlib import Path
import datetime
from casperlabs_client import (
CasperLabsClient,
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_INTERNAL_PORT,
bundled_contract,
)
from casperlabs_client.utils import hexify
from casperlabs_client.abi import ABI
from casperlabs_client.crypto import (
read_pem_key,
generate_validators_keys,
generate_key_pair,
public_address,
private_to_public_key,
generate_certificates,
)
from . import consensus_pb2 as consensus
DEFAULT_PAYMENT_AMOUNT = 10000000
DOT_FORMATS = "canon,cmap,cmapx,cmapx_np,dot,dot_json,eps,fig,gd,gd2,gif,gv,imap,imap_np,ismap,jpe,jpeg,jpg,json,json0,mp,pdf,pic,plain,plain-ext,png,pov,ps,ps2,svg,svgz,tk,vml,vmlz,vrml,wbmp,x11,xdot,xdot1.2,xdot1.4,xdot_json,xlib"
def guarded_command(function):
"""
Decorator of functions that implement CLI commands.
    Occasionally the node can throw some exceptions instead of properly sending us a response;
    those will be deserialized on our end and rethrown by the gRPC layer.
In this case we want to catch the exception and return a non-zero return code to the shell.
:param function: function to be decorated
:return:
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
rc = function(*args, **kwargs)
# Generally the CLI commands are assumed to succeed if they don't throw,
# but they can also return a positive error code if they need to.
if rc is not None:
return rc
return 0
except Exception as e:
print(str(e), file=sys.stderr)
return 1
return wrapper
def _show_blocks(response, element_name="block"):
count = 0
for block in response:
print(f"------------- {element_name} {count} ---------------")
print(hexify(block))
print("-----------------------------------------------------\n")
count += 1
print("count:", count)
def _show_block(response):
print(hexify(response))
def _set_session(args, file_name):
"""
Use bundled contract unless one of the session* args is set.
"""
if not any((args.session, args.session_hash, args.session_name, args.session_uref)):
args.session = bundled_contract(file_name)
@guarded_command
def bond_command(casperlabs_client, args):
logging.info(f"BOND {args}")
_set_session(args, "bonding.wasm")
if not args.session_args:
args.session_args = ABI.args_to_json(
ABI.args([ABI.long_value("amount", args.amount)])
)
return deploy_command(casperlabs_client, args)
@guarded_command
def unbond_command(casperlabs_client, args):
logging.info(f"UNBOND {args}")
_set_session(args, "unbonding.wasm")
if not args.session_args:
args.session_args = ABI.args_to_json(
ABI.args(
[ABI.optional_value("amount", ABI.long_value("amount", args.amount))]
)
)
return deploy_command(casperlabs_client, args)
@guarded_command
def transfer_command(casperlabs_client, args):
_set_session(args, "transfer_to_account_u512.wasm")
if not args.session_args:
target_account_bytes = base64.b64decode(args.target_account)
if len(target_account_bytes) != 32:
target_account_bytes = bytes.fromhex(args.target_account)
if len(target_account_bytes) != 32:
raise Exception(
"--target_account must be 32 bytes base64 or base16 encoded"
)
args.session_args = ABI.args_to_json(
ABI.args(
[
ABI.account("account", target_account_bytes),
ABI.u512("amount", args.amount),
]
)
)
return deploy_command(casperlabs_client, args)
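# Note on the length checks above (illustrative, not in the original source): a
# 32-byte account key is 64 characters when base16 encoded and 44 characters
# (including '=' padding) when base64 encoded, e.g.
#   len(bytes(32).hex())              # 64
#   len(base64.b64encode(bytes(32)))  # 44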
def _deploy_kwargs(args, private_key_accepted=True):
from_addr = (
getattr(args, "from")
and bytes.fromhex(getattr(args, "from"))
or getattr(args, "public_key")
and read_pem_key(args.public_key)
or private_to_public_key(args.private_key)
)
if from_addr and len(from_addr) != 32:
raise Exception(
"--from must be 32 bytes encoded as 64 characters long hexadecimal"
)
if not (args.payment_amount or args.payment_args):
args.payment_amount = DEFAULT_PAYMENT_AMOUNT
if args.payment_amount:
args.payment_args = ABI.args_to_json(
ABI.args([ABI.big_int("amount", int(args.payment_amount))])
)
# Unless one of payment* options supplied use bundled standard-payment
if not any(
(args.payment, args.payment_name, args.payment_hash, args.payment_uref)
):
args.payment = bundled_contract("standard_payment.wasm")
d = dict(
from_addr=from_addr,
gas_price=args.gas_price,
payment=args.payment or args.session,
session=args.session,
public_key=args.public_key or None,
session_args=args.session_args
and ABI.args_from_json(args.session_args)
or None,
payment_args=args.payment_args
and ABI.args_from_json(args.payment_args)
or None,
payment_hash=args.payment_hash and bytes.fromhex(args.payment_hash),
payment_name=args.payment_name,
payment_uref=args.payment_uref and bytes.fromhex(args.payment_uref),
session_hash=args.session_hash and bytes.fromhex(args.session_hash),
session_name=args.session_name,
session_uref=args.session_uref and bytes.fromhex(args.session_uref),
ttl_millis=args.ttl_millis,
dependencies=args.dependencies,
chain_name=args.chain_name,
)
if private_key_accepted:
d["private_key"] = args.private_key or None
return d
@guarded_command
def make_deploy_command(casperlabs_client, args):
kwargs = _deploy_kwargs(args, private_key_accepted=False)
deploy = casperlabs_client.make_deploy(**kwargs)
data = deploy.SerializeToString()
if not args.deploy_path:
sys.stdout.buffer.write(data)
else:
with open(args.deploy_path, "wb") as f:
f.write(data)
@guarded_command
def sign_deploy_command(casperlabs_client, args):
deploy = consensus.Deploy()
if args.deploy_path:
with open(args.deploy_path, "rb") as input_file:
deploy.ParseFromString(input_file.read())
else:
deploy.ParseFromString(sys.stdin.read())
deploy = casperlabs_client.sign_deploy(
deploy, read_pem_key(args.public_key), args.private_key
)
if not args.signed_deploy_path:
sys.stdout.write(deploy.SerializeToString())
else:
with open(args.signed_deploy_path, "wb") as output_file:
output_file.write(deploy.SerializeToString())
@guarded_command
def send_deploy_command(casperlabs_client, args):
deploy = consensus.Deploy()
with open(args.deploy_path, "rb") as f:
deploy.ParseFromString(f.read())
casperlabs_client.send_deploy(deploy)
print(f"Success! Deploy {deploy.deploy_hash.hex()} deployed")
@guarded_command
def deploy_command(casperlabs_client, args):
kwargs = _deploy_kwargs(args)
deploy_hash = casperlabs_client.deploy(**kwargs)
print(f"Success! Deploy {deploy_hash} deployed")
if args.wait_for_processed:
deploy_info = casperlabs_client.showDeploy(
deploy_hash,
full_view=False,
wait_for_processed=args.wait_for_processed,
timeout_seconds=args.timeout_seconds,
)
print(hexify(deploy_info))
@guarded_command
def propose_command(casperlabs_client, args):
print("Warning: command propose is deprecated.", file=sys.stderr)
response = casperlabs_client.propose()
print(f"Success! Block hash: {response.block_hash.hex()}")
@guarded_command
def show_block_command(casperlabs_client, args):
response = casperlabs_client.showBlock(args.hash, full_view=True)
return _show_block(response)
@guarded_command
def show_blocks_command(casperlabs_client, args):
response = casperlabs_client.showBlocks(args.depth, full_view=False)
_show_blocks(response)
@guarded_command
def vdag_command(casperlabs_client, args):
for o in casperlabs_client.visualizeDag(
args.depth, args.out, args.show_justification_lines, args.stream
):
if not args.out:
print(o)
break
@guarded_command
def query_state_command(casperlabs_client, args):
response = casperlabs_client.queryState(
args.block_hash, args.key, args.path or "", getattr(args, "type")
)
print(hexify(response))
@guarded_command
def balance_command(casperlabs_client, args):
response = casperlabs_client.balance(args.address, args.block_hash)
print(response)
@guarded_command
def show_deploy_command(casperlabs_client, args):
response = casperlabs_client.showDeploy(
args.hash,
full_view=False,
wait_for_processed=args.wait_for_processed,
timeout_seconds=args.timeout_seconds,
)
print(hexify(response))
@guarded_command
def show_deploys_command(casperlabs_client, args):
response = casperlabs_client.showDeploys(args.hash, full_view=False)
_show_blocks(response, element_name="deploy")
def write_file(file_name, text):
with open(file_name, "w") as f:
f.write(text)
def write_file_binary(file_name, data):
with open(file_name, "wb") as f:
f.write(data)
def encode_base64(a: bytes):
return str(base64.b64encode(a), "utf-8")
@guarded_command
def keygen_command(casperlabs_client, args):
directory = Path(args.directory).resolve()
validator_private_path = directory / "validator-private.pem"
validator_pub_path = directory / "validator-public.pem"
validator_id_path = directory / "validator-id"
validator_id_hex_path = directory / "validator-id-hex"
node_priv_path = directory / "node.key.pem"
node_cert_path = directory / "node.certificate.pem"
node_id_path = directory / "node-id"
validator_private_pem, validator_public_pem, validator_public_bytes = (
generate_validators_keys()
)
write_file_binary(validator_private_path, validator_private_pem)
write_file_binary(validator_pub_path, validator_public_pem)
write_file(validator_id_path, encode_base64(validator_public_bytes))
write_file(validator_id_hex_path, validator_public_bytes.hex())
private_key, public_key = generate_key_pair()
node_cert, key_pem = generate_certificates(private_key, public_key)
write_file_binary(node_priv_path, key_pem)
write_file_binary(node_cert_path, node_cert)
write_file(node_id_path, public_address(public_key))
print(f"Keys successfully created in directory: {str(directory.absolute())}")
@guarded_command
def show_peers_command(casperlabs_client, args):
peers = casperlabs_client.show_peers()
i = 0
for i, node in enumerate(peers, 1):
print(f"------------- node {i} ---------------")
print(hexify(node))
print("-----------------------------------------------------")
print(f"count: {i}")
@guarded_command
def stream_events_command(casperlabs_client, args):
subscribed_events = dict(
all=args.all,
block_added=args.block_added,
block_finalized=args.block_finalized,
deploy_added=args.deploy_added,
deploy_discarded=args.deploy_discarded,
deploy_requeued=args.deploy_requeued,
deploy_processed=args.deploy_processed,
deploy_finalized=args.deploy_finalized,
deploy_orphaned=args.deploy_orphaned,
)
if not any(subscribed_events.values()):
raise argparse.ArgumentTypeError("No events chosen")
stream = casperlabs_client.stream_events(
account_public_keys=args.account_public_key,
deploy_hashes=args.deploy_hash,
min_event_id=args.min_event_id,
**subscribed_events,
)
for event in stream:
now = datetime.datetime.now()
print(f"------------- {now.strftime('%Y-%m-%d %H:%M:%S')} -------------")
print(hexify(event))
def check_directory(path):
if not os.path.exists(path):
raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist")
if not os.path.isdir(path):
raise argparse.ArgumentTypeError(f"'{path}' is not a directory")
if not os.access(path, os.W_OK):
raise argparse.ArgumentTypeError(f"'{path}' does not have writing permissions")
return Path(path)
def dot_output(file_name):
"""
    Check that the file name has an extension matching one of the file formats supported by Graphviz.
"""
parts = file_name.split(".")
if len(parts) == 1:
raise argparse.ArgumentTypeError(
f"'{file_name}' has no extension indicating file format"
)
else:
file_format = parts[-1]
if file_format not in DOT_FORMATS.split(","):
raise argparse.ArgumentTypeError(
f"File extension {file_format} not recognized, must be one of {DOT_FORMATS}"
)
return file_name
def natural(number):
"""Check number is an integer greater than 0"""
n = int(number)
if n < 1:
raise argparse.ArgumentTypeError(f"{number} is not a positive int value")
return n
# fmt: off
def deploy_options(private_key_accepted=True):
return ([
[('-f', '--from'), dict(required=False, type=str, help="The public key of the account which is the context of this deployment, base16 encoded.")],
[('--chain-name',), dict(required=False, type=str, help="Name of the chain to optionally restrict the deploy from being accidentally included anywhere else.")],
[('--dependencies',), dict(required=False, nargs="+", default=None, help="List of deploy hashes (base16 encoded) which must be executed before this deploy.")],
[('--payment-amount',), dict(required=False, type=int, default=None, help="Standard payment amount. Use this with the default payment, or override with --payment-args if custom payment code is used. By default --payment-amount is set to 10000000")],
[('--gas-price',), dict(required=False, type=int, default=10, help='The price of gas for this transaction in units dust/gas. Must be positive integer.')],
[('-p', '--payment'), dict(required=False, type=str, default=None, help='Path to the file with payment code, by default fallbacks to the --session code')],
[('--payment-hash',), dict(required=False, type=str, default=None, help='Hash of the stored contract to be called in the payment; base16 encoded')],
[('--payment-name',), dict(required=False, type=str, default=None, help='Name of the stored contract (associated with the executing account) to be called in the payment')],
[('--payment-uref',), dict(required=False, type=str, default=None, help='URef of the stored contract to be called in the payment; base16 encoded')],
[('-s', '--session'), dict(required=False, type=str, default=None, help='Path to the file with session code')],
[('--session-hash',), dict(required=False, type=str, default=None, help='Hash of the stored contract to be called in the session; base16 encoded')],
[('--session-name',), dict(required=False, type=str, default=None, help='Name of the stored contract (associated with the executing account) to be called in the session')],
[('--session-uref',), dict(required=False, type=str, default=None, help='URef of the stored contract to be called in the session; base16 encoded')],
[('--session-args',), dict(required=False, type=str, help="""JSON encoded list of session args, e.g.: '[{"name": "amount", "value": {"long_value": 123456}}]'""")],
[('--payment-args',), dict(required=False, type=str, help="""JSON encoded list of payment args, e.g.: '[{"name": "amount", "value": {"big_int": {"value": "123456", "bit_width": 512}}}]'""")],
[('--ttl-millis',), dict(required=False, type=int, help="""Time to live. Time (in milliseconds) that the deploy will remain valid for.'""")],
[('-w', '--wait-for-processed'), dict(action='store_true', help='Wait for deploy status PROCESSED or DISCARDED')],
[('--timeout-seconds',), dict(type=int, default=CasperLabsClient.DEPLOY_STATUS_TIMEOUT, help='Timeout in seconds')],
[('--public-key',), dict(required=False, default=None, type=str, help='Path to the file with account public key (Ed25519)')]]
+ (private_key_accepted
and [[('--private-key',), dict(required=True, default=None, type=str, help='Path to the file with account private key (Ed25519)')]]
or []))
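# Note on the return expression above: `cond and [option] or []` is the pre-ternary
# conditional idiom, so the --private-key option spec is appended only when
# private_key_accepted is truthy; otherwise the empty list contributes nothing.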
# fmt:on
def cli(*arguments) -> int:
"""
Parse list of command line arguments and call appropriate command.
"""
class Parser:
def __init__(self):
# The --help option added by default has a short version -h, which conflicts
            # with the short version of --host, so we need to disable it.
self.parser = argparse.ArgumentParser(
prog="casperlabs_client", add_help=False
)
self.parser.add_argument(
"--help",
action="help",
default=argparse.SUPPRESS,
help="show this help message and exit",
)
self.parser.add_argument(
"-h",
"--host",
required=False,
default=DEFAULT_HOST,
type=str,
help="Hostname or IP of node on which gRPC service is running.",
)
self.parser.add_argument(
"-p",
"--port",
required=False,
default=DEFAULT_PORT,
type=int,
help="Port used for external gRPC API.",
)
self.parser.add_argument(
"--port-internal",
required=False,
default=DEFAULT_INTERNAL_PORT,
type=int,
help="Port used for internal gRPC API.",
)
self.parser.add_argument(
"--node-id",
required=False,
type=str,
help="node_id parameter for TLS connection",
)
self.parser.add_argument(
"--certificate-file",
required=False,
type=str,
help="Certificate file for TLS connection",
)
self.sp = self.parser.add_subparsers(help="Choose a request")
def no_command(casperlabs_client, args):
print(
"You must provide a command. --help for documentation of commands."
)
self.parser.print_usage()
return 1
self.parser.set_defaults(function=no_command)
def addCommand(self, command: str, function, help, arguments):
command_parser = self.sp.add_parser(command, help=help)
command_parser.set_defaults(function=function)
for (args, options) in arguments:
command_parser.add_argument(*args, **options)
def run(self, argv):
args = self.parser.parse_args(argv)
return args.function(
CasperLabsClient(
args.host,
args.port,
args.port_internal,
args.node_id,
args.certificate_file,
),
args,
)
parser = Parser()
# fmt: off
parser.addCommand('deploy', deploy_command, 'Deploy a smart contract source file to Casper on an existing running node. The deploy will be packaged and sent as a block to the network depending on the configuration of the Casper instance',
deploy_options())
parser.addCommand('make-deploy', make_deploy_command, "Constructs a deploy that can be signed and sent to a node.",
[[('-o', '--deploy-path'), dict(required=False, help="Path to the file where deploy will be saved. Optional, if not provided the deploy will be printed to STDOUT.")]] + deploy_options(private_key_accepted=False))
parser.addCommand('sign-deploy', sign_deploy_command, "Cryptographically signs a deploy. The signature is appended to existing approvals.",
[[('-o', '--signed-deploy-path'), dict(required=False, default=None, help="Path to the file where signed deploy will be saved. Optional, if not provided the deploy will be printed to STDOUT.")],
[('-i', '--deploy-path'), dict(required=False, default=None, help="Path to the deploy file.")],
[('--private-key',), dict(required=True, help="Path to the file with account private key (Ed25519)")],
[('--public-key',), dict(required=True, help="Path to the file with account public key (Ed25519)")]])
parser.addCommand('send-deploy', send_deploy_command, "Deploy a smart contract source file to Casper on an existing running node. The deploy will be packaged and sent as a block to the network depending on the configuration of the Casper instance.",
[[('-i', '--deploy-path'), dict(required=False, default=None, help="Path to the file with signed deploy.")]])
parser.addCommand('bond', bond_command, 'Issues bonding request',
[[('-a', '--amount'), dict(required=True, type=int, help='amount of motes to bond')]] + deploy_options())
parser.addCommand('unbond', unbond_command, 'Issues unbonding request',
[[('-a', '--amount'),
dict(required=False, default=None, type=int, help='Amount of motes to unbond. If not provided then a request to unbond with full staked amount is made.')]] + deploy_options())
parser.addCommand('transfer', transfer_command, 'Transfers funds between accounts',
[[('-a', '--amount'), dict(required=True, default=None, type=int, help='Amount of motes to transfer. Note: a mote is the smallest, indivisible unit of a token.')],
[('-t', '--target-account'), dict(required=True, type=str, help="base64 or base16 representation of target account's public key")],
] + deploy_options(private_key_accepted=True))
parser.addCommand('propose', propose_command, '[DEPRECATED] Force a node to propose a block based on its accumulated deploys.', [])
parser.addCommand('show-block', show_block_command, 'View properties of a block known by Casper on an existing running node. Output includes: parent hashes, storage contents of the tuplespace.',
[[('hash',), dict(type=str, help='the hash value of the block')]])
parser.addCommand('show-blocks', show_blocks_command, 'View list of blocks in the current Casper view on an existing running node.',
[[('-d', '--depth'), dict(required=True, type=int, help='depth in terms of block height')]])
parser.addCommand('show-deploy', show_deploy_command, 'View properties of a deploy known by Casper on an existing running node.',
[[('hash',), dict(type=str, help='Value of the deploy hash, base16 encoded.')],
[('-w', '--wait-for-processed'), dict(action='store_true', help='Wait for deploy status PROCESSED or DISCARDED')],
[('--timeout-seconds',), dict(type=int, default=CasperLabsClient.DEPLOY_STATUS_TIMEOUT, help='Timeout in seconds')]])
parser.addCommand('show-deploys', show_deploys_command, 'View deploys included in a block.',
[[('hash',), dict(type=str, help='Value of the block hash, base16 encoded.')]])
parser.addCommand('vdag', vdag_command, 'DAG in DOT format. You need to install Graphviz from https://www.graphviz.org/ to use it.',
[[('-d', '--depth'), dict(required=True, type=natural, help='depth in terms of block height')],
[('-o', '--out'), dict(required=False, type=dot_output, help=f'output image filename, outputs to stdout if not specified, must end with one of {DOT_FORMATS}')],
[('-s', '--show-justification-lines'), dict(action='store_true', help='if justification lines should be shown')],
[('--stream',), dict(required=False, choices=('single-output', 'multiple-outputs'), help="subscribe to changes, '--out' has to be specified, valid values are 'single-output', 'multiple-outputs'")]])
parser.addCommand('query-state', query_state_command, 'Query a value in the global state.',
[[('-b', '--block-hash'), dict(required=True, type=str, help='Hash of the block to query the state of')],
[('-k', '--key'), dict(required=True, type=str, help='Base16 encoding of the base key')],
[('-p', '--path'), dict(required=False, type=str, help="Path to the value to query. Must be of the form 'key1/key2/.../keyn'")],
[('-t', '--type'), dict(required=True, choices=('hash', 'uref', 'address', 'local'), help="Type of base key. Must be one of 'hash', 'uref', 'address' or 'local'. For 'local' key type, 'key' value format is {seed}:{rest}, where both parts are hex encoded.")]])
parser.addCommand('balance', balance_command, 'Returns the balance of the account at the specified block.',
[[('-a', '--address'), dict(required=True, type=str, help="Account's public key in hex.")],
[('-b', '--block-hash'), dict(required=True, type=str, help='Hash of the block to query the state of')]])
parser.addCommand('keygen', keygen_command, textwrap.dedent("""\
Generate keys.
Usage: casperlabs-client keygen <existingOutputDirectory>
Command will override existing files!
Generated files:
node-id # node ID as in casperlabs://c0a6c82062461c9b7f9f5c3120f44589393edf31@<NODE ADDRESS>?protocol=40400&discovery=40404
# derived from node.key.pem
node.certificate.pem # TLS certificate used for node-to-node interaction encryption
# derived from node.key.pem
node.key.pem # secp256r1 private key
validator-id # validator ID in Base64 format; can be used in accounts.csv
# derived from validator.public.pem
validator-id-hex # validator ID in hex, derived from validator.public.pem
validator-private.pem # ed25519 private key
validator-public.pem # ed25519 public key"""),
                      [[('directory',), dict(type=check_directory, help="Output directory for keys. Should already exist.")]])
parser.addCommand('show-peers', show_peers_command, "Show peers connected to the node.", [])
parser.addCommand('stream-events', stream_events_command, "Stream block and deploy state transition events.", [
[('--all',), dict(action='store_true', help='Subscribe to all events')],
[('--block-added',), dict(action='store_true', help='Block added')],
[('--block-finalized',), dict(action='store_true', help='Block finalized')],
[('--deploy-added',), dict(action='store_true', help='Deploy added')],
[('--deploy-discarded',), dict(action='store_true', help='Deploy discarded')],
[('--deploy-requeued',), dict(action='store_true', help='Deploy requeued')],
[('--deploy-processed',), dict(action='store_true', help='Deploy processed')],
[('--deploy-finalized',), dict(action='store_true', help='Deploy finalized')],
[('--deploy-orphaned',), dict(action='store_true', help='Deploy orphaned')],
[('-k', '--account-public-key'), dict(action='append', help='Filter by (possibly multiple) account public key(s)')],
[('-d', '--deploy-hash'), dict(action='append', help='Filter by (possibly multiple) deploy hash(es)')],
        [('--min-event-id',), dict(required=False, default=0, type=int, help="Supports replaying events from a given ID. If the value is 0, it will subscribe to future events; if it's non-zero, it will replay all past events from that ID, without subscribing to new. To catch up with events from the beginning, start from 1.")],
])
# fmt:on
return parser.run([str(a) for a in arguments])
def main():
return cli(*sys.argv[1:])
if __name__ == "__main__":
sys.exit(main())
| 44.454405 | 331 | 0.642619 |
6acdfa866890833eaa93b8d60be8bd356b0bd2fe | 6,898 | py | Python | tests/test_fix_type.py | anelendata/getschema | 3cd23ca5bdb98ecc031fdd4bd39864f1c50293f7 | [
"Apache-2.0"
] | 2 | 2021-09-11T20:23:35.000Z | 2021-09-16T21:52:25.000Z | tests/test_fix_type.py | anelendata/getschema | 3cd23ca5bdb98ecc031fdd4bd39864f1c50293f7 | [
"Apache-2.0"
] | 11 | 2020-12-23T01:08:07.000Z | 2021-07-12T04:02:20.000Z | tests/test_fix_type.py | anelendata/getschema | 3cd23ca5bdb98ecc031fdd4bd39864f1c50293f7 | [
"Apache-2.0"
] | 2 | 2021-05-28T14:05:31.000Z | 2021-06-14T12:34:14.000Z |
import datetime
import logging
import getschema
import json
LOGGER = logging.getLogger(__name__)
records = [
{
"index": 0,
"array": [
0.0,
],
"nested_field": {
"some_prop": 0,
},
"boolean_field": True,
"another_boolean_field": True,
"number_field": 1,
"string_field": "a",
"datetime_field": "2021-06-04",
},
{
"index": 1,
"array": [
1,
],
"nested_field": {
"some_prop": 1,
},
"boolean_field": False,
"another_boolean_field": True,
"number_field": 0.5,
"string_field": "b",
"datetime_field": "2021-06-04T09:00",
},
]
valid_record = {
"index": 2,
"array": [
1000,
],
"nested_field": {
"some_prop": -1,
},
"datetime_field": "2021-06-01 09:00:00"
}
valid_after_fix = {
"index": "0",
"array": [
"0",
],
"nested_field": {
"some_prop": "0",
},
}
invalid_after_fix = {
"index": "1",
"array": [
"a",
],
"nested_field": {
"some_prop": "1",
},
}
null_entries = {
"index": None,
"array": [
"1.5",
None,
],
"nested_field": None,
"boolean_field": None,
"number_field": None,
"string_field": None,
}
invalid_datetime_record = {
"index": 2,
"array": [
1000,
],
"nested_field": {
"some_prop": -1,
},
"datetime_field": "20"
}
empty_string_record = {
"index": 2,
"array": [
1000,
],
"nested_field": {
"some_prop": -1,
},
"string_field": ""
}
def test_unsupported_schema():
schema = getschema.infer_schema(records)
schema["properties"]["index"]["type"] = ["null", "integer", "string"]
try:
getschema.fix_type(valid_record, schema)
except Exception as e:
assert(str(e).startswith("Sorry, getschema does not support multiple types"))
def test_int_zero():
schema = getschema.infer_schema(records)
# This should pass
getschema.fix_type(valid_record, schema)
fixed_record = getschema.fix_type(valid_after_fix, schema)
assert(isinstance(fixed_record["index"], int))
assert(isinstance(fixed_record["array"][0], float))
assert(isinstance(fixed_record["nested_field"]["some_prop"], int))
try:
fixed_record = getschema.fix_type(invalid_after_fix, schema)
except Exception as e:
assert(str(e).startswith("could not convert string to float"))
else:
assert False, "It should raise an exception"
def test_datetime():
schema = getschema.infer_schema(records)
assert(schema["properties"]["datetime_field"]["type"] ==
["null", "string"])
assert(schema["properties"]["datetime_field"]["format"] == "date-time")
fixed_record = getschema.fix_type(valid_record, schema)
assert(isinstance(fixed_record["datetime_field"], str))
try:
fixed_record = getschema.fix_type(invalid_datetime_record, schema)
except Exception as e:
assert(str(e).startswith("Not in a valid datetime format"))
else:
assert False, "It should raise an exception"
def test_empty_string():
schema = getschema.infer_schema(records)
assert(schema["properties"]["string_field"]["type"] ==
["null", "string"])
fixed_record = getschema.fix_type(empty_string_record, schema)
assert(fixed_record["string_field"] == "")
schema["properties"]["string_field"]["type"] == ["string"]
fixed_record = getschema.fix_type(empty_string_record, schema)
assert(fixed_record["string_field"] == "")
def test_preserve_nulls_boolean():
schema = getschema.infer_schema(records)
assert(schema["properties"]["boolean_field"]["type"] ==
["null", "boolean"])
fixed_record = getschema.fix_type(null_entries, schema)
assert(fixed_record["boolean_field"] is None)
def test_preserve_nulls_integer():
schema = getschema.infer_schema(records)
assert(schema["properties"]["index"]["type"] == ["null", "integer"])
fixed_record = getschema.fix_type(null_entries, schema)
assert(fixed_record["index"] is None)
def test_preserve_nulls_number():
schema = getschema.infer_schema(records)
assert(schema["properties"]["number_field"]["type"] == ["null", "number"])
fixed_record = getschema.fix_type(null_entries, schema)
assert(fixed_record["number_field"] is None)
def test_preserve_nulls_string():
schema = getschema.infer_schema(records)
assert(schema["properties"]["string_field"]["type"] == ["null", "string"])
fixed_record = getschema.fix_type(null_entries, schema)
assert(fixed_record["string_field"] is None)
def test_reject_null_boolean():
schema = getschema.infer_schema(records)
# This will pass
_ = getschema.fix_type(null_entries, schema)
schema["properties"]["boolean_field"]["type"] = ["boolean"]
try:
_ = getschema.fix_type(null_entries, schema)
except Exception as e:
assert(str(e).startswith("Null object given at"))
else:
raise Exception("Supposed to fail with null value")
def test_reject_null_integer():
schema = getschema.infer_schema(records)
# This will pass
_ = getschema.fix_type(null_entries, schema)
schema["properties"]["index"]["type"] = ["integer"]
try:
_ = getschema.fix_type(null_entries, schema)
except Exception as e:
assert(str(e).startswith("Null object given at"))
else:
raise Exception("Supposed to fail with null value")
def test_reject_null_number():
schema = getschema.infer_schema(records)
# This will pass
_ = getschema.fix_type(null_entries, schema)
schema["properties"]["number_field"]["type"] = ["number"]
try:
_ = getschema.fix_type(null_entries, schema)
except Exception as e:
assert(str(e).startswith("Null object given at"))
else:
raise Exception("Supposed to fail with null value")
def test_reject_null_string():
schema = getschema.infer_schema(records)
# This will pass
_ = getschema.fix_type(null_entries, schema)
schema["properties"]["string_field"]["type"] = ["string"]
try:
_ = getschema.fix_type(null_entries, schema)
except Exception as e:
assert(str(e).startswith("Null object given at"))
else:
raise Exception("Supposed to fail with null value")
def test_reject_null_object():
schema = getschema.infer_schema(records)
# This will pass
_ = getschema.fix_type(null_entries, schema)
schema["properties"]["nested_field"]["type"] = ["object"]
try:
_ = getschema.fix_type(null_entries, schema)
except Exception as e:
assert(str(e).startswith("Null object given at"))
else:
raise Exception("Supposed to fail with null value")
| 27.264822 | 85 | 0.626558 |
ad456d38ec9b29ca1378acbe435f78b4c24ab8d5 | 5,292 | py | Python | task3/vcompresslib.py | Chlerry/term-project | 6accf960fea9129ebbb8520d277c47af10ee6d3d | [
"MIT"
] | null | null | null | task3/vcompresslib.py | Chlerry/term-project | 6accf960fea9129ebbb8520d277c47af10ee6d3d | [
"MIT"
] | 9 | 2020-03-14T03:54:26.000Z | 2020-03-14T07:31:35.000Z | task3/vcompresslib.py | Chlerry/term-project | 6accf960fea9129ebbb8520d277c47af10ee6d3d | [
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from keras.models import Model
from keras.layers import Input, Dense, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D
import imgpatch
# Ratio options are: 1/32, 1/16, 1/8, 1/4, 1/2
def model1(train_data, patch_shape, ratio):
input_img = Input(shape=patch_shape)
e = Conv2D(64, (7, 7), activation='relu', padding='same')(input_img)
if(ratio == '1/2'):
e = MaxPooling2D((2, 1), padding='same')(e)
else:
e = MaxPooling2D((2, 2), padding='same')(e)
e = Conv2D(32, (5, 5), activation='relu', padding='same')(e)
if(ratio == '1/8'):
e = MaxPooling2D((2, 1), padding='same')(e)
elif (ratio == '1/16' or ratio == '1/32'):
e = MaxPooling2D((2, 2), padding='same')(e)
e = Conv2D(16, (1, 1), activation='relu', padding='same')(e)
if (ratio == '1/32'):
e = MaxPooling2D((2, 1), padding='same')(e)
e = Conv2D(8, (3, 3), activation='relu', padding='same')(e)
encoded = Conv2D(3, (3, 3), activation='relu', padding='same')(e)
d = Conv2D(3, (3, 3), activation='relu', padding='same')(encoded)
d = Conv2D(8, (3, 3), activation='relu', padding='same')(d)
if (ratio == '1/32'):
d = UpSampling2D((2, 1))(d)
d = Conv2D(16, (1, 1), activation='relu', padding='same')(d)
if(ratio == '1/8'):
d = UpSampling2D((2, 1))(d)
elif (ratio == '1/16' or ratio == '1/32'):
d = UpSampling2D((2, 2))(d)
d = Conv2D(32, (5, 5), activation='relu', padding='same')(d)
if(ratio == '1/2'):
d = UpSampling2D((2, 1))(d)
else:
d = UpSampling2D((2, 2))(d)
d = Conv2D(64, (7, 7), activation='relu', padding='same')(d)
decoded = Conv2D(3, (1, 1), activation='linear')(d)
autoencoder = Model(input_img, decoded)
autoencoder.summary()
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.fit(train_data, train_data, epochs=5, batch_size=25)
return autoencoder
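# Note on the ratios above: the bottleneck keeps the same 3 channels as the input, so
# the compression ratio equals the product of the pooling factors, e.g. '1/8' uses
# (2, 2) then (2, 1) pooling for a 2*2*2 = 8x spatial reduction. A minimal usage
# sketch (the patch size and random data below are illustrative assumptions, not
# values taken from this project):
#   patches = np.random.rand(100, 32, 32, 3).astype(np.float32)
#   autoencoder = model1(patches, patch_shape=(32, 32, 3), ratio='1/4')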
# Ratio options are: 1/32, 1/16, 1/8, 1/4, 1/2
def model2(train_data, patch_shape, ratio):
input_img = Input(shape=patch_shape)
if(ratio == '1/2'):
e = Conv2D(64, (7, 7), activation='relu', strides=(1,2), padding='same')(input_img)
else:
e = Conv2D(64, (7, 7), activation='relu', strides=(2,2), padding='same')(input_img)
if(ratio == '1/8'):
e = Conv2D(32, (5, 5), activation='relu', strides=(1,2),padding='same')(e)
elif (ratio == '1/16' or ratio == '1/32'):
e = Conv2D(32, (5, 5), activation='relu', strides=(2,2),padding='same')(e)
else:
e = Conv2D(32, (5, 5), activation='relu', padding='same')(e)
if (ratio == '1/32'):
e = Conv2D(16, (1, 1), activation='relu', strides=(1,2),padding='same')(e)
else:
e = Conv2D(16, (1, 1), activation='relu', padding='same')(e)
e = Conv2D(8, (3, 3), activation='relu', padding='same')(e)
encoded = Conv2D(3, (3, 3), activation='relu', padding='same')(e)
d = Conv2DTranspose(3, (3, 3), activation='relu', padding='same')(encoded)
d = Conv2DTranspose(8, (3, 3), activation='relu', padding='same')(d)
if (ratio == '1/32'):
d = Conv2DTranspose(16, (1, 1), activation='relu', strides=(1,2),padding='same')(d)
else:
d = Conv2DTranspose(16, (1, 1), activation='relu', padding='same')(d)
if(ratio == '1/8'):
d = Conv2DTranspose(32, (5, 5), activation='relu', strides=(1,2),padding='same')(d)
elif (ratio == '1/16' or ratio == '1/32'):
d = Conv2DTranspose(32, (5, 5), activation='relu', strides=(2,2),padding='same')(d)
else:
d = Conv2DTranspose(32, (5, 5), activation='relu', padding='same')(d)
if(ratio == '1/2'):
d = Conv2DTranspose(64, (7, 7), activation='relu', strides=(1,2),padding='same')(d)
else:
d = Conv2DTranspose(64, (7, 7), activation='relu', strides=(2,2),padding='same')(d)
decoded = Conv2DTranspose(3, (1, 1), activation='linear')(d)
autoencoder = Model(input_img, decoded)
autoencoder.summary()
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.fit(train_data, train_data, epochs=5, batch_size=25)
return autoencoder
# Calculate average PSNR value
def get_psnr(test_image, decoded_image):
PSNR = 0
n_test = test_image.shape[0]
for i in range(n_test):
MSE = tf.keras.losses.MeanSquaredError()
test_mse = MSE(test_image[i], decoded_image[i])
PSNR += 10.0 * np.log10(1.0 / test_mse)
PSNR /= n_test
return PSNR
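# The loop above is the standard image PSNR definition, PSNR = 10 * log10(MAX^2 / MSE),
# with MAX = 1 because the images here are scaled to [0, 1]; the per-image values are
# then averaged over the test set. For 8-bit images in [0, 255], MAX would be 255.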
# Obtain decoded image patches from the CNN model, and merge patches back to normal images
def get_decoded_image(autoencoder, test_data, patch_shape, image_shape):
# Obtain decoded image for test_data
decoded_patches = autoencoder.predict(test_data)
# Limit pixel value range to [0, 1]
decoded_patches = np.minimum(decoded_patches, np.ones(decoded_patches.shape, dtype = np.float32))
decoded_patches = np.maximum(decoded_patches, np.zeros(decoded_patches.shape, dtype = np.float32))
# Merge patches back to normal images
block_shape = imgpatch.get_block_shape(image_shape, patch_shape)
decoded_image = imgpatch.merge_all_block(decoded_patches, block_shape)
    return decoded_image
| 36.246575 | 102 | 0.603175 |
61cbba53ba0355e346e74df03f405960397873b3 | 8,502 | py | Python | examples/Nolan/AFRL/Carts/cart49.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | 1 | 2019-03-26T03:00:03.000Z | 2019-03-26T03:00:03.000Z | examples/Nolan/AFRL/Carts/cart49.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | null | null | null | examples/Nolan/AFRL/Carts/cart49.py | Rapid-Design-of-Systems-Laboratory/beluga-legacy | d14713d8211b64293c4427005cf02fbd58630598 | [
"MIT"
] | 1 | 2019-07-14T22:53:52.000Z | 2019-07-14T22:53:52.000Z |
if __name__ == "__main__":
import numpy as np
import beluga.Beluga as Beluga
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
import logging
# Import Libraries for Matrix Calculations
from sympy import symbols, Matrix, Transpose, simplify, diff, diag
from sympy import sin
from sympy import cos, acos
from sympy import sqrt
from sympy import exp
from sympy import atan
from numpy import pi
writeEqn = True
simpList = False
if writeEqn:
writeList = []
# Constants
v, u_max = symbols('v, u_max')
xb, yb = symbols('xb, yb')
Dt, sigv, sigw, sigr = symbols('Dt, sigv, sigw, sigr')
# Primary States
x, y, theta = symbols('x, y, theta')
# Control
w = symbols('w')
# Secondary States
# Primary State Rates
x_dot = v * cos(theta)
y_dot = v * sin(theta)
theta_dot = u_max * sin(w)
writeList = [x_dot, y_dot, theta_dot]
# Covariance Calculations
p11, p12, p13,\
p22, p23, \
p33 \
= symbols('p11 p12 p13\
p22 p23 \
p33')
P = Matrix([[p11, p12, p13],
[p12, p22, p23],
                    [p13, p23, p33]])
F = Matrix([[diff(x_dot, x), diff(x_dot, y), diff(x_dot, theta)],
[diff(y_dot, x), diff(y_dot, y), diff(y_dot, theta)],
[diff(theta_dot, x), diff(theta_dot, y), diff(theta_dot, theta)],])
G = Matrix([[cos(theta), 0],
[sin(theta), 0],
[0, 1]])
h = sqrt((x - xb)**2 + (y - yb)**2)
H = Matrix([[diff(h, x), diff(h, y), diff(h, theta)]])
Q = Dt*diag(sigv**2, sigw**2)
R = Dt*diag(sigr**2)
P_dot = (F*P + P*F.T - P*H.T*(R**-1)*H*P + G*Q*G.T)
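        # The expression above is the continuous-time EKF covariance propagation
        # (Riccati) equation, dP/dt = F*P + P*F' - P*H'*inv(R)*H*P + G*Q*G', where F and
        # H are the Jacobians of the dynamics and of the range measurement h, and Q, R
        # are the process and measurement noise covariances built just above.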
Dim = P_dot.shape
for i in range(0, Dim[0]):
for j in range(i, Dim[1]):
# print(P_dot[i, j])
writeList.append(P_dot[i, j])
# h_new, theta_new, v_new, gam_new = symbols('h_new, theta_new, v_new, gam_new')
# h_scale, theta_scale, v_scale, gam_scale = symbols('h_scale, theta_scale, v_scale, gam_scale')
states = [x, y, theta,
p11, p12, p13,
p22, p23,
p33]
x_s, y_s, theta_s, \
p11_s, p12_s, p13_s, \
p22_s, p23_s, \
p33_s = \
symbols('x_s, y_s, theta_s, \
p11_s, p12_s, p13_s, \
p22_s, p23_s, \
p33_s')
scales = [x_s, y_s, theta_s,
p11_s, p12_s, p13_s,
p22_s, p23_s,
p33_s]
x_n, y_n, theta_n, \
p11_n, p12_n, p13_n, \
p22_n, p23_n, \
p33_n = \
symbols('x_n, y_n, theta_n, \
p11_n, p12_n, p13_n, \
p22_n, p23_n, \
p33_n')
states_new = [x_n, y_n, theta_n,
p11_n, p12_n, p13_n,
p22_n, p23_n,
p33_n]
# print(writeList)
Z1 = zip(writeList, scales)
scaledList = []
for item, Scale in Z1:
# print(item)
item = item/Scale
Z2 = zip(states, states_new, scales)
# print(item)
# for state, new, scale in Z2:
# print(state)
# print(new)
# print(scale)
for state, new, scale in Z2:
# print(new)
item = item.subs(state, scale*new)
# print(item)
scaledList.append(item)
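        # The substitution loop above nondimensionalizes the dynamics: each state x is
        # replaced by x_s * x_n (its scale times a scaled state) and each rate is divided
        # by its scale, so the downstream BVP solver works with roughly O(1) variables.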
k = 1
with open("eqns.txt", "w") as my_file:
for item in scaledList:
if simpList:
# print('* ' + str(item))
item = simplify(item)
# print('# ' + str(item))
my_file.write(str(item) + "\n")
# print(" Wrote " + str(k) + "/" + str(len(scaledList)))
k += 1
k = 1
with open("eqnsUnscaled.txt", "w") as my_file:
for item in writeList:
my_file.write(str(item) + "\n")
# print(" Wrote " + str(k) + "/" + str(len(writeList)))
k += 1
''' Start Optimal Control Calculations '''
# Read Covariance State Rates from File
with open("eqns.txt", "r") as f:
eqnsList = list(f)
# for item in P_dot_eqns:
# print(item)
# Rename this and/or move to optim package?
problem = beluga.optim.Problem('carts0')
# Define independent variables
problem.independent('t', 's')
# Define equations of motion
problem\
.state('x_n', eqnsList[0] + '+ ep*u_max*cos(w)', '1') \
.state('y_n', eqnsList[1], '1') \
.state('theta_n', eqnsList[2], '1') \
.state('p11_n', eqnsList[3], '1') \
.state('p12_n', eqnsList[4], '1') \
.state('p13_n', eqnsList[5], '1') \
.state('p22_n', eqnsList[6], '1') \
.state('p23_n', eqnsList[7], '1') \
.state('p33_n', eqnsList[8], '1') \
# Define controls
problem.control('w', '1') \
# Define costs
# problem.cost['path'] = Expression('p11', 'm^2/s^2')
# problem.cost['path'] = Expression('sin(w)**2', 's')
problem.cost['terminal'] = Expression('p22_n', '1')
# Define constraints
problem.constraints() \
.initial('x_n-x_n_0', '1') \
.initial('y_n-y_n_0', '1') \
.initial('theta_n-theta_n_0', '1') \
\
.initial('p11_n-p11_n_0', '1') \
.initial('p12_n-p12_n_0', '1') \
.initial('p13_n-p13_n_0', '1') \
.initial('p22_n-p22_n_0', '1') \
.initial('p23_n-p23_n_0', '1') \
.initial('p33_n-p33_n_0', '1') \
\
.terminal('x_n-x_n_f', '1') \
.terminal('y_n-y_n_f', '1') \
\
# Define constants
problem.constant('Dt', 0.1, '1')
problem.constant('sigv', 0.1, '1')
problem.constant('sigw', 0.1, '1')
problem.constant('sigr', 0.1, '1')
problem.constant('xb', 10, '1')
problem.constant('yb', 10, '1')
problem.constant('u_max', 0.1, '1')
problem.constant('v', 30, '1')
problem.constant('x_s', 1, '1')
problem.constant('y_s', 1, '1')
problem.constant('theta_s', 1, '1')
problem.constant('p11_s', 1e-3, '1')
problem.constant('p12_s', 1e-3, '1')
problem.constant('p13_s', 1e-3, '1')
problem.constant('p22_s', 1e-1, '1')
problem.constant('p23_s', 1e-2, '1')
problem.constant('p33_s', 1e-3, '1')
problem.constant('ep', 5, '1')
problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd', tolerance=1e-4, max_iterations=1000, verbose=True, cached=False, number_arcs=16)
# problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose=True, cached=False)
problem.scale.unit('m', 1) \
.unit('s', 1) \
.unit('kg', 1) \
.unit('rad', 1)
# Define quantity (not implemented at present)
# Is this actually an Expression rather than a Value?
# problem.quantity = [Value('tanAng','tan(theta)')]
problem.guess.setup('auto', start=[0, 0, 0, 0, 0, 0, 0, 0, 0], time_integrate=1, costate_guess=[0, 0, 0.001, -0.0001, 0.0, 0.0, 0.001, 0.0, 0.])
# problem.guess.setup('auto',start=[80000,3.38575809e-21,5000,7.98617365e-02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],direction='forward',time_integrate=229.865209,costate_guess =[-1.37514494e+01,3.80852584e+06,-3.26290152e+03,-2.31984720e-14,0.00,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01])
# Figure out nicer way of representing this. Done?
problem.steps.add_step().num_cases(5) \
.terminal('x_n', 10) \
.terminal('y_n', 0) \
problem.steps.add_step().num_cases(20) \
.terminal('x_n', 80) \
.terminal('y_n', 0) \
problem.steps.add_step().num_cases(5)\
.const('u_max', 0.2)
# problem.steps.add_step().num_cases(15) \
# .terminal('theta', 5)
# problem.steps.add_step().num_cases(21) \
# .terminal('theta', 10*pi/180)
Beluga.run(problem, display_level=logging.DEBUG)
| 30.473118 | 288 | 0.507998 |
3bbc03a438a74b2ffb111cbac87922a457a67510 | 4,592 | py | Python | home/settings/base.py | ARAldhafeeri/sary-task | 1fe1c091d397c09acc606d3dcb878e725725c506 | [
"MIT"
] | 2 | 2021-11-11T16:28:27.000Z | 2022-03-07T08:29:13.000Z | home/settings/base.py | ARAldhafeeri/sary-task | 1fe1c091d397c09acc606d3dcb878e725725c506 | [
"MIT"
] | null | null | null | home/settings/base.py | ARAldhafeeri/sary-task | 1fe1c091d397c09acc606d3dcb878e725725c506 | [
"MIT"
] | null | null | null | """
Django settings for home project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mal#x7(fo)h4w!^8^fspiw^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'corsheaders',
'rest_auth',
'rest_auth.registration',
'rest_framework',
'rest_framework.authtoken',
"django_apscheduler",
'core',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'home.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'src', 'build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'home.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'sarytask',
'USER': 'postgres',
'PASSWORD': 'S3d66221@',
'HOST': 'localhost',
'PORT': '8081',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Riyadh'
USE_TZ = True
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
APSCHEDULER_RUN_NOW_TIMEOUT = 25 # Seconds
APSCHEDULER_DATETIME_FORMAT = "N j, Y, f:s a" # Default
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
AUTH_USER_MODEL = 'core.Users'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'src', 'build', 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
SITE_ID = 1
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
CORS_ORIGIN_WHITELIST = [
'https://localhost:3000',
'http://127.0.0.1:8000'
]
| 27.171598 | 91 | 0.692726 |
cb3d76d61abe94c61edb950d9ad9219a684e972a | 2,132 | py | Python | test/figure_test.py | leotrs/decu | e920a7046220a0547826084c11b6a74029e3c658 | [
"MIT"
] | 4 | 2017-10-30T21:22:32.000Z | 2020-10-16T04:15:44.000Z | test/figure_test.py | leotrs/decu | e920a7046220a0547826084c11b6a74029e3c658 | [
"MIT"
] | 105 | 2017-10-05T18:27:04.000Z | 2018-10-10T15:55:47.000Z | test/figure_test.py | leotrs/decu | e920a7046220a0547826084c11b6a74029e3c658 | [
"MIT"
] | null | null | null | """
figure_test.py
--------------
Test the @figure decorator.
"""
from os import listdir
from os.path import basename
from decu import figure, DecuException
import util
import matplotlib.pyplot as plt
import pytest
def test_save_false(tmpdir):
"""With save=False, @figure should not write to disk."""
class TestSaveFalse(util.TestScript):
@figure(save=False)
def plot(self):
plt.figure()
plt.plot(range(100), [x**2 for x in range(100)])
script = TestSaveFalse(tmpdir)
fig_filename = basename(script.make_figure_basename('plot'))
assert fig_filename not in listdir(script.figures_dir)
script.plot()
assert fig_filename not in listdir(script.figures_dir)
def test_save_true(tmpdir):
"""With save=True, @figure should write to disk."""
class TestSaveTrue(util.TestScript):
@figure(save=True)
def plot(self):
plt.figure()
plt.plot(range(100), [x**2 for x in range(100)])
script = TestSaveTrue(tmpdir)
fig_filename = basename(script.make_figure_basename('plot'))
assert fig_filename not in listdir(script.figures_dir)
script.plot()
assert fig_filename in listdir(script.figures_dir)
def test_suffix(tmpdir):
"""@figure-decorated methods should accept a suffix argument."""
class TestSuffix(util.TestScript):
@figure(save=True)
def plot(self):
plt.figure()
plt.plot(range(100), [x**2 for x in range(100)])
script = TestSuffix(tmpdir)
suffix = 'test_suffix'
fig_filename = basename(script.make_figure_basename('plot', suffix))
assert fig_filename not in listdir(script.figures_dir)
script.plot(suffix=suffix)
assert fig_filename in listdir(script.figures_dir)
def test_suffix_override(tmpdir):
"""@figure-decorated methods cannot have a 'suffix' argument."""
with pytest.raises(DecuException):
class TestSuffixOverride(util.TestScript):
@figure(save=True)
def plot(self, suffix):
plt.figure()
plt.plot(range(100), [x**2 for x in range(100)])
| 28.810811 | 72 | 0.662758 |
3286db766328fa62c68a39b2c4c8642d477ce5b0 | 1,500 | py | Python | src/Backend/create_suggestions_index/compute_suggestions.py | uhh-lt/cam | 5b8f03fb8d2258adcefbab4e860800658dc34925 | [
"MIT"
] | 9 | 2018-03-10T11:26:17.000Z | 2021-12-30T13:57:12.000Z | src/Backend/create_suggestions_index/compute_suggestions.py | uhh-lt/cam | 5b8f03fb8d2258adcefbab4e860800658dc34925 | [
"MIT"
] | 67 | 2018-03-10T09:16:22.000Z | 2022-03-11T23:59:16.000Z | src/Backend/create_suggestions_index/compute_suggestions.py | uhh-lt/cam | 5b8f03fb8d2258adcefbab4e860800658dc34925 | [
"MIT"
] | 4 | 2019-03-11T09:27:45.000Z | 2020-07-20T13:49:48.000Z |
from multiprocessing import Pool
import requests
import time
import json
#from sample_wordlist import comparison_objects
#from sample_wordlist import comparison_objects_small
CCR_BASE_URL = "http://127.0.0.1:5000/ccr/"
#co_suggestions_dict = {}
co_suggestions = []
def requestSuggestions(comparison_object):
ccr_suggestions = requests.get(CCR_BASE_URL + '{}'.format(comparison_object)).json()
data = {
"comparison_object": comparison_object,
"suggestions": ccr_suggestions
}
return data
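# requestSuggestions takes a single comparison object and returns a plain dict, so it
# could be passed straight to Pool.map for parallel fetching; a sketch of that use
# (not what the script currently runs): p.map(requestSuggestions, comparison_objects)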
p = Pool(4)
start = time.time()
for file_counter in range(0,1,1):
# https://github.com/dwyl/english-words
filename = './english_words/wordlist-{}.json'.format(str('%05d' % file_counter))
with open(filename) as json_file:
comparison_objects = json.load(json_file)
#co_suggestions_dict = p.map(requestSuggestions, comparison_objects)
for comparison_object in comparison_objects:
ccr_suggestions = requests.get(CCR_BASE_URL + '{}'.format(comparison_object)).json()
data = {
"comparison_object": comparison_object,
"suggestions": ccr_suggestions
}
co_suggestions.append(data)
outfilename = './suggestions/outfile-{}.json'.format(str('%05d' % file_counter))
with open(outfilename, 'w') as outfile:
#json.dump(co_suggestions_dict, outfile)
json.dump(co_suggestions, outfile)
end = time.time()
print('took: ', end - start)
| 29.411765 | 96 | 0.677333 |
c77a68c6dfeac640472551dba1281a031cd30746 | 9,197 | py | Python | releasenotes/source/conf.py | os-cloud/os_zun | daf9f9d60a00edf5c874a35b621acc7d0e5a8e06 | [
"Apache-2.0"
] | null | null | null | releasenotes/source/conf.py | os-cloud/os_zun | daf9f9d60a00edf5c874a35b621acc7d0e5a8e06 | [
"Apache-2.0"
] | null | null | null | releasenotes/source/conf.py | os-cloud/os_zun | daf9f9d60a00edf5c874a35b621acc7d0e5a8e06 | [
"Apache-2.0"
] | 1 | 2019-06-27T01:31:18.000Z | 2019-06-27T01:31:18.000Z |
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
role_name = 'os_zun'
target_name = 'openstack-ansible-' + role_name
title = 'OpenStack-Ansible Release Notes: ' + role_name + ' role'
# The link to the browsable source code (for the left hand menu)
oslosphinx_cgit_link = (
'https://git.openstack.org/cgit/openstack/{}'.format(target_name)
)
# Release notes do not need a version number in the title, they
# cover multiple releases.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# openstackdocstheme options
repository_name = 'openstack/' + target_name
bug_project = project.lower()
bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, target_name + '.tex',
title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| 32.157343 | 79 | 0.715016 |
472d8b15cf60edee3a8706fda70d754addef20b0 | 2,379 | bzl | Python | packages/bazel/rules_nodejs_package.bzl | gbamparop/angular | 213c25fb087939cb668bd47ca34ec7a616d3e371 | [
"MIT"
] | 1 | 2020-06-24T10:24:08.000Z | 2020-06-24T10:24:08.000Z | packages/bazel/rules_nodejs_package.bzl | gbamparop/angular | 213c25fb087939cb668bd47ca34ec7a616d3e371 | [
"MIT"
] | null | null | null | packages/bazel/rules_nodejs_package.bzl | gbamparop/angular | 213c25fb087939cb668bd47ca34ec7a616d3e371 | [
"MIT"
] | null | null | null | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dependency-related rules defining our version and dependency versions.
Fulfills similar role as the package.json file.
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# This file mirrored from https://raw.githubusercontent.com/bazelbuild/rules_nodejs/0.15.1/package.bzl
VERSION = "0.15.1"
def rules_nodejs_dependencies():
"""
Fetch our transitive dependencies.
If the user wants to get a different version of these, they can just fetch it
from their WORKSPACE before calling this function, or not call this function at all.
"""
_maybe(
http_archive,
name = "bazel_skylib",
url = "https://github.com/bazelbuild/bazel-skylib/archive/0.3.1.zip",
strip_prefix = "bazel-skylib-0.3.1",
sha256 = "95518adafc9a2b656667bbf517a952e54ce7f350779d0dd95133db4eb5c27fb1",
)
# Needed for Remote Build Execution
# See https://releases.bazel.build/bazel-toolchains.html
# Not strictly a dependency for all users, but it is convenient for them to have this repository
# defined to reduce the effort required to on-board to remote execution.
http_archive(
name = "bazel_toolchains",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/cdea5b8675914d0a354d89f108de5d28e54e0edc.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/cdea5b8675914d0a354d89f108de5d28e54e0edc.tar.gz",
],
strip_prefix = "bazel-toolchains-cdea5b8675914d0a354d89f108de5d28e54e0edc",
sha256 = "cefb6ccf86ca592baaa029bcef04148593c0efe8f734542f10293ea58f170715",
)
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
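# Example WORKSPACE usage (a sketch only; the repository label
# "@build_bazel_rules_nodejs" is the conventional name and is assumed here, not
# something this file defines):
#   load("@build_bazel_rules_nodejs//:package.bzl", "rules_nodejs_dependencies")
#   rules_nodejs_dependencies()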
| 41.736842 | 136 | 0.7314 |
a4c3374d58963f310db097125c4b527ef8663f74 | 4,094 | py | Python | app/main/views/brief_responses.py | robot2051/dto-digitalmarketplace-api | de5b1fe67a0ce2d665633b083005263739080903 | [
"MIT"
] | null | null | null | app/main/views/brief_responses.py | robot2051/dto-digitalmarketplace-api | de5b1fe67a0ce2d665633b083005263739080903 | [
"MIT"
] | null | null | null | app/main/views/brief_responses.py | robot2051/dto-digitalmarketplace-api | de5b1fe67a0ce2d665633b083005263739080903 | [
"MIT"
] | null | null | null |
from flask import jsonify, abort, request, current_app
from sqlalchemy.exc import IntegrityError, DataError
from dmapiclient.audit import AuditTypes
from .. import main
from ...models import db, Brief, BriefResponse, AuditEvent
from ...utils import (
get_json_from_request, json_has_required_keys, get_int_or_400,
pagination_links, get_valid_page_or_1, url_for,
validate_and_return_updater_request,
)
from ...brief_utils import get_supplier_service_eligible_for_brief
from ...service_utils import validate_and_return_supplier
@main.route('/brief-responses', methods=['POST'])
def create_brief_response():
json_payload = get_json_from_request()
updater_json = validate_and_return_updater_request()
json_has_required_keys(json_payload, ['briefResponses'])
brief_response_json = json_payload['briefResponses']
json_has_required_keys(brief_response_json, ['briefId', 'supplierCode'])
try:
brief = Brief.query.get(brief_response_json['briefId'])
except DataError:
brief = None
if brief is None:
abort(400, "Invalid brief ID '{}'".format(brief_response_json['briefId']))
if brief.status != 'live':
abort(400, "Brief must be live")
if brief.framework.status != 'live':
abort(400, "Brief framework must be live")
supplier = validate_and_return_supplier(brief_response_json)
# FIXME: The UK marketplace checks that the supplier has a relevant service and that its day rate meets the budget.
# This Australian marketplace should do that too, but Australian suppliers haven't created services yet.
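    # A minimal sketch of what that check might look like with the helper imported
    # above (illustrative only; the helper's exact signature and return value are
    # assumptions, not confirmed by this module):
    #   supplier_service = get_supplier_service_eligible_for_brief(supplier, brief)
    #   if supplier_service is None:
    #       abort(400, "Supplier does not have a service eligible for this brief")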
# Check if brief response already exists from this supplier
if BriefResponse.query.filter(BriefResponse.supplier == supplier, BriefResponse.brief == brief).first():
abort(400, "Brief response already exists for supplier '{}'".format(supplier.code))
brief_response = BriefResponse(
data=brief_response_json,
supplier=supplier,
brief=brief,
)
brief_response.validate()
db.session.add(brief_response)
try:
db.session.flush()
except IntegrityError as e:
db.session.rollback()
abort(400, e.orig)
audit = AuditEvent(
audit_type=AuditTypes.create_brief_response,
user=updater_json['updated_by'],
data={
'briefResponseId': brief_response.id,
'briefResponseJson': brief_response_json,
},
db_object=brief_response,
)
db.session.add(audit)
db.session.commit()
return jsonify(briefResponses=brief_response.serialize()), 201
@main.route('/brief-responses/<int:brief_response_id>', methods=['GET'])
def get_brief_response(brief_response_id):
brief_response = BriefResponse.query.filter(
BriefResponse.id == brief_response_id
).first_or_404()
return jsonify(briefResponses=brief_response.serialize())
@main.route('/brief-responses', methods=['GET'])
def list_brief_responses():
page = get_valid_page_or_1()
brief_id = get_int_or_400(request.args, 'brief_id')
supplier_code = get_int_or_400(request.args, 'supplier_code')
brief_responses = BriefResponse.query
if supplier_code is not None:
brief_responses = brief_responses.filter(BriefResponse.supplier_code == supplier_code)
if brief_id is not None:
brief_responses = brief_responses.filter(BriefResponse.brief_id == brief_id)
if brief_id or supplier_code:
return jsonify(
briefResponses=[brief_response.serialize() for brief_response in brief_responses.all()],
links={'self': url_for('.list_brief_responses', supplier_code=supplier_code, brief_id=brief_id)}
)
brief_responses = brief_responses.paginate(
page=page,
per_page=current_app.config['DM_API_BRIEF_RESPONSES_PAGE_SIZE']
)
return jsonify(
briefResponses=[brief_response.serialize() for brief_response in brief_responses.items],
links=pagination_links(
brief_responses,
'.list_brief_responses',
request.args
)
)
| 33.557377 | 119 | 0.712262 |
57d996c7074b89e897d2a243304c5576bfbf7f86 | 2,514 | py | Python | networkAnalysis.py | jmotis/twitter-networks | 6a91ed659143f59002f6eae736b072ae88f06647 | [
"MIT"
] | 10 | 2019-01-05T01:04:07.000Z | 2021-07-15T02:09:02.000Z | networkAnalysis.py | jmotis/twitter-networks | 6a91ed659143f59002f6eae736b072ae88f06647 | [
"MIT"
] | null | null | null | networkAnalysis.py | jmotis/twitter-networks | 6a91ed659143f59002f6eae736b072ae88f06647 | [
"MIT"
] | 3 | 2019-03-15T23:14:04.000Z | 2020-05-04T15:56:08.000Z | # import modules
import csv
import networkx as nx
from operator import itemgetter
import community
# define the function to create and analyze the network
def create_network(file):
with open(file, 'r') as edgecsv:
edgereader = csv.reader(edgecsv)
edges = [tuple(e) for e in edgereader][1:]
nodes = []
for e in edges:
n1 = e[0]
n2 = e[1]
if (n1 in nodes) and (n2 in nodes):
continue
elif (n1 in nodes) and (n2 != ""):
nodes.append(n2)
elif (n1 != ""):
nodes.append(n1)
#print(nodes)
G = nx.Graph()
G.add_nodes_from(nodes)
#print(G.nodes())
G.add_edges_from(edges)
return G
def analyze_network(N):
print(nx.info(N))
density = nx.density(N)
print("Network density: ", density)
if (nx.is_connected(N)):
diameter = nx.diameter(N)
print("Network is connected, with diameter: ", diameter)
else:
components = nx.connected_components(N)
largest_component = max(components, key=len)
subN = N.subgraph(largest_component)
diameter = nx.diameter(subN)
print("Network is disconnected and its largest connected component has diameter: ", diameter)
triadic_closure = nx.transitivity(N)
print("Triadic closure: ", triadic_closure)
degree_dict = dict(N.degree(N.nodes()))
nx.set_node_attributes(N, degree_dict, 'degree')
sorted_degree = sorted(degree_dict.items(), key=itemgetter(1), reverse=True)
print("Top 10 nodes by degree: ")
for d in sorted_degree[:10]:
print(d)
betweenness_dict = nx.betweenness_centrality(N)
nx.set_node_attributes(N, betweenness_dict, 'betweenness')
sorted_betweenness = sorted(betweenness_dict.items(), key=itemgetter(1), reverse=True)
print("Top 10 nodes by betweenness centrality: ")
for d in sorted_betweenness[:10]:
print(d)
eigenvector_dict = nx.eigenvector_centrality(N)
nx.set_node_attributes(N, eigenvector_dict, 'eigenvector')
sorted_eigenvector = sorted(eigenvector_dict.items(), key=itemgetter(1), reverse=True)
print("Top 10 nodes by eigenvector centrality: ")
for d in sorted_eigenvector[:10]:
print(d)
# begin program
file_name = input('What is the exact name of your csv file (include the .csv) ')
network = create_network(file_name)
analyze_network(network)
# end program | 32.230769 | 101 | 0.633652 |
0696b9f4e25399cfc4f287816c1e31a8ac178434 | 2,133 | py | Python | C3CTF/2019 36C3/numb_theory/solver.py | PurpEth/solved-hacking-problem | 6f289d1647eb9c091caa580c7aae673e3ba02952 | [
"Unlicense"
] | 1 | 2021-08-24T22:16:41.000Z | 2021-08-24T22:16:41.000Z | C3CTF/2019 36C3/numb_theory/solver.py | PurpEth/solved-hacking-problem | 6f289d1647eb9c091caa580c7aae673e3ba02952 | [
"Unlicense"
] | null | null | null | C3CTF/2019 36C3/numb_theory/solver.py | PurpEth/solved-hacking-problem | 6f289d1647eb9c091caa580c7aae673e3ba02952 | [
"Unlicense"
] | null | null | null | # encoding: utf-8
from pwn import *
r = 4
def mul(a, b):
    # Multiplication in Z_n[x] / (x^4 - 7): schoolbook-multiply the coefficient
    # vectors, then fold every coefficient of degree >= r back down using x^r == 7.
    z = [0, 0]*r
    for i in range(r):
        for j in range(r):
            z[i+j] += a[i]*b[j]
    while len(z) > r:
        y = z.pop()
        # sum(map(eval, 'yyyyyyy')) evaluates 'y' seven times, i.e. adds 7*y
        z[-r] += sum(map(eval, 'yyyyyyy'))
    return tuple(t % n for t in z)
def exp(x, k):
    # Right-to-left square-and-multiply; y starts out as the multiplicative
    # identity 1, encoded as [True, False, False, False].
    y = [not i for i in range(r)]
    for i in range(k.bit_length()):
        if (k >> i) & 1:
            y = mul(y, x)
        x = mul(x, x)
    return y
def H(msg):
h = hashlib.sha256(msg.encode('utf-8')).digest()
v = tuple(c+1 for c in struct.unpack('>%sH' % r, h[:r+r]))
return v
def get_sig(s):
c.recvuntil("> ")
c.sendline(s)
sig = tuple(map(int, c.recvline().strip().split('|')))
return sig
'''
[+] Found rand-rand pair
s1: BAdfD4jK6be7EHga
s2: kF89c7hfHDgEBj25
H(s1): Block (39336, 43864, 26148, 33266)
H(s2): Block (19668, 21932, 13074, 16633)
[+] Found rand-target pair
s1: p!MoNw]4Os
s2: Hello hxp! I would like the flag, please Thank you�
c1: 1083943, c2: 729790
H(s1): Block (43022, 14508, 39894, 10398)
H(s2): Block (21511, 7254, 19947, 5199)
'''
while True:
c = remote('78.46.199.5', 4444)
n = int(c.recvline().strip())
sig1 = get_sig("BAdfD4jK6be7EHga")
sig2 = get_sig("kF89c7hfHDgEBj25")
with open("divider.sage", "w") as f:
f.write("""n = {}
Z = IntegerModRing(n, is_field=True)
F.<x> = PolynomialRing(Z)
Q.<y> = F.quotient(x^4 - 7)
sig1 = Q({})
sig2 = Q({})
print((sig1^-1 * sig2).list())""".format(n, sig1, sig2))
out = subprocess.check_output(["sage", "divider.sage"])
sig_for_inv2 = tuple(eval(out))
assert mul(sig1, sig_for_inv2) == sig2
sig3 = get_sig("p!MoNw]4Os")
forged = mul(sig3, sig_for_inv2)
forged_str = "|".join(map(str, forged))
c.recvuntil("> ")
c.sendline(u'Hello hxp! I would like the flag, please{} Thank you{} Signature: {}'.
format(unichr(1083943), unichr(729790), forged_str).encode('utf-8'))
# this is probabilistic
# hxp{Num6er_Th30Ry_m4ke5_mY_Br41n_g0_nUmb}
flag = c.recvline().strip()
c.close()
if 'hxp' in flag:
print flag
break
| 22.452632 | 87 | 0.571027 |
74d0f29993b722725281e1c3c3b6b49960370f32 | 500 | py | Python | wrangle/df/df_to_groupby.py | benjolitz/wrangle | bac0c2affe38fd35db171e2a1c1e835355778459 | [
"MIT"
] | 17 | 2018-07-29T20:02:55.000Z | 2022-02-27T20:58:48.000Z | wrangle/df/df_to_groupby.py | benjolitz/wrangle | bac0c2affe38fd35db171e2a1c1e835355778459 | [
"MIT"
] | 24 | 2018-07-10T14:41:40.000Z | 2022-01-31T19:44:32.000Z | wrangle/df/df_to_groupby.py | benjolitz/wrangle | bac0c2affe38fd35db171e2a1c1e835355778459 | [
"MIT"
] | 10 | 2019-07-29T03:36:47.000Z | 2022-03-05T12:29:59.000Z | from ..utils.groupby_func import groupby_func
def df_to_groupby(data, by, func):
    '''Takes in a dataframe and returns it grouped by the given column and aggregated with the given function.
data : dataframe
A pandas dataframe
by : str
The column by which the grouping is done
func : str
The function to be used for grouping by: 'median', 'mean', 'first',
'last', 'std', 'mode', 'max', 'min', 'sum', 'random', 'freq', 'string'.
'''
return groupby_func(data=data.groupby(by), func=func)
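# Illustrative usage sketch (assumes a pandas DataFrame with a 'species' column
# and a numeric 'weight' column; names are only examples):
#
#   df = pandas.DataFrame({'species': ['a', 'a', 'b'], 'weight': [1.0, 3.0, 2.0]})
#   df_to_groupby(df, by='species', func='mean')   # mean weight per species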
| 27.777778 | 79 | 0.624 |
0de86af6cbde50d89bb9b4c2f63829365f169688 | 25,221 | py | Python | hw/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/pygen_src/riscv_instr_gen_config.py | asb/opentitan | af68ff5041b10c81e97adc075a4d042f8ac7ab20 | [
"Apache-2.0"
] | 698 | 2019-05-02T17:03:07.000Z | 2022-03-30T21:23:13.000Z | hw/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/pygen_src/riscv_instr_gen_config.py | asb/opentitan | af68ff5041b10c81e97adc075a4d042f8ac7ab20 | [
"Apache-2.0"
] | 1,021 | 2019-05-03T12:56:07.000Z | 2022-03-31T21:53:39.000Z | hw/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/pygen_src/riscv_instr_gen_config.py | asb/opentitan | af68ff5041b10c81e97adc075a4d042f8ac7ab20 | [
"Apache-2.0"
] | 326 | 2019-05-03T10:11:53.000Z | 2022-03-28T15:35:22.000Z | """
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import sys
import math
import logging
import argparse
import vsc
from importlib import import_module
from pygen_src.riscv_instr_pkg import (mtvec_mode_t, f_rounding_mode_t,
riscv_reg_t, privileged_mode_t,
riscv_instr_group_t, data_pattern_t,
riscv_instr_category_t, satp_mode_t,
mem_region_t)
@vsc.randobj
class riscv_instr_gen_config:
def __init__(self):
self.main_program_instr_cnt = vsc.rand_int32_t() # count of main_prog
self.sub_program_instr_cnt = [] # count of sub_prog
self.debug_program_instr_cnt = 0 # count of debug_rom
self.debug_sub_program_instr_cnt = [] # count of debug sub_progrms
self.max_directed_instr_stream_seq = 20
self.data_page_pattern = vsc.rand_enum_t(data_pattern_t)
self.init_delegation()
self.argv = self.parse_args()
self.args_dict = vars(self.argv)
global rcs
rcs = import_module("pygen_src.target." + self.argv.target + ".riscv_core_setting")
self.m_mode_exception_delegation = {}
self.s_mode_exception_delegation = {}
self.m_mode_interrupt_delegation = {}
self.s_mode_interrupt_delegation = {}
# init_privileged_mode default to MACHINE_MODE
self.init_privileged_mode = privileged_mode_t.MACHINE_MODE
self.mstatus = vsc.rand_bit_t(rcs.XLEN - 1)
self.mie = vsc.rand_bit_t(rcs.XLEN - 1)
self.sstatus = vsc.rand_bit_t(rcs.XLEN - 1)
self.sie = vsc.rand_bit_t(rcs.XLEN - 1)
self.ustatus = vsc.rand_bit_t(rcs.XLEN - 1)
self.uie = vsc.rand_bit_t(rcs.XLEN - 1)
self.mstatus_mprv = vsc.rand_bit_t(1)
self.mstatus_mxr = vsc.rand_bit_t(1)
self.mstatus_sum = vsc.rand_bit_t(1)
self.mstatus_tvm = vsc.rand_bit_t(1)
self.mstatus_fs = vsc.rand_bit_t(2)
self.mstatus_vs = vsc.rand_bit_t(2)
self.mtvec_mode = vsc.rand_enum_t(mtvec_mode_t)
self.tvec_alignment = vsc.rand_uint8_t(self.argv.tvec_alignment)
self.fcsr_rm = vsc.rand_enum_t(f_rounding_mode_t)
self.enable_sfence = vsc.rand_bit_t(1)
self.gpr = vsc.rand_list_t(vsc.enum_t(riscv_reg_t), sz=4)
self.scratch_reg = vsc.rand_enum_t(riscv_reg_t)
self.pmp_reg = vsc.rand_enum_t(riscv_reg_t)
self.sp = vsc.rand_enum_t(riscv_reg_t)
self.tp = vsc.rand_enum_t(riscv_reg_t)
self.ra = vsc.rand_enum_t(riscv_reg_t)
self.check_misa_init_val = 0
self.check_xstatus = 1
self.virtual_addr_translation_on = 0
# Commenting out for now
# vector_cfg = riscv_vector_cfg # TODO
# pmp_cfg = riscv_pmp_cfg # TODO
self.mem_region = vsc.list_t(mem_region_t())
self.amo_region = vsc.list_t(mem_region_t())
self.s_mem_region = vsc.list_t(mem_region_t())
self.mem_region.extend([mem_region_t(name = "region_0", size_in_bytes = 4096, xwr = 8),
mem_region_t(name = "region_1", size_in_bytes = 4096, xwr = 8)])
self.amo_region.extend([mem_region_t(name = "amo_0", size_in_bytes = 64, xwr = 8)])
self.s_mem_region.extend([mem_region_t(name = "s_region_0", size_in_bytes = 4096, xwr = 8),
mem_region_t(name = "s_region_1", size_in_bytes = 4096, xwr = 8)])
self.stack_len = 5000
self.kernel_stack_len = 4000
self.kernel_program_instr_cnt = 400
# list of main implemented CSRs
self.invalid_priv_mode_csrs = []
self.num_of_sub_program = self.argv.num_of_sub_program
self.instr_cnt = self.argv.instr_cnt
self.num_of_tests = self.argv.num_of_tests
self.no_data_page = self.argv.no_data_page
self.no_branch_jump = self.argv.no_branch_jump
self.no_load_store = self.argv.no_load_store
self.no_csr_instr = self.argv.no_csr_instr
self.no_ebreak = self.argv.no_ebreak
self.no_dret = self.argv.no_dret
self.no_fence = self.argv.no_fence
self.no_wfi = self.argv.no_wfi
self.enable_unaligned_load_store = self.argv.enable_unaligned_load_store
self.illegal_instr_ratio = self.argv.illegal_instr_ratio
self.hint_instr_ratio = self.argv.hint_instr_ratio
if self.argv.num_of_harts is None:
self.num_of_harts = rcs.NUM_HARTS
else:
self.num_of_harts = self.argv.num_of_harts
self.fix_sp = self.argv.fix_sp
self.use_push_data_section = self.argv.use_push_data_section
self.boot_mode_opts = self.argv.boot_mode
# self.isa = self.argv.isa
if self.boot_mode_opts:
logging.info("Got boot mode option - %0s", self.boot_mode_opts)
if self.boot_mode_opts == "m":
self.init_privileged_mode = privileged_mode_t.MACHINE_MODE
elif self.boot_mode_opts == "s":
self.init_privileged_mode = privileged_mode_t.SUPERVISOR_MODE
elif self.boot_mode_opts == "u":
self.init_privileged_mode = privileged_mode_t.USER_MODE
else:
logging.error("Illegal boot mode option - %0s", self.boot_mode_opts)
self.enable_page_table_exception = self.argv.enable_page_table_exception
self.no_directed_instr = self.argv.no_directed_instr
self.asm_test_suffix = self.argv.asm_test_suffix
self.enable_interrupt = self.argv.enable_interrupt
self.enable_nested_interrupt = self.argv.enable_nested_interrupt
self.enable_timer_irq = self.argv.enable_timer_irq
self.bare_program_mode = self.argv.bare_program_mode
self.enable_illegal_csr_instruction = self.argv.enable_illegal_csr_instruction
self.enable_access_invalid_csr_level = self.argv.enable_access_invalid_csr_level
self.enable_misaligned_instr = self.argv.enable_misaligned_instr
self.enable_dummy_csr_write = self.argv.enable_dummy_csr_write
self.randomize_csr = self.argv.randomize_csr
self.allow_sfence_exception = self.argv.allow_sfence_exception
self.no_delegation = self.argv.no_delegation
self.force_m_delegation = self.argv.force_m_delegation
self.force_s_delegation = self.argv.force_s_delegation
self.support_supervisor_mode = 0
self.disable_compressed_instr = self.argv.disable_compressed_instr
self.require_signature_addr = self.argv.require_signature_addr
if self.require_signature_addr:
self.signature_addr = int(self.argv.signature_addr, 16)
else:
self.signature_addr = 0xdeadbeef
self.gen_debug_section = self.argv.gen_debug_section
self.enable_ebreak_in_debug_rom = self.argv.enable_ebreak_in_debug_rom
self.set_dcsr_ebreak = self.argv.set_dcsr_ebreak
self.num_debug_sub_program = self.argv.num_debug_sub_program
self.enable_debug_single_step = self.argv.enable_debug_single_step
self.single_step_iterations = 0
self.set_mstatus_tw = self.argv.set_mstatus_tw
self.set_mstatus_mprv = vsc.bit_t(1)
self.set_mstatus_mprv = self.argv.set_mstatus_mprv
self.min_stack_len_per_program = 10 * (rcs.XLEN / 8)
self.max_stack_len_per_program = 16 * (rcs.XLEN / 8)
self.max_branch_step = 20
self.reserved_regs = vsc.list_t(vsc.enum_t(riscv_reg_t))
self.enable_floating_point = self.argv.enable_floating_point
self.enable_vector_extension = self.argv.enable_vector_extension
self.enable_b_extension = self.argv.enable_b_extension
self.enable_bitmanip_groups = self.argv.enable_bitmanip_groups
self.dist_control_mode = 0
self.category_dist = {}
self.march_isa = self.argv.march_isa
if len(self.march_isa) != 0:
rcs.supported_isa.append(self.march_isa)
if riscv_instr_group_t.RV32C not in rcs.supported_isa:
self.disable_compressed_instr = 1
self.setup_instr_distribution()
self.get_invalid_priv_lvl_csr()
# Helpers fields to build the vsc constraints
self.supported_interrupt_mode = vsc.list_t(vsc.enum_t(mtvec_mode_t))
self.XLEN = vsc.uint32_t()
self.SATP_MODE = vsc.enum_t(satp_mode_t)
self.init_privil_mode = vsc.enum_t(privileged_mode_t)
self.init_privil_mode = self.init_privileged_mode
self.supported_interrupt_mode = rcs.supported_interrupt_mode
self.XLEN = rcs.XLEN
self.SATP_MODE = rcs.SATP_MODE
self.tvec_ceil = vsc.uint32_t()
self.tvec_ceil = math.ceil(math.log2((self.XLEN * 4) / 8))
@vsc.constraint
def default_c(self):
self.main_program_instr_cnt in vsc.rangelist(vsc.rng(10, self.instr_cnt))
@vsc.constraint
def sp_tp_c(self):
if self.fix_sp:
self.sp == riscv_reg_t.SP
self.sp != self.tp
self.sp.not_inside(vsc.rangelist(riscv_reg_t.GP,
riscv_reg_t.RA, riscv_reg_t.ZERO))
self.tp.not_inside(vsc.rangelist(riscv_reg_t.GP,
riscv_reg_t.RA, riscv_reg_t.ZERO))
@vsc.constraint
def gpr_c(self):
with vsc.foreach(self.gpr, idx = True) as i:
self.gpr[i].not_inside(vsc.rangelist(self.sp, self.tp, self.scratch_reg, self.pmp_reg,
riscv_reg_t.ZERO, riscv_reg_t.RA, riscv_reg_t.GP))
vsc.unique(self.gpr)
@vsc.constraint
def ra_c(self):
self.ra != riscv_reg_t.SP
self.ra != riscv_reg_t.TP
self.ra != riscv_reg_t.ZERO
@vsc.constraint
def reserve_scratch_reg_c(self):
self.scratch_reg.not_inside(vsc.rangelist(riscv_reg_t.ZERO, self.sp,
self.tp, self.ra, riscv_reg_t.GP))
@vsc.constraint
def mtvec_c(self):
self.mtvec_mode.inside(vsc.rangelist(self.supported_interrupt_mode))
with vsc.if_then(self.mtvec_mode == mtvec_mode_t.DIRECT):
vsc.soft(self.tvec_alignment == 2)
with vsc.else_then():
vsc.soft(self.tvec_alignment == self.tvec_ceil)
@vsc.constraint
def floating_point_c(self):
with vsc.if_then(self.enable_floating_point):
self.mstatus_fs == 1
with vsc.else_then():
self.mstatus_fs == 0
@vsc.constraint
def mstatus_c(self):
with vsc.if_then(self.set_mstatus_mprv == 1):
self.mstatus_mprv == 1
with vsc.else_then():
self.mstatus_mprv == 0
with vsc.if_then(self.SATP_MODE == satp_mode_t.BARE):
self.mstatus_mxr == 0
self.mstatus_sum == 0
self.mstatus_tvm == 0
def check_setting(self):
support_64b = 0
support_128b = 0
# check the valid isa support
for group in rcs.supported_isa:
if group in [riscv_instr_group_t.RV64I, riscv_instr_group_t.RV64M,
riscv_instr_group_t.RV64A, riscv_instr_group_t.RV64F,
riscv_instr_group_t.RV64D, riscv_instr_group_t.RV64C,
riscv_instr_group_t.RV64B]:
support_64b = 1
logging.info("support_64b = {}".format(support_64b))
logging.debug("Supported ISA = {}".format(group.name))
elif group in [riscv_instr_group_t.RV128I, riscv_instr_group_t.RV128C]:
support_128b = 1
logging.info("support_128b = {}".format(support_128b))
logging.debug("Supported ISA = {}".format(group.name))
        if support_128b and rcs.XLEN != 128:
            logging.critical("XLEN should be set to 128 based on "
                             "riscv_core_setting.supported_isa setting")
            logging.info("XLEN Value = {}".format(rcs.XLEN))
            sys.exit("XLEN is not equal to 128, set it accordingly!")
        if not(support_128b) and support_64b and rcs.XLEN != 64:
            logging.critical("XLEN should be set to 64 based on "
                             "riscv_core_setting.supported_isa setting")
            logging.info("XLEN Value = {}".format(rcs.XLEN))
            sys.exit("XLEN is not equal to 64, set it accordingly!")
        if not(support_128b or support_64b) and rcs.XLEN != 32:
            logging.critical("XLEN should be set to 32 based on "
                             "riscv_core_setting.supported_isa setting")
            logging.info("XLEN Value = {}".format(rcs.XLEN))
            sys.exit("XLEN is not equal to 32, set it accordingly!")
if not(support_128b or support_64b) and \
not(rcs.SATP_MODE in [satp_mode_t.SV32, satp_mode_t.BARE]):
logging.critical("SATP mode {} is not supported for RV32G ISA"
.format(rcs.SATP_MODE.name))
sys.exit("Supported SATP mode is not provided")
def setup_instr_distribution(self):
if self.dist_control_mode:
category_iter = iter([x for x in riscv_instr_category_t.__members__])
category = riscv_instr_category_t(0)
while True:
opts = "dist_{}".format(category.name)
opts = opts.lower()
if self.args_dict[opts]:
self.category_dist[category] = self.args_dict[opts]
else:
self.category_dist[category] = 10
logging.info("Set dist[{}] = {}".format(category, self.category_dist[category]))
category = next(category_iter)
if category != riscv_instr_category_t(0):
break
# TODO
def init_delegation(self):
pass
def pre_randomize(self):
for mode in rcs.supported_privileged_mode:
if mode == privileged_mode_t.SUPERVISOR_MODE:
self.support_supervisor_mode = 1
def get_non_reserved_gpr(self):
pass
def post_randomize(self):
self.reserved_regs.append(self.tp)
self.reserved_regs.append(self.sp)
self.reserved_regs.append(self.scratch_reg)
self.min_stack_len_per_program = 2 * (rcs.XLEN / 8)
logging.info("min_stack_len_per_program value = {}"
.format(self.min_stack_len_per_program))
self.check_setting() # check if the setting is legal
if self.init_privileged_mode == privileged_mode_t.USER_MODE:
logging.info("mode = USER_MODE")
self.no_wfi = 1
def get_invalid_priv_lvl_csr(self):
invalid_lvl = []
# Debug CSRs are inaccessible from all but Debug Mode
# and we cannot boot into Debug Mode.
invalid_lvl.append("D")
if self.init_privileged_mode == privileged_mode_t.MACHINE_MODE:
pass
elif self.init_privileged_mode == privileged_mode_t.SUPERVISOR_MODE:
invalid_lvl.append("M")
logging.info("supr_mode---")
logging.debug(invalid_lvl)
elif self.init_privileged_mode == privileged_mode_t.USER_MODE:
invalid_lvl.append("S")
invalid_lvl.append("M")
logging.info("usr_mode---")
logging.debug(invalid_lvl)
else:
logging.critical("Unsupported initialization privilege mode")
sys.exit(1)
# implemented_csr from riscv_core_setting.py
for csr in rcs.implemented_csr:
if csr in invalid_lvl:
self.invalid_priv_mode_csrs.append(csr)
def parse_args(self):
parse = argparse.ArgumentParser()
parse.add_argument('--num_of_tests', help = 'num_of_tests', type = int, default = 1)
parse.add_argument('--enable_page_table_exception',
help = 'enable_page_table_exception', type = int, default = 0)
parse.add_argument('--enable_interrupt', help = 'enable_interrupt',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_nested_interrupt', help = 'enable_nested_interrupt',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_timer_irq', help = 'enable_timer_irq',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--num_of_sub_program', help = 'num_of_sub_program',
type = int, default = 5)
parse.add_argument('--instr_cnt', help = 'instr_cnt', type = int, default = 200)
parse.add_argument('--tvec_alignment', help = 'tvec_alignment', type = int, default = 2)
parse.add_argument('--no_ebreak', help = 'no_ebreak',
choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_dret', help = 'no_dret', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_wfi', help = 'no_wfi', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_branch_jump', help = 'no_branch_jump',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_load_store', help = 'no_load_store',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_csr_instr', help = 'no_csr_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--fix_sp', help = 'fix_sp',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--use_push_data_section', help = 'use_push_data_section',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_illegal_csr_instruction',
help = 'enable_illegal_csr_instruction', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--enable_access_invalid_csr_level',
help = 'enable_access_invalid_csr_level', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--enable_misaligned_instr', help = 'enable_misaligned_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_dummy_csr_write', help = 'enable_dummy_csr_write',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--allow_sfence_exception', help = 'allow_sfence_exception',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_data_page', help = 'no_data_page',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_directed_instr', help = 'no_directed_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_fence', help = 'no_fence',
choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_delegation', help = 'no_delegation',
choices = [0, 1], type = int, default = 1)
parse.add_argument('--illegal_instr_ratio',
help = 'illegal_instr_ratio', type = int, default = 0)
parse.add_argument('--hint_instr_ratio', help = 'hint_instr_ratio', type = int, default = 0)
parse.add_argument('--num_of_harts', help = 'num_of_harts',
type = int, default = 1)
parse.add_argument('--enable_unaligned_load_store',
help = 'enable_unaligned_load_store', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--force_m_delegation', help = 'force_m_delegation',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--force_s_delegation', help = 'force_s_delegation',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--require_signature_addr', help = 'require_signature_addr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--signature_addr', help = 'signature_addr', default = 0xdeadbeef)
parse.add_argument('--disable_compressed_instr',
help = 'disable_compressed_instr', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--randomize_csr', help = 'randomize_csr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--gen_debug_section', help = 'gen_debug_section',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--bare_program_mode', help = 'bare_program_mode',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--num_debug_sub_program',
help = 'num_debug_sub_program', type = int, default = 0)
parse.add_argument('--enable_ebreak_in_debug_rom',
help = 'enable_ebreak_in_debug_rom', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--set_dcsr_ebreak', help = 'set_dcsr_ebreak',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_debug_single_step',
help = 'enable_debug_single_step', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--set_mstatus_tw', help = 'set_mstatus_tw',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--set_mstatus_mprv', help = 'set_mstatus_mprv',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_floating_point', help = 'enable_floating_point',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_vector_extension', help = 'enable_vector_extension',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_b_extension', help = 'enable_b_extension',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_bitmanip_groups', help = 'enable_bitmanip_groups',
default = ['ZBB', 'ZBS', 'ZBP', 'ZBE', 'ZBF',
'ZBC', 'ZBR', 'ZBM', 'ZBT', 'ZB_TMP'], nargs = '*')
parse.add_argument('--boot_mode', help = 'boot_mode', default = "")
parse.add_argument('--asm_test_suffix', help = 'asm_test_suffix', default = "")
parse.add_argument('--march_isa', help = 'march_isa', default = [],
choices = [i.name for i in riscv_instr_group_t], nargs = '*')
for i in range(self.max_directed_instr_stream_seq):
parse.add_argument('--directed_instr_{}'.format(i),
help = 'directed_instr_{}'.format(i), default = "")
parse.add_argument('--stream_name_{}'.format(i),
help = 'stream_name_{}'.format(i), default = "")
parse.add_argument('--stream_freq_{}'.format(i),
help = 'stream_freq_{}'.format(i), default = 4)
parse.add_argument('--start_idx', help='start index', type=int, default=0)
parse.add_argument('--asm_file_name', help='asm file name',
default="riscv_asm_test")
parse.add_argument('--log_file_name', help='log file name',
default="")
parse.add_argument('--target', help='target', default="rv32imc")
parse.add_argument('--gen_test', help='gen_test', default="riscv_instr_base_test")
parse.add_argument("--enable_visualization", action="store_true", default=False,
help="Enabling coverage report visualization for pyflow")
parse.add_argument('--trace_csv', help='List of csv traces', default="")
args, unknown = parse.parse_known_args()
# TODO
'''
if ($value$plusargs("tvec_alignment=%0d", tvec_alignment)) begin
tvec_alignment.rand_mode(0);
end
vector_cfg = riscv_vector_cfg::type_id::create("vector_cfg");
pmp_cfg = riscv_pmp_cfg::type_id::create("pmp_cfg");
pmp_cfg.rand_mode(pmp_cfg.pmp_randomize);
pmp_cfg.initialize(require_signature_addr);
setup_instr_distribution()
get_invalid_priv_lvl_csr();
'''
args = parse.parse_args()
return args
cfg = riscv_instr_gen_config()
| 50.141153 | 100 | 0.610682 |
cc1e31249d6b4c11bb2f723642c83f666730c2f7 | 133 | py | Python | scalation_kernel/__main__.py | KevinBonanno/scalation_kernel | 7686ac72216057fbab72e805d0dfab58bbb24feb | [
"MIT"
] | null | null | null | scalation_kernel/__main__.py | KevinBonanno/scalation_kernel | 7686ac72216057fbab72e805d0dfab58bbb24feb | [
"MIT"
] | null | null | null | scalation_kernel/__main__.py | KevinBonanno/scalation_kernel | 7686ac72216057fbab72e805d0dfab58bbb24feb | [
"MIT"
] | null | null | null | from ipykernel.kernelapp import IPKernelApp
from . import ScalaTionKernel
IPKernelApp.launch_instance(kernel_class=ScalaTionKernel)
| 26.6 | 57 | 0.879699 |
3af0d288cb780627ae5b8806a162337fb55f9570 | 3,857 | py | Python | serrano/tokens.py | rysdyk/serrano | 926d874b19efdd18e359d32bca601058b655b288 | [
"BSD-2-Clause"
] | null | null | null | serrano/tokens.py | rysdyk/serrano | 926d874b19efdd18e359d32bca601058b655b288 | [
"BSD-2-Clause"
] | null | null | null | serrano/tokens.py | rysdyk/serrano | 926d874b19efdd18e359d32bca601058b655b288 | [
"BSD-2-Clause"
] | 1 | 2020-01-16T15:26:37.000Z | 2020-01-16T15:26:37.000Z | import sys
import string
import hashlib
from datetime import datetime
from random import SystemRandom
from django.conf import settings as django_settings
from django.utils.http import int_to_base36, base36_to_int
from serrano.conf import settings
# Hex characters
HEX_CHARS = string.lowercase[:6] + string.digits
# System-level random generator
random = SystemRandom()
def generate_random_token(size=32, max_attempts=100, test=None):
"""Generates a random token that can be tested for uniqueness before
returning.
"""
for _ in xrange(max_attempts):
key = ''.join(random.choice(HEX_CHARS) for i in xrange(size))
if not test or test(key):
return key
raise ValueError('Maximum attempts made to generate key.')
class TokenGenerator(object):
def _total_seconds(self, dt):
"""
        Computes the number of whole seconds between ``dt`` and 2001-01-01.
        Ideally, this is done just using the built-in ``total_seconds`` method
        of the resulting timedelta, but if the Python version we are running
        on is < 2.7 we manually compute the number of seconds in the delta
        and return that. The
manual computation method comes from the Python docs here:
http://docs.python.org/2/library/datetime.html#datetime.timedelta.total_seconds # noqa
NOTE: Manual computation opens us up to possible loss of precision but
it's the best we can do in Python < 2.7.
"""
# The number of seconds in a day
SEC_PER_DAY = 24 * 3600
# The number of microseconds in a second
MICROSEC_PER_SEC = 10**6
timedelta = (dt - datetime(2001, 1, 1))
if sys.version_info >= (2, 7):
return int(timedelta.total_seconds())
else:
# Get the microseconds from the timedelta itself.
microseconds = timedelta.microseconds
# Add the microseconds contribution from the seconds of the
# timedelta.
microseconds += timedelta.seconds * MICROSEC_PER_SEC
# Add the microseconds contribution from the days property of the
# timedelta.
microseconds += timedelta.days * SEC_PER_DAY * MICROSEC_PER_SEC
# Convert the microseconds to seconds
return int(microseconds / MICROSEC_PER_SEC)
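        # Worked example of the manual (< 2.7) branch above: a delta of
        # 1 day, 2 seconds and 500000 microseconds accumulates
        # 500000 + 2*10**6 + 1*86400*10**6 = 86,402,500,000 microseconds,
        # which divides down to 86402 seconds.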
def _make(self, user, timestamp):
ts_b36 = int_to_base36(timestamp)
total = (django_settings.SECRET_KEY + unicode(user.pk) +
user.password + unicode(timestamp))
digest = hashlib.sha1(total).hexdigest()[::2]
return '{0}-{1}-{2}'.format(user.pk, ts_b36, digest)
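        # The resulting token therefore looks like (hypothetical values):
        #   "42-2f3k1a-0a1b2c3d4e5f60718293"
        # i.e. "<user pk>-<base36 timestamp>-<every other hex char of the sha1 digest>"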
@property
def timeout(self):
return settings.TOKEN_TIMEOUT or django_settings.SESSION_COOKIE_AGE
def split(self, token):
try:
return token.split('-', 1)[0], token
except AttributeError:
return None, token
def make(self, user):
return self._make(user, self._total_seconds(datetime.now()))
def check(self, user, token):
# Parse the token
try:
pk, ts_b36, hash = token.split('-')
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if self._make(user, ts) != token:
return False
# Check the timestamp is within limit
if (self._total_seconds(datetime.now()) - ts) > self.timeout:
return False
return True
token_generator = TokenGenerator()
def get_request_token(request):
"Attempts to retrieve a token from the request."
if 'token' in request.REQUEST:
return request.REQUEST['token']
if 'HTTP_API_TOKEN' in request.META:
return request.META['HTTP_API_TOKEN']
return ''
| 32.141667 | 95 | 0.644802 |
220578fb296d36f9ee65d51f7d5ad17ce7e2ad74 | 5,990 | py | Python | marsi/io/parsers.py | biosustain/marsi | a903924c9345db26227a558e0064bd940b41e912 | [
"Apache-2.0"
] | null | null | null | marsi/io/parsers.py | biosustain/marsi | a903924c9345db26227a558e0064bd940b41e912 | [
"Apache-2.0"
] | null | null | null | marsi/io/parsers.py | biosustain/marsi | a903924c9345db26227a558e0064bd940b41e912 | [
"Apache-2.0"
] | 1 | 2020-01-16T12:05:09.000Z | 2020-01-16T12:05:09.000Z | # Copyright 2016 Chr. Hansen A/S and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pandas import DataFrame
def parse_kegg_brite(brite_file):
kegg = DataFrame(columns=['group', 'family', 'level', 'target', 'generic_name',
'name', 'drug_type', 'kegg_drug_id'])
with open(brite_file) as kegg_data:
group = None
family = None
generic_name = None
level = None
target = None
i = 0
for line in kegg_data:
line = line.strip("\n")
if line.startswith("A"):
group = line[1:].strip("<b>").strip("<b/>")
if group != "Enzymes":
continue
else:
if line.startswith("B"):
family = line[1:].strip()
level = family
elif line.startswith("C"):
target = line[1:].strip()
elif line.startswith("D"):
generic_name = line[1:].strip()
elif line.startswith("E"):
line = line[1:].strip()
split = line.split()
name = " ".join(split[1:-2])
kegg.loc[i] = [group, family, level, target, generic_name, name, split[-1], split[0]]
i += 1
print("Found %i drugs acting on enzymes" % i)
return kegg
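# Rough sketch of the BRITE hierarchy lines this parser branches on (the
# leading letter marks the level; the placeholders below are illustrative,
# not actual KEGG content):
#
#   A<b>Enzymes</b>
#   B  <target family>
#   C   <target enzyme>
#   D    <generic drug name>
#   E     <kegg drug id> <drug name ...> <...> <drug type>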
def parse_chebi_data(chebi_names_file, chebi_vertice_file, chebi_relation_file):
chebi_names = DataFrame.from_csv(chebi_names_file, sep="\t")
chebi_names.fillna("", inplace=True)
chebi_names.index.name = "id"
chebi_names.columns = map(str.lower, chebi_names.columns)
chebi_names.drop_duplicates('compound_id', keep='last', inplace=True)
chebi_names['adapted'] = chebi_names.adapted.apply(lambda v: v == "T")
chebi_analogues = chebi_names[chebi_names.name.str.contains('analog')]
chebi_antimetabolite = chebi_names[chebi_names.compound_id == 35221]
chebi_relations = DataFrame.from_csv(chebi_relation_file, sep="\t")
chebi_relations.columns = map(str.lower, chebi_relations.columns)
chebi_relations.index.name = "id"
chebi_vertices = DataFrame.from_csv(chebi_vertice_file, sep="\t")
chebi_vertices.columns = map(str.lower, chebi_vertices.columns)
chebi_vertices.index.name = "id"
def retrieve_child_id(compound_id):
return chebi_vertices.loc[compound_id, 'compound_child_id']
chebi_relations['init_compound_id'] = chebi_relations.init_id.apply(retrieve_child_id)
chebi_relations['final_compound_id'] = chebi_relations.final_id.apply(retrieve_child_id)
chebi_is_a = chebi_relations[chebi_relations['type'] == 'is_a']
chebi_has_role = chebi_relations[chebi_relations['type'] == 'has_role']
def recursive_search(roots, relations, universe, aggregated, forward=True):
aggregated = aggregated.append(roots, ignore_index=True)
if forward:
filtered = relations[relations.init_compound_id.isin(roots.compound_id)]
roots = universe[universe.compound_id.isin(filtered.final_compound_id)]
else:
filtered = relations[relations.final_compound_id.isin(roots.compound_id)]
roots = universe[universe.compound_id.isin(filtered.init_compound_id)]
if len(roots) > 0:
aggregated, roots = recursive_search(roots, relations, universe, aggregated, forward)
return aggregated, roots
data = DataFrame(columns=chebi_names.columns)
anti = DataFrame(columns=chebi_names.columns)
data, _ = recursive_search(chebi_analogues, chebi_is_a, chebi_names, data, True)
data, _ = recursive_search(chebi_antimetabolite, chebi_is_a, chebi_names, data, True)
anti, _ = recursive_search(chebi_antimetabolite, chebi_has_role, chebi_names, anti, True)
data, _ = recursive_search(anti, chebi_is_a, chebi_names, data, True)
data['compound_id'] = data.compound_id.apply(int)
return data
def parse_pubchem(summary_file):
pubchem = DataFrame(columns=["name", "molecular_weight", "formula", "uipac_name", "create_date", "compound_id"])
with open(summary_file) as pubchem_data:
row = dict(name=None, molecular_weight=None, formula=None, uipac_name=None,
create_date=None, compound_id=None)
i = 0
for line in pubchem_data:
line = line.strip("\n")
if len(line) == 0:
if any(v for v in row.values()):
pubchem.loc[i] = [row[k] for k in pubchem.columns]
i += 1
row = dict(name=None, molecular_weight=None, formula=None,
uipac_name=None, create_date=None, compound_id=None)
elif re.match("^\d+\.*", line):
row['name'] = line.split(". ", 1)[1].split("; ")[0]
elif line.startswith("MW:"):
match = re.match("MW:\s+(\d+\.\d+).+MF:\s(\w+)", line)
row['molecular_weight'] = float(match.group(1))
row['formula'] = match.group(2)
elif line.startswith("IUPAC name:"):
row['uipac_name'] = line[10:]
elif line.startswith("Create Date:"):
row['create_date'] = line[12:]
elif line.startswith("CID:"):
row['compound_id'] = int(line[5:])
pubchem['compound_id'] = pubchem.compound_id.apply(int)
return pubchem
| 42.785714 | 116 | 0.629048 |
28155f13a4364d86893aa31dcf9f09251c75f197 | 963 | py | Python | backend/mlarchive/wsgi.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 6 | 2022-03-09T23:10:28.000Z | 2022-03-21T05:32:40.000Z | backend/mlarchive/wsgi.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 5 | 2022-03-11T09:39:47.000Z | 2022-03-30T16:48:09.000Z | backend/mlarchive/wsgi.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 4 | 2022-03-04T15:36:19.000Z | 2022-03-28T23:45:44.000Z | import os
import sys
import syslog
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
syslog.openlog("mailarchive", syslog.LOG_PID, syslog.LOG_USER)
# Virtualenv support
virtualenv_activation = os.path.join(os.path.dirname(path), "env", "bin", "activate_this.py")
if os.path.exists(virtualenv_activation):
syslog.syslog("Starting mailarchive wsgi with virtualenv %s" % os.path.dirname(os.path.dirname(virtualenv_activation)))
exec(compile(open(virtualenv_activation, "rb").read(), virtualenv_activation, 'exec'), dict(__file__=virtualenv_activation))
else:
syslog.syslog("Starting mailarchive wsgi without virtualenv")
if not path in sys.path:
sys.path.insert(0, path)
syslog.syslog("path: {}".format(sys.path) )
syslog.syslog("version: {}".format(sys.version_info) )
os.environ['DJANGO_SETTINGS_MODULE'] = 'mlarchive.settings.settings'
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 35.666667 | 128 | 0.76947 |
4c2c8a2ea6494fa8a5f8db732d825142b2f0b9d5 | 7,284 | py | Python | test/functional/feature_nulldummy.py | blinkhash/blinkhash-core | e05662019c2fa4cb2dc3736f38e48492712c23b1 | [
"MIT"
] | 3 | 2021-07-27T16:59:47.000Z | 2021-12-31T20:55:46.000Z | test/functional/feature_nulldummy.py | blinkhash/blinkhash-core | e05662019c2fa4cb2dc3736f38e48492712c23b1 | [
"MIT"
] | null | null | null | test/functional/feature_nulldummy.py | blinkhash/blinkhash-core | e05662019c2fa4cb2dc3736f38e48492712c23b1 | [
"MIT"
] | 1 | 2021-12-31T12:58:23.000Z | 2021-12-31T12:58:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import (
OP_0,
OP_TRUE,
)
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def invalidate_nulldummy_tx(tx):
"""Transform a NULLDUMMY compliant tx (i.e. scriptSig starts with OP_0)
to be non-NULLDUMMY compliant by replacing the dummy with OP_TRUE"""
assert_equal(tx.vin[0].scriptSig[0], OP_0)
tx.vin[0].scriptSig = bytes([OP_TRUE]) + tx.vin[0].scriptSig[1:]
tx.rehash()
class NULLDUMMYTest(BlinkhashTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [[
f'-testactivationheight=segwit@{COINBASE_MATURITY + 5}',
'-addresstype=legacy',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[0].get_wallet_rpc('wmulti')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.address = w0.getnewaddress()
self.pubkey = w0.getaddressinfo(self.address)['pubkey']
self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
if not self.options.descriptors:
# Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
wmulti.importaddress(self.ms_address)
wmulti.importaddress(self.wit_ms_address)
self.coinbase_blocks = self.generate(self.nodes[0], 2) # block height = 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.generate(self.nodes[0], COINBASE_MATURITY) # block height = COINBASE_MATURITY + 2
self.lastblockhash = self.nodes[0].getbestblockhash()
self.lastblockheight = COINBASE_MATURITY + 2
self.lastblocktime = int(time.time()) + self.lastblockheight
self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test1txs, accept=True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
invalidate_nulldummy_tx(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
self.block_submit(self.nodes[0], [test2tx], accept=True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs = [CTransaction(test4tx)]
invalidate_nulldummy_tx(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test4tx], accept=False)
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False)
self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
for i in test6txs:
self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test6txs, with_witness=True, accept=True)
def block_submit(self, node, txs, *, with_witness=False, accept):
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_equal(tmpl['previousblockhash'], self.lastblockhash)
assert_equal(tmpl['height'], self.lastblockheight + 1)
block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1, txlist=txs)
if with_witness:
add_witness_commitment(block)
block.solve()
assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex()))
if accept:
assert_equal(node.getbestblockhash(), block.hash)
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| 52.028571 | 165 | 0.711971 |
8a760040c29e0c477333b62c4cde842c80ea10eb | 18,322 | py | Python | redash/handlers/queries.py | patilvikram/redash | 27259b5abe4480d1112831952eb031560f546937 | [
"BSD-2-Clause"
] | 8 | 2019-05-05T10:33:43.000Z | 2021-07-14T11:21:52.000Z | redash/handlers/queries.py | loaiabdalslam/redash | a00c5a8857bab56d2faaa17e296e6281814b091a | [
"BSD-2-Clause"
] | 1 | 2020-10-07T22:25:04.000Z | 2020-10-07T22:44:50.000Z | redash/handlers/queries.py | loaiabdalslam/redash | a00c5a8857bab56d2faaa17e296e6281814b091a | [
"BSD-2-Clause"
] | 15 | 2019-06-29T13:58:00.000Z | 2022-02-27T14:57:03.000Z | import sqlparse
from flask import jsonify, request, url_for
from flask_login import login_required
from flask_restful import abort
from sqlalchemy.orm.exc import StaleDataError
from funcy import partial
from redash import models, settings
from redash.authentication.org_resolving import current_org
from redash.handlers.base import (BaseResource, filter_by_tags, get_object_or_404,
org_scoped_rule, paginate, routes, order_results as _order_results)
from redash.handlers.query_results import run_query
from redash.permissions import (can_modify, not_view_only, require_access,
require_admin_or_owner,
require_object_modify_permission,
require_permission, view_only)
from redash.utils import collect_parameters_from_request
from redash.serializers import QuerySerializer
from redash.models.parameterized_query import ParameterizedQuery
# Ordering map for relationships
order_map = {
'name': 'lowercase_name',
'-name': '-lowercase_name',
'created_at': 'created_at',
'-created_at': '-created_at',
'schedule': 'schedule',
'-schedule': '-schedule',
'runtime': 'query_results-runtime',
'-runtime': '-query_results-runtime',
'executed_at': 'query_results-retrieved_at',
'-executed_at': '-query_results-retrieved_at',
'created_by': 'users-name',
'-created_by': '-users-name',
}
order_results = partial(
_order_results,
default_order='-created_at',
allowed_orders=order_map,
)
@routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
@login_required
def format_sql_query(org_slug=None):
"""
Formats an SQL query using the Python ``sqlparse`` formatter.
:<json string query: The SQL text to format
:>json string query: Formatted SQL text
"""
arguments = request.get_json(force=True)
query = arguments.get("query", "")
return jsonify({'query': sqlparse.format(query, **settings.SQLPARSE_FORMAT_OPTIONS)})
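# Illustrative request/response for the endpoint above. The exact formatted
# text depends on SQLPARSE_FORMAT_OPTIONS, so the output shown is only a sketch:
#
#   POST <org prefix>/api/queries/format
#   {"query": "select id, name from users where id = 1"}
#   -> {"query": "SELECT id,\n       name\nFROM users\nWHERE id = 1"}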
class QuerySearchResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Search query text, names, and descriptions.
:qparam string q: Search term
        :qparam number include_drafts: Whether to include drafts in results
Responds with a list of :ref:`query <query-response-label>` objects.
"""
term = request.args.get('q', '')
if not term:
return []
include_drafts = request.args.get('include_drafts') is not None
self.record_event({
'action': 'search',
'object_type': 'query',
'term': term,
})
# this redirects to the new query list API that is aware of search
new_location = url_for(
'queries',
q=term,
org_slug=current_org.slug,
drafts='true' if include_drafts else 'false',
)
return {}, 301, {'Location': new_location}
class QueryRecentResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Retrieve up to 10 queries recently modified by the user.
Responds with a list of :ref:`query <query-response-label>` objects.
"""
results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)
return QuerySerializer(results, with_last_modified_by=False, with_user=False).serialize()
class BaseQueryListResource(BaseResource):
def get_queries(self, search_term):
if search_term:
results = models.Query.search(
search_term,
self.current_user.group_ids,
self.current_user.id,
include_drafts=True,
)
else:
results = models.Query.all_queries(
self.current_user.group_ids,
self.current_user.id,
include_drafts=True,
)
return filter_by_tags(results, models.Query.tags)
@require_permission('view_query')
def get(self):
"""
Retrieve a list of queries.
:qparam number page_size: Number of queries to return per page
:qparam number page: Page number to retrieve
:qparam number order: Name of column to order by
:qparam number q: Full text search term
Responds with an array of :ref:`query <query-response-label>` objects.
"""
# See if we want to do full-text search or just regular queries
search_term = request.args.get('q', '')
queries = self.get_queries(search_term)
results = filter_by_tags(queries, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_results = order_results(results, fallback=not bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(
ordered_results,
page=page,
page_size=page_size,
serializer=QuerySerializer,
with_stats=True,
with_last_modified_by=False
)
if search_term:
self.record_event({
'action': 'search',
'object_type': 'query',
'term': search_term,
})
else:
self.record_event({
'action': 'list',
'object_type': 'query',
})
return response
def require_access_to_dropdown_queries(user, query_def):
parameters = query_def.get('options', {}).get('parameters', [])
dropdown_query_ids = set([str(p['queryId']) for p in parameters if p['type'] == 'query'])
if dropdown_query_ids:
groups = models.Query.all_groups_for_query_ids(dropdown_query_ids)
if len(groups) < len(dropdown_query_ids):
abort(400, message="You are trying to associate a dropdown query that does not have a matching group. "
"Please verify the dropdown query id you are trying to associate with this query.")
require_access(dict(groups), user, view_only)
class QueryListResource(BaseQueryListResource):
@require_permission('create_query')
def post(self):
"""
Create a new query.
:<json number data_source_id: The ID of the data source this query will run on
:<json string query: Query text
:<json string name:
:<json string description:
:<json string schedule: Schedule interval, in seconds, for repeated execution of this query
:<json object options: Query options
.. _query-response-label:
:>json number id: Query ID
:>json number latest_query_data_id: ID for latest output data from this query
:>json string name:
:>json string description:
:>json string query: Query text
:>json string query_hash: Hash of query text
:>json string schedule: Schedule interval, in seconds, for repeated execution of this query
:>json string api_key: Key for public access to this query's results.
:>json boolean is_archived: Whether this query is displayed in indexes and search results or not.
:>json boolean is_draft: Whether this query is a draft or not
:>json string updated_at: Time of last modification, in ISO format
:>json string created_at: Time of creation, in ISO format
:>json number data_source_id: ID of the data source this query will run on
:>json object options: Query options
:>json number version: Revision version (for update conflict avoidance)
:>json number user_id: ID of query creator
:>json number last_modified_by_id: ID of user who last modified this query
:>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)
:>json number runtime: Runtime of last query execution, in seconds (may be null)
"""
query_def = request.get_json(force=True)
data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
require_access(data_source, self.current_user, not_view_only)
require_access_to_dropdown_queries(self.current_user, query_def)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
query_def.pop(field, None)
query_def['query_text'] = query_def.pop('query')
query_def['user'] = self.current_user
query_def['data_source'] = data_source
query_def['org'] = self.current_org
query_def['is_draft'] = True
query = models.Query.create(**query_def)
models.db.session.add(query)
models.db.session.commit()
self.record_event({
'action': 'create',
'object_id': query.id,
'object_type': 'query'
})
return QuerySerializer(query, with_visualizations=True).serialize()
class QueryArchiveResource(BaseQueryListResource):
def get_queries(self, search_term):
if search_term:
return models.Query.search(
search_term,
self.current_user.group_ids,
self.current_user.id,
include_drafts=False,
include_archived=True,
)
else:
return models.Query.all_queries(
self.current_user.group_ids,
self.current_user.id,
include_drafts=False,
include_archived=True,
)
class MyQueriesResource(BaseResource):
@require_permission('view_query')
def get(self):
"""
Retrieve a list of queries created by the current user.
:qparam number page_size: Number of queries to return per page
:qparam number page: Page number to retrieve
:qparam number order: Name of column to order by
:qparam number search: Full text search term
Responds with an array of :ref:`query <query-response-label>` objects.
"""
search_term = request.args.get('q', '')
if search_term:
results = models.Query.search_by_user(search_term, self.current_user)
else:
results = models.Query.by_user(self.current_user)
results = filter_by_tags(results, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_results = order_results(results, fallback=not bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
return paginate(
ordered_results,
page,
page_size,
QuerySerializer,
with_stats=True,
with_last_modified_by=False,
)
class QueryResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
"""
Modify a query.
:param query_id: ID of query to update
:<json number data_source_id: The ID of the data source this query will run on
:<json string query: Query text
:<json string name:
:<json string description:
:<json string schedule: Schedule interval, in seconds, for repeated execution of this query
:<json object options: Query options
Responds with the updated :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
query_def = request.get_json(force=True)
require_object_modify_permission(query, self.current_user)
require_access_to_dropdown_queries(self.current_user, query_def)
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
query_def.pop(field, None)
if 'query' in query_def:
query_def['query_text'] = query_def.pop('query')
if 'tags' in query_def:
query_def['tags'] = filter(None, query_def['tags'])
query_def['last_modified_by'] = self.current_user
query_def['changed_by'] = self.current_user
# SQLAlchemy handles the case where a concurrent transaction beats us
# to the update. But we still have to make sure that we're not starting
# out behind.
if 'version' in query_def and query_def['version'] != query.version:
abort(409)
try:
self.update_model(query, query_def)
models.db.session.commit()
except StaleDataError:
abort(409)
return QuerySerializer(query, with_visualizations=True).serialize()
@require_permission('view_query')
def get(self, query_id):
"""
Retrieve a query.
:param query_id: ID of query to fetch
Responds with the :ref:`query <query-response-label>` contents.
"""
q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(q, self.current_user, view_only)
result = QuerySerializer(q, with_visualizations=True).serialize()
result['can_edit'] = can_modify(q, self.current_user)
self.record_event({
'action': 'view',
'object_id': query_id,
'object_type': 'query',
})
return result
# TODO: move to resource of its own? (POST /queries/{id}/archive)
def delete(self, query_id):
"""
Archives a query.
:param query_id: ID of query to archive
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_admin_or_owner(query.user_id)
query.archive(self.current_user)
models.db.session.commit()
class QueryRegenerateApiKeyResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_admin_or_owner(query.user_id)
query.regenerate_api_key()
models.db.session.commit()
self.record_event({
            'action': 'regenerate_api_key',
'object_id': query_id,
'object_type': 'query',
})
result = QuerySerializer(query).serialize()
return result
class QueryForkResource(BaseResource):
@require_permission('edit_query')
def post(self, query_id):
"""
Creates a new query, copying the query text from an existing one.
:param query_id: ID of query to fork
Responds with created :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.data_source, self.current_user, not_view_only)
forked_query = query.fork(self.current_user)
models.db.session.commit()
self.record_event({
'action': 'fork',
'object_id': query_id,
'object_type': 'query',
})
return QuerySerializer(forked_query, with_visualizations=True).serialize()
class QueryRefreshResource(BaseResource):
def post(self, query_id):
"""
Execute a query, updating the query object with the results.
:param query_id: ID of query to execute
Responds with query task details.
"""
# TODO: this should actually check for permissions, but because currently you can only
# get here either with a user API key or a query one, we can just check whether it's
# an api key (meaning this is a query API key, which only grants read access).
if self.current_user.is_api_user():
abort(403, message="Please use a user API key.")
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query, self.current_user, not_view_only)
parameter_values = collect_parameters_from_request(request.args)
parameterized_query = ParameterizedQuery(query.query_text)
return run_query(parameterized_query, parameter_values, query.data_source, query.id)
class QueryTagsResource(BaseResource):
def get(self):
"""
Returns all query tags including those for drafts.
"""
tags = models.Query.all_tags(self.current_user, include_drafts=True)
return {
'tags': [
{
'name': name,
'count': count,
}
for name, count in tags
]
}
class QueryFavoriteListResource(BaseResource):
def get(self):
search_term = request.args.get('q')
if search_term:
base_query = models.Query.search(search_term, self.current_user.group_ids, include_drafts=True, limit=None)
favorites = models.Query.favorites(self.current_user, base_query=base_query)
else:
favorites = models.Query.favorites(self.current_user)
favorites = filter_by_tags(favorites, models.Query.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_favorites = order_results(favorites, fallback=not bool(search_term))
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 25, type=int)
response = paginate(
ordered_favorites,
page,
page_size,
QuerySerializer,
with_stats=True,
with_last_modified_by=False,
)
self.record_event({
'action': 'load_favorites',
'object_type': 'query',
'params': {
'q': search_term,
'tags': request.args.getlist('tags'),
'page': page
}
})
return response
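# Hedged illustration (not part of the original handler): the query-string
# parameters the favorites listing above reads; values are examples only.
def _example_favorites_query_string():
    return {
        "q": "revenue",       # optional full-text search term
        "tags": ["finance"],  # repeatable ?tags= filter
        "page": 1,
        "page_size": 25,
    }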
| 35.785156 | 127 | 0.633992 |
927f63e909e55bfdbda795c2c8fa0e274c345ffe | 4,473 | py | Python | discodo/client/gateway.py | AkiaCode/discodo | 0a76afb196a7945f525896f56f431e82aaf83f44 | ["MIT"] | null | null | null | discodo/client/gateway.py | AkiaCode/discodo | 0a76afb196a7945f525896f56f431e82aaf83f44 | ["MIT"] | null | null | null | discodo/client/gateway.py | AkiaCode/discodo | 0a76afb196a7945f525896f56f431e82aaf83f44 | ["MIT"] | null | null | null |
import asyncio
import concurrent.futures
import json
import logging
import threading
import time
from collections import deque
from typing import Any
import websockets
log = logging.getLogger("discodo.client")
class keepAlive(threading.Thread):
def __init__(self, ws, interval: int, *args, **kwargs) -> None:
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
self.ws = ws
self.interval = interval
self.Stopped = threading.Event()
self.latency = None
self.recent_latencies = deque(maxlen=20)
self._lastAck = self._lastSend = time.perf_counter()
self.timeout = ws.heartbeatTimeout
self.threadId = ws.threadId
def run(self) -> None:
while not self.Stopped.wait(self.interval):
if (self._lastAck + self.timeout) < time.perf_counter():
Runner = asyncio.run_coroutine_threadsafe(
self.ws.close(4000), self.ws.loop
)
try:
Runner.result()
except:
pass
finally:
return self.stop()
payload = {"op": "HEARTBEAT", "d": int(time.time() * 1000)}
Runner = asyncio.run_coroutine_threadsafe(
self.ws.sendJson(payload), self.ws.loop
)
try:
totalBlocked = 0
while True:
try:
Runner.result(10)
break
except concurrent.futures.TimeoutError:
totalBlocked += 10
log.warning(
f"Heartbeat blocked for more than {totalBlocked} seconds."
)
except:
return self.stop()
else:
self._lastSend = time.perf_counter()
def ack(self) -> None:
self._lastAck = time.perf_counter()
self.latency = self._lastAck - self._lastSend
self.recent_latencies.append(self.latency)
def stop(self) -> None:
self.Stopped.set()
class NodeConnection(websockets.client.WebSocketClientProtocol):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@classmethod
async def connect(cls, node, loop=asyncio.get_event_loop(), timeout=10.0):
ws = await asyncio.wait_for(
websockets.connect(
node.WS_URL,
loop=loop,
klass=cls,
extra_headers={"Authorization": node.password},
),
timeout=timeout,
)
ws.node = node
ws.heartbeatTimeout = 60.0
ws.threadId = threading.get_ident()
return ws
@property
def is_connected(self) -> bool:
return self.open and not self.closed
@property
def latency(self) -> float:
return self._keepAliver.latency if self._keepAliver else None
@property
def averageLatency(self) -> float:
if not self._keepAliver:
return None
return sum(self._keepAliver.recent_latencies) / len(
self._keepAliver.recent_latencies
)
async def sendJson(self, data) -> None:
log.debug(f"send to websocket {data}")
await super().send(json.dumps(data))
async def send(self, Operation: dict, Data: Any = None) -> None:
payload = {"op": Operation, "d": Data}
await self.sendJson(payload)
async def poll(self) -> None:
try:
Message = await asyncio.wait_for(self.recv(), timeout=30.0)
JsonData = json.loads(Message)
except websockets.exceptions.ConnectionClosed as exc:
raise exc
else:
Operation, Data = JsonData["op"], JsonData["d"]
if Operation == "HELLO":
await self.HELLO(Data)
elif Operation == "HEARTBEAT_ACK":
await self.HEARTBEAT_ACK(Data)
return Operation, Data
async def close(self, *args, **kwargs) -> None:
if self._keepAliver:
self._keepAliver.stop()
await super().close(*args, **kwargs)
async def HELLO(self, Data: dict) -> None:
self._keepAliver = keepAlive(self, min(Data["heartbeat_interval"], 5.0))
self._keepAliver.start()
async def HEARTBEAT_ACK(self, Data: dict) -> None:
self._keepAliver.ack()
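# Hedged usage sketch (not part of the original module): connect to a node and
# read a single event. "node" is assumed to expose WS_URL and password, which is
# what NodeConnection.connect() above relies on.
async def _example_poll_once(node):
    ws = await NodeConnection.connect(node)
    try:
        # The first message is normally HELLO, which also starts the keepalive thread.
        operation, data = await ws.poll()
        return operation, data
    finally:
        await ws.close()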
| 30.020134 | 86 | 0.558015 |
2e849b07ee0064623c0ced3ad9597c78c10c1031 | 894 | py | Python | oscar/apps/catalogue/reviews/app.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | ["BSD-3-Clause"] | null | null | null | oscar/apps/catalogue/reviews/app.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | ["BSD-3-Clause"] | null | null | null | oscar/apps/catalogue/reviews/app.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | ["BSD-3-Clause"] | null | null | null |
from django.conf.urls import patterns, url
from oscar.core.application import Application
from . import views
class ProductReviewsApplication(Application):
name = None
hidable_feature_name = "reviews"
detail_view = views.ProductReviewDetail
create_view = views.CreateProductReview
vote_view = views.AddVoteView
list_view = views.ProductReviewList
def get_urls(self):
urlpatterns = patterns('',
url(r'^(?P<pk>\d+)/$', self.detail_view.as_view(),
name='reviews-detail'),
url(r'^add/$', self.create_view.as_view(), name='reviews-add'),
url(r'^(?P<pk>\d+)/vote/$', self.vote_view.as_view(),
name='reviews-vote'),
url(r'^$', self.list_view.as_view(), name='reviews-list'),
)
return self.post_process_urls(urlpatterns)
application = ProductReviewsApplication()
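# Hedged usage sketch (not part of this module): the application instance above is
# typically mounted from a parent URLconf, e.g.
#   url(r'^reviews/', include(application.urls))
# The prefix and the include() call are assumptions, not taken from this file.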
| 30.827586 | 75 | 0.645414 |
7c073317776baef7d8a08280aaeeaaa5989936f3 | 2,651 | py | Python | geomagio/metadata/Metadata.py | usgs/geomag-algorithms | a83a0e36bed9307828e37b9130c25dbc26dd1bc9 | ["CC0-1.0"] | 49 | 2015-10-06T17:57:20.000Z | 2022-01-12T18:40:17.000Z | geomagio/metadata/Metadata.py | usgs/geomag-algorithms | a83a0e36bed9307828e37b9130c25dbc26dd1bc9 | ["CC0-1.0"] | 229 | 2015-01-26T20:10:36.000Z | 2022-03-12T00:46:33.000Z | geomagio/metadata/Metadata.py | usgs/geomag-algorithms | a83a0e36bed9307828e37b9130c25dbc26dd1bc9 | ["CC0-1.0"] | 44 | 2015-03-03T16:18:18.000Z | 2021-11-06T17:07:38.000Z |
from datetime import timezone
from typing import Dict
from obspy import UTCDateTime
from pydantic import BaseModel, validator
from .. import pydantic_utcdatetime
from .MetadataCategory import MetadataCategory
class Metadata(BaseModel):
"""
This class is used for Data flagging and other Metadata.
Flag example:
```
automatic_flag = Metadata(
created_by = 'algorithm/version',
start_time = UTCDateTime('2020-01-02T00:17:00.1Z'),
end_time = UTCDateTime('2020-01-02T00:17:00.1Z'),
network = 'NT',
station = 'BOU',
channel = 'BEU',
category = MetadataCategory.FLAG,
comment = "spike detected",
priority = 1,
data_valid = False)
```
Adjusted Matrix example:
```
adjusted_matrix = Metadata(
created_by = 'algorithm/version',
start_time = UTCDateTime('2020-01-02T00:17:00Z'),
end_time = None,
network = 'NT',
station = 'BOU',
category = MetadataCategory.ADJUSTED_MATRIX,
comment = 'automatic adjusted matrix',
priority = 1,
value = {
'parameters': {'x': 1, 'y': 2, 'z': 3}
'matrix': [ ... ]
}
)
```
"""
# database id
id: int = None
# metadata history id referencing database id
metadata_id: int = None
# author
created_by: str = None
created_time: UTCDateTime = None
# editor
updated_by: str = None
updated_time: UTCDateTime = None
# time range
starttime: UTCDateTime = None
endtime: UTCDateTime = None
# what data metadata references, null for wildcard
network: str = None
station: str = None
channel: str = None
location: str = None
# category (flag, matrix, etc)
category: MetadataCategory = None
# higher priority overrides lower priority
priority: int = 1
# whether data is valid (primarily for flags)
data_valid: bool = True
# metadata json blob
metadata: Dict = None
# general comment
comment: str = None
# review specific comment
review_comment: str = None
# metadata status indicator
status: str = None
def datetime_dict(self, **kwargs):
values = self.dict(**kwargs)
for key in ["created_time", "updated_time", "starttime", "endtime"]:
if key in values and values[key] is not None:
values[key] = values[key].datetime.replace(tzinfo=timezone.utc)
return values
@validator("created_time")
def set_default_created_time(cls, created_time: UTCDateTime = None) -> UTCDateTime:
return created_time or UTCDateTime()
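# Hedged usage sketch (not part of the original module): the flag from the class
# docstring, built with the actual field names (starttime/endtime) and serialized
# with timezone-aware datetimes.
def _example_flag_dict() -> Dict:
    flag = Metadata(
        created_by="algorithm/version",
        starttime=UTCDateTime("2020-01-02T00:17:00.1Z"),
        endtime=UTCDateTime("2020-01-02T00:17:00.1Z"),
        network="NT",
        station="BOU",
        channel="BEU",
        category=MetadataCategory.FLAG,
        comment="spike detected",
        priority=1,
        data_valid=False,
    )
    return flag.datetime_dict()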
| 28.815217 | 87 | 0.622784 |
249e5eacafdff2c059db3a84e45f1c825138494a | 1,362 | py | Python | test/fixtures_vessel.py | ruy-sevalho/german_lloyds_hsc_rules | ac65158ddfeca0b96487c4959476256e83981d3f | ["MIT"] | null | null | null | test/fixtures_vessel.py | ruy-sevalho/german_lloyds_hsc_rules | ac65158ddfeca0b96487c4959476256e83981d3f | ["MIT"] | null | null | null | test/fixtures_vessel.py | ruy-sevalho/german_lloyds_hsc_rules | ac65158ddfeca0b96487c4959476256e83981d3f | ["MIT"] | null | null | null |
"""
# @ Author: Ruy Sevalho
# @ Create Time: 2021-08-24 08:59:25
# @ Description:
"""
import pytest as pt
from gl_hsc_scantling.vessel import Monohull, Catamaran
from .exp_output import ExpVessel, ExpCatamaran, ExpCatamaranGlobalLoads
@pt.fixture
def vessel_input_ex1():
"""Test vessel 01"""
return {
"name": "catamaran",
"service_range": "USR",
"type_of_service": "PASSENGER",
"speed": 15,
"displacement": 6,
"length": 10,
"beam": 6.5,
"fwd_perp": 10,
"aft_perp": 0,
"draft": 0.51,
"z_baseline": -0.51,
"block_coef": 0.4,
"water_plane_area": 10,
"lcg": 4,
"deadrise_lcg": 12,
"dist_hull_cl": 4.6,
}
@pt.fixture
def vessel_ex1(vessel_input_ex1) -> Catamaran:
return Catamaran(**vessel_input_ex1)
@pt.fixture
def vessel_ex1_expected():
exp_vessel = ExpVessel(
**{
"vert_acg": 1,
"max_wave_height": 1.424449396,
"sig_wave_height": 0.407531163657313,
}
)
exp_cat_loads = ExpCatamaranGlobalLoads(
**{
"transverse_bending_moment": 54.1512,
"transverse_shear_force": 14.715,
"transverse_torsional_moment": 73.575,
}
)
return ExpCatamaran(general_param=exp_vessel, cat_loads=exp_cat_loads)
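# Hedged usage sketch (not part of the original fixtures): how a test might pair
# the vessel fixture with its expected values. The vert_acg attribute on Catamaran
# is an assumption; adjust it to the real API.
def test_vert_acg_example(vessel_ex1, vessel_ex1_expected):
    assert vessel_ex1.vert_acg == pt.approx(
        vessel_ex1_expected.general_param.vert_acg, rel=1e-3
    )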
| 23.894737 | 74 | 0.585169 |
41ed923f23137df14269d04609f85556aa2a3c13 | 3,688 | py | Python | lib/roi_data_layer/minibatch.py | guanbin1994/arm_fracture_fpn | 9aa59b6d1a8a780addb5c7c3f37ed96b736483f5 | ["MIT"] | null | null | null | lib/roi_data_layer/minibatch.py | guanbin1994/arm_fracture_fpn | 9aa59b6d1a8a780addb5c7c3f37ed96b736483f5 | ["MIT"] | null | null | null | lib/roi_data_layer/minibatch.py | guanbin1994/arm_fracture_fpn | 9aa59b6d1a8a780addb5c7c3f37ed96b736483f5 | ["MIT"] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
from scipy.misc import imread
from model.utils.config import cfg
from model.utils.blob import prep_im_for_blob, im_list_to_blob
import pdb
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
else:
# For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
blobs['img_id'] = roidb[0]['img_id']
return blobs
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
#im = cv2.imread(roidb[i]['image'])
im = imread(roidb[i]['image'])
if len(im.shape) == 2:
im = im[:,:,np.newaxis]
im = np.concatenate((im,im,im), axis=2)
# flip the channel, since the original one using cv2
# rgb -> bgr
im = im[:,:,::-1]
if False: #cfg.mode == 'trans' or cfg.mode == 'img_trans':
if roidb[i]['flipped'] == 1:
im = np.rot90(im)
elif roidb[i]['flipped'] == 2:
im = np.rot90(im)
im = np.rot90(im)
elif roidb[i]['flipped'] == 3:
im = np.rot90(im)
im = np.rot90(im)
im = np.rot90(im)
elif roidb[i]['flipped'] == 4:
im = im[:, ::-1, :]
elif roidb[i]['flipped'] == 5:
im = im[:, ::-1, :]
im = np.rot90(im)
elif roidb[i]['flipped'] == 6:
im = im[:, ::-1, :]
im = np.rot90(im)
im = np.rot90(im)
elif roidb[i]['flipped'] == 7:
im = im[:, ::-1, :]
im = np.rot90(im)
im = np.rot90(im)
im = np.rot90(im)
else:
if roidb[i]['flipped'] == True:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
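# Hedged illustration (not part of the original module): the minimal roidb entry
# get_minibatch() expects for a single image; the path and values are examples only.
def _example_roidb_entry():
    from scipy.sparse import csr_matrix
    return {
        "image": "example.png",  # hypothetical image path
        "img_id": 0,
        "flipped": False,
        "boxes": np.array([[10, 10, 50, 50]], dtype=np.uint16),
        "gt_classes": np.array([1], dtype=np.int32),
        "gt_overlaps": csr_matrix(np.array([[0.0, 1.0]], dtype=np.float32)),
    }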
| 32.637168 | 113 | 0.59897 |
9fccf44e999dd6ba32e33eb2cd0a006f408caa15 | 2,114 | py | Python | src/shapeandshare/dicebox/models/layer.py | shapeandshare/dicebox | 63b9c05d8e2217b4f2ecaeaa265229148b895f86 | ["MIT"] | null | null | null | src/shapeandshare/dicebox/models/layer.py | shapeandshare/dicebox | 63b9c05d8e2217b4f2ecaeaa265229148b895f86 | ["MIT"] | 17 | 2017-09-25T00:29:46.000Z | 2022-03-11T23:15:40.000Z | src/shapeandshare/dicebox/models/layer.py | shapeandshare/dicebox | 63b9c05d8e2217b4f2ecaeaa265229148b895f86 | ["MIT"] | null | null | null |
from abc import ABC
from enum import Enum
from random import choices
from typing import Tuple
class LayerType(Enum):
DROPOUT = "dropout"
DENSE = "dense"
CONV2D = "conv2d" # https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
def select_random_layer_type() -> LayerType:
return choices([LayerType.DROPOUT, LayerType.DENSE, LayerType.CONV2D])[0]
class ActivationFunction(Enum):
ELU = "elu"
EXPONENTIAL = "exponential"
HARD_SIGMOID = "hard_sigmoid"
LINEAR = "linear"
RELU = "relu"
SELU = "selu"
SIGMOID = "sigmoid"
SOFTMAX = "softmax"
SOFTPLUS = "softplus"
SOFTSIGN = "softsign"
SWISH = "swish"
TANH = "tanh"
class Conv2DPadding(Enum):
VALID = "valid"
SAME = "same"
def select_random_conv2d_padding_type() -> Conv2DPadding:
return choices([Conv2DPadding.VALID, Conv2DPadding.SAME])[0]
class Layer(ABC):
layer_type: LayerType
def __init__(self, layer_type: LayerType):
self.layer_type: LayerType = layer_type
class DenseLayer(Layer):
size: int
activation: ActivationFunction
def __init__(self, size: int, activation: ActivationFunction):
super().__init__(layer_type=LayerType.DENSE)
self.size = size
self.activation = activation
class DropoutLayer(Layer):
rate: float
def __init__(self, rate: float):
super().__init__(layer_type=LayerType.DROPOUT)
self.rate = rate
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
class Conv2DLayer(Layer):
filters: int
kernel_size: Tuple[int, int]
strides: Tuple[int, int]
padding: Conv2DPadding
activation: ActivationFunction
def __init__(
self,
filters: int,
kernel_size: Tuple[int, int],
strides: Tuple[int, int],
padding: Conv2DPadding,
activation: ActivationFunction,
):
super().__init__(layer_type=LayerType.CONV2D)
self.filters = filters
self.kernel_size = kernel_size
self.strides = strides
self.padding = padding
self.activation = activation
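# Hedged usage sketch (not part of the original module): constructing one layer of
# each type defined above.
def _example_layers():
    return [
        DenseLayer(size=64, activation=ActivationFunction.RELU),
        DropoutLayer(rate=0.2),
        Conv2DLayer(
            filters=32,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=Conv2DPadding.SAME,
            activation=ActivationFunction.RELU,
        ),
    ]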
| 23.752809 | 90 | 0.669347 |
fe046340edaf779522cc4cae99e385fe75cd992a | 4,137 | py | Python | TTS_generator/utils/basic_file_operations_class.py | secretsauceai/precise-wakeword-model-maker | 00f91cbeab6d6a230300333ec6557623c99880cb | ["Apache-2.0"] | 8 | 2022-02-23T08:17:59.000Z | 2022-03-20T13:34:56.000Z | TTS_generator/utils/basic_file_operations_class.py | secretsauceai/precise-wakeword-model-maker | 00f91cbeab6d6a230300333ec6557623c99880cb | ["Apache-2.0"] | 15 | 2021-12-30T11:41:09.000Z | 2022-03-01T18:20:09.000Z | utils/basic_file_operations_class.py | AmateurAcademic/TTS-wakeword-generator | 0d002e9af36c56f97054b06134af2d4e09df1868 | ["Apache-2.0"] | 1 | 2021-12-31T19:47:06.000Z | 2021-12-31T19:47:06.000Z |
import os
from os import listdir
from os.path import isfile, join
import shutil
class BasicFileOperations:
@staticmethod
def get_files(source_directory):
return [f for f in listdir(source_directory) if isfile(join(source_directory, f))]
@staticmethod
def get_number_of_files(source_directory):
return len(BasicFileOperations.get_files(source_directory))
@staticmethod
def get_limited_number_of_files(source_directory, max_number_of_files):
max_number_of_files -= 1
files = [f for f in listdir(source_directory)[:max_number_of_files] if isfile(join(source_directory, f))]
return files
@staticmethod
def copy_file(file, source_directory, destination_directory):
try:
shutil.copyfile(source_directory+file, destination_directory+file)
except:
print(f"Error with {file}")
@staticmethod
def rename_file(old_filename, new_filename, directory=''):
try:
os.rename(directory + old_filename, directory + new_filename)
except:
print(f"Error with {old_filename}")
@staticmethod
def backup_file(source_file, destination_file, source_directory=None):
        '''Back up a file by copying it to destination_file.
        If source_directory is given, both paths are resolved inside that directory.'''
try:
if source_directory:
shutil.copyfile(source_directory + source_file, source_directory + destination_file)
else:
shutil.copyfile(source_file, destination_file)
except:
print(f"Error with {source_file}")
@staticmethod
def make_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
@staticmethod
def copy_directory(files, source_directory, destination_directory):
BasicFileOperations.make_directory(destination_directory)
for file in files:
try:
shutil.copyfile(source_directory + file, destination_directory + file)
except:
...
@staticmethod
def delete_directory(directory):
if os.path.exists(directory):
print(f'Deleting {directory}')
shutil.rmtree(directory)
else:
print(f'Directory {directory} does not exist')
@staticmethod
def delete_file(file):
if os.path.exists(file):
print(f'Removing {file}')
os.remove(file)
else:
print(f'File {file} does not exist')
@staticmethod
def delete_files_in_directory(files, directory):
for file in files:
BasicFileOperations.delete_file(directory + file)
@staticmethod
def rename_directory(source_directory, destination_directory):
if os.path.exists(destination_directory):
print(f'Directory {destination_directory} already exists')
else:
if os.path.exists(source_directory):
os.rename(source_directory, destination_directory)
print(f'Directory {source_directory} renamed to {destination_directory}')
else:
print(f'Directory {source_directory} does not exist')
@staticmethod
def split_files_into_multiple_directories(files, number_of_files_per_directory, source_directory, destination_directory):
'''This will take a directorey with a huge amount of files and break them down into smaller directories
It can have a max number of files (might have to refactor get_files for getting only a max number)'''
directory_number = 1
file_count = 1
for file in files:
if file_count < number_of_files_per_directory:
BasicFileOperations.copy_file(file, source_directory, destination_directory + '_0' + str(directory_number))
file_count += 1
elif file_count == number_of_files_per_directory:
BasicFileOperations.copy_file(file, source_directory, destination_directory + '_0' + str(directory_number))
directory_number += 1
                file_count = 1
 | 38.663551 | 125 | 0.659898 |
e03be90f8dcd0fb9d385de020ec7d6d67b11d497 | 1,267 | py | Python | tag_generator.py | nshobe/nshobe.github.io | d958bdd7180cb9d9a41553bbee2164e210731bd7 | ["MIT"] | null | null | null | tag_generator.py | nshobe/nshobe.github.io | d958bdd7180cb9d9a41553bbee2164e210731bd7 | ["MIT"] | null | null | null | tag_generator.py | nshobe/nshobe.github.io | d958bdd7180cb9d9a41553bbee2164e210731bd7 | ["MIT"] | null | null | null |
#!/usr/bin/python3
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: [email protected]
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'
filenames = glob.glob(post_dir + '*md')
total_tags = []
for filename in filenames:
f = open(filename, 'r', encoding='utf8')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
if current_tags[0] == 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
for tag in total_tags:
tag_filename = tag_dir + tag + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
| 23.90566 | 109 | 0.579321 |
985ca4b30c64a4b4924dbf5ae3d65d85a52c3d52 | 125 | py | Python | hello.py | jhaigler94/cs3240-labdemo | 4a921f4a8c8dec00fbd12d691513763becab6e0e | ["MIT"] | null | null | null | hello.py | jhaigler94/cs3240-labdemo | 4a921f4a8c8dec00fbd12d691513763becab6e0e | ["MIT"] | null | null | null | hello.py | jhaigler94/cs3240-labdemo | 4a921f4a8c8dec00fbd12d691513763becab6e0e | ["MIT"] | null | null | null |
from helper import greeting
if __name__ == "__main__":
greeting("Hello. This greeting comes from the 'develop branch'")
| 25 | 68 | 0.736 |
40db710ebfa02e90add3d34c19b4564c882452d8 | 3,973 | py | Python | contrib/linearize/linearize-hashes.py | hideoussquid/aureus-core-gui | ce075f2f0f9c99a344a1b0629cfd891526daac7b | ["MIT"] | null | null | null | contrib/linearize/linearize-hashes.py | hideoussquid/aureus-core-gui | ce075f2f0f9c99a344a1b0629cfd891526daac7b | ["MIT"] | null | null | null | contrib/linearize/linearize-hashes.py | hideoussquid/aureus-core-gui | ce075f2f0f9c99a344a1b0629cfd891526daac7b | ["MIT"] | null | null | null |
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
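# Worked example (added for illustration): hex_switchEndian('1a2b3c') returns '3c2b1a'.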
class AureusRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = httplib.HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = AureusRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
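# Hedged illustration (not part of the original script): a minimal CONFIG-FILE that
# the __main__ block below can parse; every value here is an example only.
#   rpcuser=someuser
#   rpcpassword=somepassword
#   host=127.0.0.1
#   port=19697
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false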
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 19697
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
get_block_hashes(settings)
| 29 | 90 | 0.684873 |